diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 548e5f4e86..25a977366c 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -11,6 +11,8 @@ - [ ] Any new logging statements use an appropriate subsystem and logging level - [ ] Code has been formatted with `go fmt` +- [ ] Protobuf files (`lnrpc/**/*.proto`) have been formatted with + `make rpc-format` and compiled with `make rpc` - [ ] For code and documentation: lines are wrapped at 80 characters (the tab character should be counted as 8 characters, not 4, as some IDEs do per default) diff --git a/.gitignore b/.gitignore index 48efdd2abd..ed8d0a8d8b 100644 --- a/.gitignore +++ b/.gitignore @@ -63,3 +63,6 @@ profile.tmp .DS_Store .vscode + +# Coverage test +coverage.txt \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index ca3e68818a..c0057fce9f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,23 +2,20 @@ run: # timeout for analysis deadline: 4m - # Linting uses a lot of memory. Keep it under control by only running a single - # worker. - concurrency: 1 - # Skip autogenerated files for mobile. skip-files: - "mobile\\/.*generated\\.go" + skip-dirs: + - channeldb/migration_01_to_11 + build-tags: - autopilotrpc - chainrpc - invoicesrpc - - routerrpc - signrpc - walletrpc - watchtowerrpc - - wtclientrpc linters-settings: govet: @@ -55,6 +52,16 @@ linters: # the linter. - prealloc + # Init functions are used by loggers throughout the codebase. + - gochecknoinits + issues: # Only show newly introduced problems. new-from-rev: 01f696afce2f9c0d4ed854edefa3846891d01d8a + + exclude-rules: + # Exclude gosec from running for tests so that tests with weak randomness + # (math/rand) will pass the linter. 
+ - path: _test\.go + linters: + - gosec diff --git a/.travis.yml b/.travis.yml index 8253912e3a..79e007e9d8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,12 +1,20 @@ language: go cache: directories: - - ~/bitcoin/bitcoin-0.18.1/bin + - ~/bitcoin/bitcoin-0.19.1/bin + - $DOWNLOAD_CACHE - $GOCACHE - $GOPATH/pkg/mod - $GOPATH/src/github.com/btcsuite - $GOPATH/src/github.com/golang + - $GOPATH/src/github.com/grpc-ecosystem - $GOPATH/src/gopkg.in/alecthomas + - $GOPATH/src/google.golang.org + +# Remove Travis' default flag --depth=50 from the git clone command to make sure +# we have the whole git history, including the commit we lint against. +git: + depth: false go: - "1.13.x" @@ -14,33 +22,39 @@ go: env: global: - GOCACHE=$HOME/.go-build - matrix: - - RACE=true - - ITEST=true - - NEUTRINO_ITEST=true - - BITCOIND_ITEST=true - - COVER=true + - DOWNLOAD_CACHE=$HOME/download_cache sudo: required -script: - - export GO111MODULE=on - - bash ./scripts/install_bitcoind.sh - - # Run unit tests with race condition detector. - - 'if [ "$RACE" = true ]; then make travis-race ; fi' +addons: + apt: + packages: + - clang-format - # Run btcd integration tests. - - 'if [ "$ITEST" = true ]; then make travis-itest; fi' - - # Run neutrino integration tests. - - 'if [ "$NEUTRINO_ITEST" = true ]; then make travis-itest backend=neutrino; fi' - - # Run bitcoind integration tests. - - 'if [ "$BITCOIND_ITEST" = true ]; then make travis-itest backend=bitcoind; fi' +before_script: + - bash ./scripts/install_travis_proto.sh + - bash ./scripts/install_bitcoind.sh - # Run unit tests and generate coverage report. - - 'if [ "$COVER" = true ]; then make travis-cover; fi' +jobs: + include: + - stage: Build + script: + - make rpc-check + - make unit pkg=... 
case=_NONE_ + - make lint workers=1 + - make btcd + - make release sys=windows-amd64 + - stage: Test + script: make travis-cover + name: Unit Cover + - script: make travis-race + name: Unit Race + - script: make itest + name: Btcd Integration + - script: make itest backend=bitcoind + name: Bitcoind Integration + - script: make itest backend=neutrino + name: Neutrino Integration after_script: - LOG_FILES=./lntest/itest/*.log diff --git a/Dockerfile b/Dockerfile index b0408f8fa7..d4185fa10b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,6 +4,11 @@ FROM golang:1.13-alpine as builder # queries required to connect to linked containers succeed. ENV GODEBUG netdns=cgo +# Pass a tag, branch or a commit using build-arg. This allows a docker +# image to be built from a specified Git state. The default image +# will use the Git tip of master by default. +ARG checkout="master" + # Install dependencies and build the binaries. RUN apk add --no-cache --update alpine-sdk \ git \ @@ -11,8 +16,9 @@ RUN apk add --no-cache --update alpine-sdk \ gcc \ && git clone https://github.com/groestlcoin/lnd /go/src/github.com/groestlcoin/lnd \ && cd /go/src/github.com/groestlcoin/lnd \ +&& git checkout $checkout \ && make \ -&& make install tags="signrpc walletrpc chainrpc invoicesrpc routerrpc" +&& make install tags="signrpc walletrpc chainrpc invoicesrpc" # Start a new, final image. 
FROM alpine as final diff --git a/Makefile b/Makefile index 1f7a95f114..98b1b82493 100644 --- a/Makefile +++ b/Makefile @@ -22,7 +22,7 @@ ANDROID_BUILD_DIR := $(MOBILE_BUILD_DIR)/android ANDROID_BUILD := $(ANDROID_BUILD_DIR)/Lndmobile.aar COMMIT := $(shell git describe --abbrev=40 --dirty) -LDFLAGS := -ldflags "-X $(PKG)/build.Commit=$(COMMIT)" +COMMIT_HASH := $(shell git rev-parse HEAD) BTCD_COMMIT := $(shell cat go.mod | \ grep $(BTCD_PKG) | \ @@ -36,8 +36,9 @@ GOACC_COMMIT := ddc355013f90fea78d83d3a6c71f1d37ac07ecd5 DEPGET := cd /tmp && GO111MODULE=on go get -v GOBUILD := GO111MODULE=on go build -v GOINSTALL := GO111MODULE=on go install -v -GOTEST := GO111MODULE=on go test -v +GOTEST := GO111MODULE=on go test +GOVERSION := $(shell go version | awk '{print $$3}') GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*") GOLIST := go list -deps $(PKG)/... | grep '$(PKG)'| grep -v '/vendor/' GOLISTCOVER := $(shell go list -deps -f '{{.ImportPath}}' ./... | grep '$(PKG)' | sed -e 's/^$(ESCPKG)/./') @@ -48,10 +49,33 @@ MAKE := make XARGS := xargs -L 1 include make/testing_flags.mk +include make/release_flags.mk DEV_TAGS := $(if ${tags},$(DEV_TAGS) ${tags},$(DEV_TAGS)) -LINT = $(LINT_BIN) run -v +# We only return the part inside the double quote here to avoid escape issues +# when calling the external release script. The second parameter can be used to +# add additional ldflags if needed (currently only used for the release). +make_ldflags = $(2) -X $(PKG)/build.Commit=$(COMMIT) \ + -X $(PKG)/build.CommitHash=$(COMMIT_HASH) \ + -X $(PKG)/build.GoVersion=$(GOVERSION) \ + -X $(PKG)/build.RawTags=$(shell echo $(1) | sed -e 's/ /,/g') + +LDFLAGS := -ldflags "$(call make_ldflags, ${tags})" +DEV_LDFLAGS := -ldflags "$(call make_ldflags, $(DEV_TAGS))" +ITEST_LDFLAGS := -ldflags "$(call make_ldflags, $(ITEST_TAGS))" + +# For the release, we want to remove the symbol table and debug information (-s) +# and omit the DWARF symbol table (-w). 
Also we clear the build ID. +RELEASE_LDFLAGS := $(call make_ldflags, $(RELEASE_TAGS), -s -w -buildid=) + +# Linting uses a lot of memory, so keep it under control by limiting the number +# of workers if requested. +ifneq ($(workers),) +LINT_WORKERS = --concurrency=$(workers) +endif + +LINT = $(LINT_BIN) run -v $(LINT_WORKERS) GREEN := "\\033[0;32m" NC := "\\033[0m" @@ -89,19 +113,24 @@ btcd: build: @$(call print, "Building debug lnd and lncli.") - $(GOBUILD) -tags="$(DEV_TAGS)" -o lnd-debug $(LDFLAGS) $(PKG)/cmd/lnd - $(GOBUILD) -tags="$(DEV_TAGS)" -o lncli-debug $(LDFLAGS) $(PKG)/cmd/lncli + $(GOBUILD) -tags="$(DEV_TAGS)" -o lnd-debug $(DEV_LDFLAGS) $(PKG)/cmd/lnd + $(GOBUILD) -tags="$(DEV_TAGS)" -o lncli-debug $(DEV_LDFLAGS) $(PKG)/cmd/lncli build-itest: @$(call print, "Building itest lnd and lncli.") - $(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest $(LDFLAGS) $(PKG)/cmd/lnd - $(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest $(LDFLAGS) $(PKG)/cmd/lncli + $(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lnd + $(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lncli install: @$(call print, "Installing lnd and lncli.") $(GOINSTALL) -tags="${tags}" $(LDFLAGS) $(PKG)/cmd/lnd $(GOINSTALL) -tags="${tags}" $(LDFLAGS) $(PKG)/cmd/lncli +release: + @$(call print, "Releasing lnd and lncli binaries.") + $(VERSION_CHECK) + ./build/release/release.sh build-release "$(VERSION_TAG)" "$(BUILD_SYSTEM)" "$(RELEASE_TAGS)" "$(RELEASE_LDFLAGS)" + scratch: build @@ -135,11 +164,9 @@ goveralls: $(GOVERALLS_BIN) $(GOVERALLS_BIN) -coverprofile=coverage.txt -service=travis-ci -travis-race: lint btcd unit-race - -travis-cover: lint btcd unit-cover goveralls +travis-race: btcd unit-race -travis-itest: lint itest +travis-cover: btcd unit-cover goveralls # ============= # FLAKE HUNTING @@ -176,6 +203,14 @@ rpc: @$(call print, "Compiling protos.") cd ./lnrpc; ./gen_protos.sh +rpc-format: + @$(call print, "Formatting protos.") + cd ./lnrpc; find . 
-name "*.proto" | xargs clang-format --style=file -i + +rpc-check: rpc + @$(call print, "Verifying protos.") + if test -n "$$(git describe --dirty | grep dirty)"; then echo "Protos not properly formatted or not compiled with v3.4.0"; git status; git diff; exit 1; fi + mobile-rpc: @$(call print, "Creating mobile RPC from protos.") cd ./mobile; ./gen_bindings.sh @@ -225,6 +260,8 @@ clean: lint \ list \ rpc \ + rpc-format \ + rpc-check \ mobile-rpc \ vendor \ ios \ diff --git a/README.md b/README.md index f8143baba1..22030e5292 100644 --- a/README.md +++ b/README.md @@ -58,7 +58,7 @@ resources including talks, articles, and example applications can be found at: [dev.lightning.community](https://dev.lightning.community). Finally, we also have an active -[Slack](https://join.slack.com/t/lightningcommunity/shared_invite/enQtMzQ0OTQyNjE5NjU1LThmOGJkZDIzY2U2ODI5ODhmOWMzM2FkOTY5ZTdkYTc5NDYyN2U1YTJkZTE1MWU2OTZlZTAyOWY4NGY2M2M1NTM) where protocol developers, application developers, testers and users gather to +[Slack](https://lightning.engineering/slack.html) where protocol developers, application developers, testers and users gather to discuss various aspects of `lnd` and also Lightning in general. ## Installation @@ -67,12 +67,19 @@ discuss various aspects of `lnd` and also Lightning in general. ## Docker To run lnd from Docker, please see the main [Docker instructions](docs/DOCKER.md) - + ## IRC * irc.freenode.net * channel #lnd * [webchat](https://webchat.freenode.net/?channels=lnd) +## Safety + +When operating a mainnet `lnd` node, please refer to our [operational safety +guildelines](docs/safety.md). It is important to note that `lnd` is still +**beta** software and that ignoring these operational guidelines can lead to +loss of funds. + ## Security The developers of `lnd` take security _very_ seriously. 
The disclosure of diff --git a/aezeed/cipherseed_rpctest.go b/aezeed/cipherseed_rpctest.go new file mode 100644 index 0000000000..82f782cd2d --- /dev/null +++ b/aezeed/cipherseed_rpctest.go @@ -0,0 +1,13 @@ +// +build rpctest + +package aezeed + +import "github.com/btcsuite/btcwallet/waddrmgr" + +func init() { + // For the purposes of our itest, we'll crank down the scrypt params a + // bit. + scryptN = waddrmgr.FastScryptOptions.N + scryptR = waddrmgr.FastScryptOptions.R + scryptP = waddrmgr.FastScryptOptions.P +} diff --git a/autopilot/agent.go b/autopilot/agent.go index dc8a3af91c..6fe72f98ba 100644 --- a/autopilot/agent.go +++ b/autopilot/agent.go @@ -141,6 +141,10 @@ type Agent struct { // time. chanOpenFailures chan *chanOpenFailureUpdate + // heuristicUpdates is a channel where updates from active heurstics + // will be sent. + heuristicUpdates chan *heuristicUpdate + // totalBalance is the total number of satoshis the backing wallet is // known to control at any given instance. This value will be updated // when the agent receives external balance update signals. @@ -179,6 +183,7 @@ func New(cfg Config, initialState []Channel) (*Agent, error) { balanceUpdates: make(chan *balanceUpdate, 1), nodeUpdates: make(chan *nodeUpdates, 1), chanOpenFailures: make(chan *chanOpenFailureUpdate, 1), + heuristicUpdates: make(chan *heuristicUpdate, 1), pendingOpenUpdates: make(chan *chanPendingOpenUpdate, 1), failedNodes: make(map[NodeID]struct{}), pendingConns: make(map[NodeID]struct{}), @@ -256,6 +261,13 @@ type chanPendingOpenUpdate struct{} // a previous channel open failed, and that it might be possible to try again. type chanOpenFailureUpdate struct{} +// heuristicUpdate is an update sent when one of the autopilot heuristics has +// changed, and prompts the agent to make a new attempt at opening more +// channels. 
+type heuristicUpdate struct { + heuristic AttachmentHeuristic +} + // chanCloseUpdate is a type of external state update that indicates that the // backing Lightning Node has closed a previously open channel. type chanCloseUpdate struct { @@ -329,6 +341,17 @@ func (a *Agent) OnChannelClose(closedChans ...lnwire.ShortChannelID) { }() } +// OnHeuristicUpdate is a method called when a heuristic has been updated, to +// trigger the agent to do a new state assessment. +func (a *Agent) OnHeuristicUpdate(h AttachmentHeuristic) { + select { + case a.heuristicUpdates <- &heuristicUpdate{ + heuristic: h, + }: + default: + } +} + // mergeNodeMaps merges the Agent's set of nodes that it already has active // channels open to, with the other sets of nodes that should be removed from // consideration during heuristic selection. This ensures that the Agent doesn't @@ -470,6 +493,12 @@ func (a *Agent) controller() { log.Debugf("Node updates received, assessing " + "need for more channels") + // Any of the deployed heuristics has been updated, check + // whether we have new channel candidates available. + case upd := <-a.heuristicUpdates: + log.Debugf("Heuristic %v updated, assessing need for "+ + "more channels", upd.heuristic.Name()) + // The agent has been signalled to exit, so we'll bail out // immediately. 
case <-a.quit: @@ -541,10 +570,28 @@ func (a *Agent) openChans(availableFunds btcutil.Amount, numChans uint32, connectedNodes := a.chanState.ConnectedNodes() a.chanStateMtx.Unlock() + for nID := range connectedNodes { + log.Tracef("Skipping node %x with open channel", nID[:]) + } + a.pendingMtx.Lock() + + for nID := range a.pendingOpens { + log.Tracef("Skipping node %x with pending channel open", nID[:]) + } + + for nID := range a.pendingConns { + log.Tracef("Skipping node %x with pending connection", nID[:]) + } + + for nID := range a.failedNodes { + log.Tracef("Skipping failed node %v", nID[:]) + } + nodesToSkip := mergeNodeMaps(a.pendingOpens, a.pendingConns, connectedNodes, a.failedNodes, ) + a.pendingMtx.Unlock() // Gather the set of all nodes in the graph, except those we diff --git a/autopilot/agent_test.go b/autopilot/agent_test.go index bd703c9e06..9cc9366e6f 100644 --- a/autopilot/agent_test.go +++ b/autopilot/agent_test.go @@ -330,6 +330,55 @@ func TestAgentChannelOpenSignal(t *testing.T) { } } +// TestAgentHeuristicUpdateSignal tests that upon notification about a +// heuristic update, the agent reconsults the heuristic. +func TestAgentHeuristicUpdateSignal(t *testing.T) { + t.Parallel() + + testCtx, cleanup := setup(t, nil) + defer cleanup() + + pub, err := testCtx.graph.addRandNode() + if err != nil { + t.Fatalf("unable to generate key: %v", err) + } + + // We'll send an initial "no" response to advance the agent past its + // initial check. + respondMoreChans(t, testCtx, moreChansResp{0, 0}) + + // Next we'll signal that one of the heuristcs have been updated. + testCtx.agent.OnHeuristicUpdate(testCtx.heuristic) + + // The update should trigger the agent to ask for a channel budget.so + // we'll respond that there is a budget for opening 1 more channel. + respondMoreChans(t, testCtx, + moreChansResp{ + numMore: 1, + amt: 1 * btcutil.SatoshiPerBitcoin, + }, + ) + + // At this point, the agent should now be querying the heuristic for + // scores. 
We'll respond. + nodeID := NewNodeID(pub) + scores := map[NodeID]*NodeScore{ + nodeID: { + NodeID: nodeID, + Score: 0.5, + }, + } + respondNodeScores(t, testCtx, scores) + + // Finally, this should result in the agent opening a channel. + chanController := testCtx.chanController.(*mockChanController) + select { + case <-chanController.openChanSignals: + case <-time.After(time.Second * 10): + t.Fatalf("channel not opened in time") + } +} + // A mockFailingChanController always fails to open a channel. type mockFailingChanController struct { } diff --git a/autopilot/betweenness_centrality.go b/autopilot/betweenness_centrality.go new file mode 100644 index 0000000000..85041864f9 --- /dev/null +++ b/autopilot/betweenness_centrality.go @@ -0,0 +1,265 @@ +package autopilot + +import ( + "fmt" + "sync" +) + +// stack is a simple int stack to help with readability of Brandes' +// betweenness centrality implementation below. +type stack struct { + stack []int +} + +func (s *stack) push(v int) { + s.stack = append(s.stack, v) +} + +func (s *stack) top() int { + return s.stack[len(s.stack)-1] +} + +func (s *stack) pop() { + s.stack = s.stack[:len(s.stack)-1] +} + +func (s *stack) empty() bool { + return len(s.stack) == 0 +} + +// queue is a simple int queue to help with readability of Brandes' +// betweenness centrality implementation below. +type queue struct { + queue []int +} + +func (q *queue) push(v int) { + q.queue = append(q.queue, v) +} + +func (q *queue) front() int { + return q.queue[0] +} + +func (q *queue) pop() { + q.queue = q.queue[1:] +} + +func (q *queue) empty() bool { + return len(q.queue) == 0 +} + +// BetweennessCentrality is a NodeMetric that calculates node betweenness +// centrality using Brandes' algorithm. Betweenness centrality for each node +// is the number of shortest paths passing trough that node, not counting +// shortest paths starting or ending at that node. 
This is a useful metric +// to measure control of individual nodes over the whole network. +type BetweennessCentrality struct { + // workers number of goroutines are used to parallelize + // centrality calculation. + workers int + + // centrality stores original (not normalized) centrality values for + // each node in the graph. + centrality map[NodeID]float64 + + // min is the minimum centrality in the graph. + min float64 + + // max is the maximum centrality in the graph. + max float64 +} + +// NewBetweennessCentralityMetric creates a new BetweennessCentrality instance. +// Users can specify the number of workers to use for calculating centrality. +func NewBetweennessCentralityMetric(workers int) (*BetweennessCentrality, error) { + // There should be at least one worker. + if workers < 1 { + return nil, fmt.Errorf("workers must be positive") + } + return &BetweennessCentrality{ + workers: workers, + }, nil +} + +// Name returns the name of the metric. +func (bc *BetweennessCentrality) Name() string { + return "betweeness_centrality" +} + +// betweennessCentrality is the core of Brandes' algorithm. +// We first calculate the shortest paths from the start node s to all other +// nodes with BFS, then update the betweenness centrality values by using +// Brandes' dependency trick. +// For detailed explanation please read: +// https://www.cl.cam.ac.uk/teaching/1617/MLRD/handbook/brandes.html +func betweennessCentrality(g *SimpleGraph, s int, centrality []float64) { + // pred[w] is the list of nodes that immediately precede w on a + // shortest path from s to t for each node t. + pred := make([][]int, len(g.Nodes)) + + // sigma[t] is the number of shortest paths between nodes s and t + // for each node t. + sigma := make([]int, len(g.Nodes)) + sigma[s] = 1 + + // dist[t] holds the distance between s and t for each node t. + // We initialize this to -1 (meaning infinity) for each t != s. 
+ dist := make([]int, len(g.Nodes)) + for i := range dist { + dist[i] = -1 + } + + dist[s] = 0 + + var ( + st stack + q queue + ) + q.push(s) + + // BFS to calculate the shortest paths (sigma and pred) + // from s to t for each node t. + for !q.empty() { + v := q.front() + q.pop() + st.push(v) + + for _, w := range g.Adj[v] { + // If distance from s to w is infinity (-1) + // then set it and enqueue w. + if dist[w] < 0 { + dist[w] = dist[v] + 1 + q.push(w) + } + + // If w is on a shortest path the update + // sigma and add v to w's predecessor list. + if dist[w] == dist[v]+1 { + sigma[w] += sigma[v] + pred[w] = append(pred[w], v) + } + } + } + + // delta[v] is the ratio of the shortest paths between s and t that go + // through v and the total number of shortest paths between s and t. + // If we have delta then the betweenness centrality is simply the sum + // of delta[w] for each w != s. + delta := make([]float64, len(g.Nodes)) + + for !st.empty() { + w := st.top() + st.pop() + + // pred[w] is the list of nodes that immediately precede w on a + // shortest path from s. + for _, v := range pred[w] { + // Update delta using Brandes' equation. + delta[v] += (float64(sigma[v]) / float64(sigma[w])) * (1.0 + delta[w]) + } + + if w != s { + // As noted above centrality is simply the sum + // of delta[w] for each w != s. + centrality[w] += delta[w] + } + } +} + +// Refresh recaculates and stores centrality values. +func (bc *BetweennessCentrality) Refresh(graph ChannelGraph) error { + cache, err := NewSimpleGraph(graph) + if err != nil { + return err + } + + var wg sync.WaitGroup + work := make(chan int) + partials := make(chan []float64, bc.workers) + + // Each worker will compute a partial result. + // This partial result is a sum of centrality updates + // on roughly N / workers nodes. + worker := func() { + defer wg.Done() + partial := make([]float64, len(cache.Nodes)) + + // Consume the next node, update centrality + // parital to avoid unnecessary synchronizaton. 
+ for node := range work { + betweennessCentrality(cache, node, partial) + } + partials <- partial + } + + // Now start the N workers. + wg.Add(bc.workers) + for i := 0; i < bc.workers; i++ { + go worker() + } + + // Distribute work amongst workers. + // Should be fair when the graph is sufficiently large. + for node := range cache.Nodes { + work <- node + } + + close(work) + wg.Wait() + close(partials) + + // Collect and sum partials for final result. + centrality := make([]float64, len(cache.Nodes)) + for partial := range partials { + for i := 0; i < len(partial); i++ { + centrality[i] += partial[i] + } + } + + // Get min/max to be able to normalize + // centrality values between 0 and 1. + bc.min = 0 + bc.max = 0 + if len(centrality) > 0 { + for _, v := range centrality { + if v < bc.min { + bc.min = v + } else if v > bc.max { + bc.max = v + } + } + } + + // Divide by two as this is an undirected graph. + bc.min /= 2.0 + bc.max /= 2.0 + + bc.centrality = make(map[NodeID]float64) + for u, value := range centrality { + // Divide by two as this is an undirected graph. + bc.centrality[cache.Nodes[u]] = value / 2.0 + } + + return nil +} + +// GetMetric returns the current centrality values for each node indexed +// by node id. +func (bc *BetweennessCentrality) GetMetric(normalize bool) map[NodeID]float64 { + // Normalization factor. 
+ var z float64 + if (bc.max - bc.min) > 0 { + z = 1.0 / (bc.max - bc.min) + } + + centrality := make(map[NodeID]float64) + for k, v := range bc.centrality { + if normalize { + v = (v - bc.min) * z + } + centrality[k] = v + } + + return centrality +} diff --git a/autopilot/betweenness_centrality_test.go b/autopilot/betweenness_centrality_test.go new file mode 100644 index 0000000000..76ece3ceab --- /dev/null +++ b/autopilot/betweenness_centrality_test.go @@ -0,0 +1,192 @@ +package autopilot + +import ( + "fmt" + "testing" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcutil" +) + +func TestBetweennessCentralityMetricConstruction(t *testing.T) { + failing := []int{-1, 0} + ok := []int{1, 10} + + for _, workers := range failing { + m, err := NewBetweennessCentralityMetric(workers) + if m != nil || err == nil { + t.Fatalf("construction must fail with <= 0 workers") + } + } + + for _, workers := range ok { + m, err := NewBetweennessCentralityMetric(workers) + if m == nil || err != nil { + t.Fatalf("construction must succeed with >= 1 workers") + } + } +} + +// Tests that empty graph results in empty centrality result. 
+func TestBetweennessCentralityEmptyGraph(t *testing.T) { + centralityMetric, err := NewBetweennessCentralityMetric(1) + if err != nil { + t.Fatalf("construction must succeed with positive number of workers") + } + + for _, chanGraph := range chanGraphs { + graph, cleanup, err := chanGraph.genFunc() + success := t.Run(chanGraph.name, func(t1 *testing.T) { + if err != nil { + t1.Fatalf("unable to create graph: %v", err) + } + if cleanup != nil { + defer cleanup() + } + + if err := centralityMetric.Refresh(graph); err != nil { + t.Fatalf("unexpected failure during metric refresh: %v", err) + } + + centrality := centralityMetric.GetMetric(false) + if len(centrality) > 0 { + t.Fatalf("expected empty metric, got: %v", len(centrality)) + } + + centrality = centralityMetric.GetMetric(true) + if len(centrality) > 0 { + t.Fatalf("expected empty metric, got: %v", len(centrality)) + } + + }) + if !success { + break + } + } +} + +// testGraphDesc is a helper type to describe a test graph. +type testGraphDesc struct { + nodes int + edges map[int][]int +} + +// buildTestGraph builds a test graph from a passed graph desriptor. +func buildTestGraph(t *testing.T, + graph testGraph, desc testGraphDesc) map[int]*btcec.PublicKey { + + nodes := make(map[int]*btcec.PublicKey) + + for i := 0; i < desc.nodes; i++ { + key, err := graph.addRandNode() + if err != nil { + t.Fatalf("cannot create random node") + } + + nodes[i] = key + } + + const chanCapacity = btcutil.SatoshiPerBitcoin + for u, neighbors := range desc.edges { + for _, v := range neighbors { + _, _, err := graph.addRandChannel(nodes[u], nodes[v], chanCapacity) + if err != nil { + t.Fatalf("unexpected error adding random channel: %v", err) + } + } + } + + return nodes +} + +// Test betweenness centrality calculating using an example graph. 
+func TestBetweennessCentralityWithNonEmptyGraph(t *testing.T) { + graphDesc := testGraphDesc{ + nodes: 9, + edges: map[int][]int{ + 0: {1, 2, 3}, + 1: {2}, + 2: {3}, + 3: {4, 5}, + 4: {5, 6, 7}, + 5: {6, 7}, + 6: {7, 8}, + }, + } + + workers := []int{1, 3, 9, 100} + + results := []struct { + normalize bool + centrality []float64 + }{ + { + normalize: true, + centrality: []float64{ + 0.2, 0.0, 0.2, 1.0, 0.4, 0.4, 7.0 / 15.0, 0.0, 0.0, + }, + }, + { + normalize: false, + centrality: []float64{ + 3.0, 0.0, 3.0, 15.0, 6.0, 6.0, 7.0, 0.0, 0.0, + }, + }, + } + + for _, numWorkers := range workers { + for _, chanGraph := range chanGraphs { + numWorkers := numWorkers + graph, cleanup, err := chanGraph.genFunc() + if err != nil { + t.Fatalf("unable to create graph: %v", err) + } + if cleanup != nil { + defer cleanup() + } + + testName := fmt.Sprintf("%v %d workers", chanGraph.name, numWorkers) + success := t.Run(testName, func(t1 *testing.T) { + centralityMetric, err := NewBetweennessCentralityMetric( + numWorkers, + ) + if err != nil { + t.Fatalf("construction must succeed with " + + "positive number of workers") + } + + graphNodes := buildTestGraph(t1, graph, graphDesc) + if err := centralityMetric.Refresh(graph); err != nil { + t1.Fatalf("error while calculating betweeness centrality") + } + for _, expected := range results { + expected := expected + centrality := centralityMetric.GetMetric(expected.normalize) + + if len(centrality) != graphDesc.nodes { + t.Fatalf("expected %v values, got: %v", + graphDesc.nodes, len(centrality)) + } + + for node, nodeCentrality := range expected.centrality { + nodeID := NewNodeID(graphNodes[node]) + calculatedCentrality, ok := centrality[nodeID] + if !ok { + t1.Fatalf("no result for node: %x (%v)", + nodeID, node) + } + + if nodeCentrality != calculatedCentrality { + t1.Errorf("centrality for node: %v "+ + "should be %v, got: %v", + node, nodeCentrality, calculatedCentrality) + } + } + } + }) + if !success { + break + } + } + } +} diff 
--git a/autopilot/graph.go b/autopilot/graph.go index 413aaf1371..5641bb216e 100644 --- a/autopilot/graph.go +++ b/autopilot/graph.go @@ -10,9 +10,10 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" ) var ( @@ -50,7 +51,7 @@ func ChannelGraphFromDatabase(db *channeldb.ChannelGraph) ChannelGraph { // channeldb.LightningNode. The wrapper method implement the autopilot.Node // interface. type dbNode struct { - tx *bbolt.Tx + tx kvdb.ReadTx node *channeldb.LightningNode } @@ -83,7 +84,7 @@ func (d dbNode) Addrs() []net.Addr { // // NOTE: Part of the autopilot.Node interface. func (d dbNode) ForEachChannel(cb func(ChannelEdge) error) error { - return d.node.ForEachChannel(d.tx, func(tx *bbolt.Tx, + return d.node.ForEachChannel(d.tx, func(tx kvdb.ReadTx, ei *channeldb.ChannelEdgeInfo, ep, _ *channeldb.ChannelEdgePolicy) error { // Skip channels for which no outgoing edge policy is available. @@ -120,7 +121,7 @@ func (d dbNode) ForEachChannel(cb func(ChannelEdge) error) error { // // NOTE: Part of the autopilot.ChannelGraph interface. func (d *databaseChannelGraph) ForEachNode(cb func(Node) error) error { - return d.db.ForEachNode(nil, func(tx *bbolt.Tx, n *channeldb.LightningNode) error { + return d.db.ForEachNode(nil, func(tx kvdb.ReadTx, n *channeldb.LightningNode) error { // We'll skip over any node that doesn't have any advertised // addresses. 
As we won't be able to reach them to actually @@ -145,7 +146,14 @@ func (d *databaseChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey, fetchNode := func(pub *btcec.PublicKey) (*channeldb.LightningNode, error) { if pub != nil { - dbNode, err := d.db.FetchLightningNode(pub) + vertex, err := route.NewVertexFromBytes( + pub.SerializeCompressed(), + ) + if err != nil { + return nil, err + } + + dbNode, err := d.db.FetchLightningNode(nil, vertex) switch { case err == channeldb.ErrGraphNodeNotFound: fallthrough @@ -157,8 +165,9 @@ func (d *databaseChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey, IP: bytes.Repeat([]byte("a"), 16), }, }, - Features: lnwire.NewFeatureVector(nil, - lnwire.GlobalFeatures), + Features: lnwire.NewFeatureVector( + nil, lnwire.Features, + ), AuthSigBytes: testSig.Serialize(), } graphNode.AddPubKey(pub) @@ -183,7 +192,9 @@ func (d *databaseChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey, IP: bytes.Repeat([]byte("a"), 16), }, }, - Features: lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures), + Features: lnwire.NewFeatureVector( + nil, lnwire.Features, + ), AuthSigBytes: testSig.Serialize(), } dbNode.AddPubKey(nodeKey) @@ -287,7 +298,9 @@ func (d *databaseChannelGraph) addRandNode() (*btcec.PublicKey, error) { IP: bytes.Repeat([]byte("a"), 16), }, }, - Features: lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures), + Features: lnwire.NewFeatureVector( + nil, lnwire.Features, + ), AuthSigBytes: testSig.Serialize(), } dbNode.AddPubKey(nodeKey) @@ -302,7 +315,7 @@ func (d *databaseChannelGraph) addRandNode() (*btcec.PublicKey, error) { // memChannelGraph is an implementation of the autopilot.ChannelGraph backed by // an in-memory graph. type memChannelGraph struct { - graph map[NodeID]memNode + graph map[NodeID]*memNode } // A compile time assertion to ensure memChannelGraph meets the @@ -313,7 +326,7 @@ var _ ChannelGraph = (*memChannelGraph)(nil) // implementation. 
func newMemChannelGraph() *memChannelGraph { return &memChannelGraph{ - graph: make(map[NodeID]memNode), + graph: make(map[NodeID]*memNode), } } @@ -355,14 +368,14 @@ func (m *memChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey, capacity btcutil.Amount) (*ChannelEdge, *ChannelEdge, error) { var ( - vertex1, vertex2 memNode + vertex1, vertex2 *memNode ok bool ) if node1 != nil { vertex1, ok = m.graph[NewNodeID(node1)] if !ok { - vertex1 = memNode{ + vertex1 = &memNode{ pub: node1, addrs: []net.Addr{ &net.TCPAddr{ @@ -376,7 +389,7 @@ func (m *memChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey, if err != nil { return nil, nil, err } - vertex1 = memNode{ + vertex1 = &memNode{ pub: newPub, addrs: []net.Addr{ &net.TCPAddr{ @@ -389,7 +402,7 @@ func (m *memChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey, if node2 != nil { vertex2, ok = m.graph[NewNodeID(node2)] if !ok { - vertex2 = memNode{ + vertex2 = &memNode{ pub: node2, addrs: []net.Addr{ &net.TCPAddr{ @@ -403,7 +416,7 @@ func (m *memChannelGraph) addRandChannel(node1, node2 *btcec.PublicKey, if err != nil { return nil, nil, err } - vertex2 = memNode{ + vertex2 = &memNode{ pub: newPub, addrs: []net.Addr{ &net.TCPAddr{ @@ -441,7 +454,7 @@ func (m *memChannelGraph) addRandNode() (*btcec.PublicKey, error) { if err != nil { return nil, err } - vertex := memNode{ + vertex := &memNode{ pub: newPub, addrs: []net.Addr{ &net.TCPAddr{ diff --git a/autopilot/interface.go b/autopilot/interface.go index b63661e312..7efeba7657 100644 --- a/autopilot/interface.go +++ b/autopilot/interface.go @@ -147,6 +147,23 @@ type AttachmentHeuristic interface { map[NodeID]*NodeScore, error) } +// NodeMetric is a common interface for all graph metrics that are not +// directly used as autopilot node scores but may be used in compositional +// heuristics or statistical information exposed to users. +type NodeMetric interface { + // Name returns the unique name of this metric. 
+ Name() string + + // Refresh refreshes the metric values based on the current graph. + Refresh(graph ChannelGraph) error + + // GetMetric returns the latest value of this metric. Values in the + // map are per node and can be in arbitrary domain. If normalize is + // set to true, then the returned values are normalized to either + // [0, 1] or [-1, 1] depending on the metric. + GetMetric(normalize bool) map[NodeID]float64 +} + // ScoreSettable is an interface that indicates that the scores returned by the // heuristic can be mutated by an external caller. The ExternalScoreAttachment // currently implements this interface, and so should any heuristic that is diff --git a/autopilot/manager.go b/autopilot/manager.go index 3253fdaf68..88f6c37d1e 100644 --- a/autopilot/manager.go +++ b/autopilot/manager.go @@ -319,8 +319,12 @@ func (m *Manager) queryHeuristics(nodes map[NodeID]struct{}, localState bool) ( // We'll start by getting the scores from each available sub-heuristic, // in addition the current agent heuristic. + var heuristics []AttachmentHeuristic + heuristics = append(heuristics, availableHeuristics...) + heuristics = append(heuristics, m.cfg.PilotCfg.Heuristic) + report := make(HeuristicScores) - for _, h := range append(availableHeuristics, m.cfg.PilotCfg.Heuristic) { + for _, h := range heuristics { name := h.Name() // If the agent heuristic is among the simple heuristics it @@ -354,6 +358,9 @@ func (m *Manager) queryHeuristics(nodes map[NodeID]struct{}, localState bool) ( // SetNodeScores is used to set the scores of the given heuristic, if it is // active, and ScoreSettable. func (m *Manager) SetNodeScores(name string, scores map[NodeID]float64) error { + m.Lock() + defer m.Unlock() + // It must be ScoreSettable to be available for external // scores. 
s, ok := m.cfg.PilotCfg.Heuristic.(ScoreSettable) @@ -372,5 +379,11 @@ func (m *Manager) SetNodeScores(name string, scores map[NodeID]float64) error { return fmt.Errorf("heuristic with name %v not found", name) } + // If the autopilot agent is active, notify about the updated + // heuristic. + if m.pilot != nil { + m.pilot.OnHeuristicUpdate(m.cfg.PilotCfg.Heuristic) + } + return nil } diff --git a/autopilot/simple_graph.go b/autopilot/simple_graph.go new file mode 100644 index 0000000000..208a784e9f --- /dev/null +++ b/autopilot/simple_graph.go @@ -0,0 +1,66 @@ +package autopilot + +// SimpleGraph stores a simplifed adj graph of a channel graph to speed +// up graph processing by eliminating all unnecessary hashing and map access. +type SimpleGraph struct { + // Nodes is a map from node index to NodeID. + Nodes []NodeID + + // Adj stores nodes and neighbors in an adjacency list. + Adj [][]int +} + +// NewSimpleGraph creates a simplified graph from the current channel graph. +// Returns an error if the channel graph iteration fails due to underlying +// failure. +func NewSimpleGraph(g ChannelGraph) (*SimpleGraph, error) { + nodes := make(map[NodeID]int) + adj := make(map[int][]int) + nextIndex := 0 + + // getNodeIndex returns the integer index of the passed node. + // The returned index is then used to create a simplifed adjacency list + // where each node is identified by its index instead of its pubkey, and + // also to create a mapping from node index to node pubkey. + getNodeIndex := func(node Node) int { + key := NodeID(node.PubKey()) + nodeIndex, ok := nodes[key] + + if !ok { + nodes[key] = nextIndex + nodeIndex = nextIndex + nextIndex++ + } + + return nodeIndex + } + + // Iterate over each node and each channel and update the adj and the node + // index. 
+ err := g.ForEachNode(func(node Node) error { + u := getNodeIndex(node) + + return node.ForEachChannel(func(edge ChannelEdge) error { + v := getNodeIndex(edge.Peer) + + adj[u] = append(adj[u], v) + return nil + }) + }) + if err != nil { + return nil, err + } + + graph := &SimpleGraph{ + Nodes: make([]NodeID, len(nodes)), + Adj: make([][]int, len(nodes)), + } + + // Fill the adj and the node index to node pubkey mapping. + for nodeID, nodeIndex := range nodes { + graph.Adj[nodeIndex] = adj[nodeIndex] + graph.Nodes[nodeIndex] = nodeID + } + + return graph, nil +} diff --git a/breacharbiter.go b/breacharbiter.go index 6584cc17bd..f454aad3be 100644 --- a/breacharbiter.go +++ b/breacharbiter.go @@ -13,14 +13,15 @@ import ( "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) var ( @@ -78,7 +79,7 @@ type BreachConfig struct { // Estimator is used by the breach arbiter to determine an appropriate // fee level when generating, signing, and broadcasting sweep // transactions. - Estimator lnwallet.FeeEstimator + Estimator chainfee.Estimator // GenSweepScript generates the receiving scripts for swept outputs. 
GenSweepScript func() ([]byte, error) @@ -821,7 +822,7 @@ func (b *breachArbiter) handleBreachHandoff(breachEvent *ContractBreachEvent) { type breachedOutput struct { amt btcutil.Amount outpoint wire.OutPoint - witnessType input.WitnessType + witnessType input.StandardWitnessType signDesc input.SignDescriptor confHeight uint32 @@ -833,7 +834,7 @@ type breachedOutput struct { // makeBreachedOutput assembles a new breachedOutput that can be used by the // breach arbiter to construct a justice or sweep transaction. func makeBreachedOutput(outpoint *wire.OutPoint, - witnessType input.WitnessType, + witnessType input.StandardWitnessType, secondLevelScript []byte, signDescriptor *input.SignDescriptor, confHeight uint32) breachedOutput { @@ -883,9 +884,7 @@ func (bo *breachedOutput) CraftInputScript(signer input.Signer, txn *wire.MsgTx, // First, we ensure that the witness generation function has been // initialized for this breached output. - bo.witnessFunc = bo.witnessType.GenWitnessFunc( - signer, bo.SignDesc(), - ) + bo.witnessFunc = bo.witnessType.WitnessGenerator(signer, bo.SignDesc()) // Now that we have ensured that the witness generation function has // been initialized, we can proceed to execute it and generate the @@ -897,6 +896,13 @@ func (bo *breachedOutput) CraftInputScript(signer input.Signer, txn *wire.MsgTx, // must be built on top of the confirmation height before the output can be // spent. func (bo *breachedOutput) BlocksToMaturity() uint32 { + // If the output is a to_remote output we can claim, and it's of the + // confirmed type, we must wait one block before claiming it. + if bo.witnessType == input.CommitmentToRemoteConfirmed { + return 1 + } + + // All other breached outputs have no CSV delay. return 0 } @@ -953,6 +959,12 @@ func newRetributionInfo(chanPoint *wire.OutPoint, witnessType = input.CommitSpendNoDelayTweakless } + // If the local delay is non-zero, it means this output is of + // the confirmed to_remote type. 
+ if breachInfo.LocalDelay != 0 { + witnessType = input.CommitmentToRemoteConfirmed + } + localOutput := makeBreachedOutput( &breachInfo.LocalOutpoint, witnessType, @@ -993,7 +1005,7 @@ func newRetributionInfo(chanPoint *wire.OutPoint, // Using the breachedHtlc's incoming flag, determine the // appropriate witness type that needs to be generated in order // to sweep the HTLC output. - var htlcWitnessType input.WitnessType + var htlcWitnessType input.StandardWitnessType if breachedHtlc.IsIncoming { htlcWitnessType = input.HtlcAcceptedRevoke } else { @@ -1051,32 +1063,15 @@ func (b *breachArbiter) createJusticeTx( // Grab locally scoped reference to breached output. inp := &r.breachedOutputs[i] - // First, select the appropriate estimated witness weight for + // First, determine the appropriate estimated witness weight for // the give witness type of this breached output. If the witness - // type is unrecognized, we will omit it from the transaction. - var witnessWeight int - switch inp.WitnessType() { - case input.CommitSpendNoDelayTweakless: - fallthrough - case input.CommitmentNoDelay: - witnessWeight = input.P2WKHWitnessSize - - case input.CommitmentRevoke: - witnessWeight = input.ToLocalPenaltyWitnessSize - - case input.HtlcOfferedRevoke: - witnessWeight = input.OfferedHtlcPenaltyWitnessSize - - case input.HtlcAcceptedRevoke: - witnessWeight = input.AcceptedHtlcPenaltyWitnessSize - - case input.HtlcSecondLevelRevoke: - witnessWeight = input.ToLocalPenaltyWitnessSize - - default: - brarLog.Warnf("breached output in retribution info "+ - "contains unexpected witness type: %v", - inp.WitnessType()) + // weight cannot be estimated, we will omit it from the + // transaction. 
+ witnessWeight, _, err := inp.WitnessType().SizeUpperBound() + if err != nil { + brarLog.Warnf("could not determine witness weight "+ + "for breached output in retribution info: %v", + err) continue } weightEstimate.AddWitnessInput(witnessWeight) @@ -1135,6 +1130,7 @@ func (b *breachArbiter) sweepSpendableOutputsTxn(txWeight int64, for _, input := range inputs { txn.AddTxIn(&wire.TxIn{ PreviousOutPoint: *input.OutPoint(), + Sequence: input.BlocksToMaturity(), }) } @@ -1241,10 +1237,10 @@ func newRetributionStore(db *channeldb.DB) *retributionStore { // Add adds a retribution state to the retributionStore, which is then persisted // to disk. func (rs *retributionStore) Add(ret *retributionInfo) error { - return rs.db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(rs.db, func(tx kvdb.RwTx) error { // If this is our first contract breach, the retributionBucket // won't exist, in which case, we just create a new bucket. - retBucket, err := tx.CreateBucketIfNotExists(retributionBucket) + retBucket, err := tx.CreateTopLevelBucket(retributionBucket) if err != nil { return err } @@ -1268,8 +1264,8 @@ func (rs *retributionStore) Add(ret *retributionInfo) error { // startup and re-register for confirmation notifications. 
func (rs *retributionStore) Finalize(chanPoint *wire.OutPoint, finalTx *wire.MsgTx) error { - return rs.db.Update(func(tx *bbolt.Tx) error { - justiceBkt, err := tx.CreateBucketIfNotExists(justiceTxnBucket) + return kvdb.Update(rs.db, func(tx kvdb.RwTx) error { + justiceBkt, err := tx.CreateTopLevelBucket(justiceTxnBucket) if err != nil { return err } @@ -1295,8 +1291,8 @@ func (rs *retributionStore) GetFinalizedTxn( chanPoint *wire.OutPoint) (*wire.MsgTx, error) { var finalTxBytes []byte - if err := rs.db.View(func(tx *bbolt.Tx) error { - justiceBkt := tx.Bucket(justiceTxnBucket) + if err := kvdb.View(rs.db, func(tx kvdb.ReadTx) error { + justiceBkt := tx.ReadBucket(justiceTxnBucket) if justiceBkt == nil { return nil } @@ -1329,8 +1325,8 @@ func (rs *retributionStore) GetFinalizedTxn( // that has already been breached. func (rs *retributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) { var found bool - err := rs.db.View(func(tx *bbolt.Tx) error { - retBucket := tx.Bucket(retributionBucket) + err := kvdb.View(rs.db, func(tx kvdb.ReadTx) error { + retBucket := tx.ReadBucket(retributionBucket) if retBucket == nil { return nil } @@ -1354,8 +1350,8 @@ func (rs *retributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) { // Remove removes a retribution state and finalized justice transaction by // channel point from the retribution store. func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) error { - return rs.db.Update(func(tx *bbolt.Tx) error { - retBucket := tx.Bucket(retributionBucket) + return kvdb.Update(rs.db, func(tx kvdb.RwTx) error { + retBucket := tx.ReadWriteBucket(retributionBucket) // We return an error if the bucket is not already created, // since normal operation of the breach arbiter should never try @@ -1381,7 +1377,7 @@ func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) error { // If we have not finalized this channel breach, we can exit // early. 
- justiceBkt := tx.Bucket(justiceTxnBucket) + justiceBkt := tx.ReadWriteBucket(justiceTxnBucket) if justiceBkt == nil { return nil } @@ -1393,10 +1389,10 @@ func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) error { // ForAll iterates through all stored retributions and executes the passed // callback function on each retribution. func (rs *retributionStore) ForAll(cb func(*retributionInfo) error) error { - return rs.db.View(func(tx *bbolt.Tx) error { + return kvdb.View(rs.db, func(tx kvdb.ReadTx) error { // If the bucket does not exist, then there are no pending // retributions. - retBucket := tx.Bucket(retributionBucket) + retBucket := tx.ReadBucket(retributionBucket) if retBucket == nil { return nil } @@ -1555,7 +1551,7 @@ func (bo *breachedOutput) Decode(r io.Reader) error { if _, err := io.ReadFull(r, scratch[:2]); err != nil { return err } - bo.witnessType = input.WitnessType( + bo.witnessType = input.StandardWitnessType( binary.BigEndian.Uint16(scratch[:2]), ) diff --git a/breacharbiter_test.go b/breacharbiter_test.go index 7f471ea2ef..72448ab54c 100644 --- a/breacharbiter_test.go +++ b/breacharbiter_test.go @@ -31,6 +31,7 @@ import ( "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/shachain" ) @@ -1675,7 +1676,7 @@ func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent, ba := newBreachArbiter(&BreachConfig{ CloseLink: func(_ *wire.OutPoint, _ htlcswitch.ChannelCloseType) {}, DB: db, - Estimator: lnwallet.NewStaticFeeEstimator(12500, 0), + Estimator: chainfee.NewStaticEstimator(12500, 0), GenSweepScript: func() ([]byte, error) { return nil, nil }, ContractBreaches: contractBreaches, Signer: signer, @@ -1798,7 +1799,7 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa aliceCommitTx, 
bobCommitTx, err := lnwallet.CreateCommitmentTxns( channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint, - bobCommitPoint, *fundingTxIn, true, + bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit, ) if err != nil { return nil, nil, nil, err @@ -1824,7 +1825,7 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa return nil, nil, nil, err } - estimator := lnwallet.NewStaticFeeEstimator(12500, 0) + estimator := chainfee.NewStaticEstimator(12500, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { return nil, nil, nil, err @@ -1865,7 +1866,7 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa IdentityPub: aliceKeyPub, FundingOutpoint: *prevOut, ShortChannelID: shortChanID, - ChanType: channeldb.SingleFunderTweakless, + ChanType: channeldb.SingleFunderTweaklessBit, IsInitiator: true, Capacity: channelCapacity, RemoteCurrentRevocation: bobCommitPoint, @@ -1883,7 +1884,7 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa IdentityPub: bobKeyPub, FundingOutpoint: *prevOut, ShortChannelID: shortChanID, - ChanType: channeldb.SingleFunderTweakless, + ChanType: channeldb.SingleFunderTweaklessBit, IsInitiator: false, Capacity: channelCapacity, RemoteCurrentRevocation: aliceCommitPoint, diff --git a/brontide/noise.go b/brontide/noise.go index 7d5f42d4ed..f7a6685589 100644 --- a/brontide/noise.go +++ b/brontide/noise.go @@ -47,7 +47,7 @@ const ( ) var ( - // ErrMaxMessageLengthExceeded is returned a message to be written to + // ErrMaxMessageLengthExceeded is returned when a message to be written to // the cipher session exceeds the maximum allowed message payload. ErrMaxMessageLengthExceeded = errors.New("the generated payload exceeds " + "the max allowed message length of (2^16)-1") @@ -209,7 +209,7 @@ type symmetricState struct { handshakeDigest [32]byte } -// mixKey is implements a basic HKDF-based key ratchet. 
This method is called +// mixKey implements a basic HKDF-based key ratchet. This method is called // with the result of each DH output generated during the handshake process. // The first 32 bytes extract from the HKDF reader is the next chaining key, // then latter 32 bytes become the temp secret key using within any future AEAD @@ -315,7 +315,7 @@ func newHandshakeState(initiator bool, prologue []byte, h.InitializeSymmetric([]byte(protocolName)) h.mixHash(prologue) - // In Noise_XK, then initiator should know the responder's static + // In Noise_XK, the initiator should know the responder's static // public key, therefore we include the responder's static key in the // handshake digest. If the initiator gets this value wrong, then the // handshake will fail. @@ -330,7 +330,7 @@ func newHandshakeState(initiator bool, prologue []byte, // EphemeralGenerator is a functional option that allows callers to substitute // a custom function for use when generating ephemeral keys for ActOne or -// ActTwo. The function closure return by this function can be passed into +// ActTwo. The function closure returned by this function can be passed into // NewBrontideMachine as a function option parameter. func EphemeralGenerator(gen func() (*btcec.PrivateKey, error)) func(*Machine) { return func(m *Machine) { @@ -437,8 +437,7 @@ const ( // ActThreeSize is the size of the packet sent from initiator to // responder in ActThree. The packet consists of a handshake version, // the initiators static key encrypted with strong forward secrecy and - // a 16-byte poly1035 - // tag. + // a 16-byte poly1035 tag. // // 1 + 33 + 16 + 16 ActThreeSize = 66 @@ -519,7 +518,7 @@ func (b *Machine) RecvActOne(actOne [ActOneSize]byte) error { } // GenActTwo generates the second packet (act two) to be sent from the -// responder to the initiator. The packet for act two is identify to that of +// responder to the initiator. 
The packet for act two is identical to that of // act one, but then results in a different ECDH operation between the // initiator's and responder's ephemeral keys. // @@ -871,3 +870,23 @@ func (b *Machine) ReadBody(r io.Reader, buf []byte) ([]byte, error) { // TODO(roasbeef): modify to let pass in slice return b.recvCipher.Decrypt(nil, nil, buf) } + +// SetCurveToNil sets the 'Curve' parameter to nil on the handshakeState keys. +// This allows us to log the Machine object without spammy log messages. +func (b *Machine) SetCurveToNil() { + if b.localStatic != nil { + b.localStatic.Curve = nil + } + + if b.localEphemeral != nil { + b.localEphemeral.Curve = nil + } + + if b.remoteStatic != nil { + b.remoteStatic.Curve = nil + } + + if b.remoteEphemeral != nil { + b.remoteEphemeral.Curve = nil + } +} diff --git a/build/log.go b/build/log.go index 31be5d4d94..e8ce253a6e 100644 --- a/build/log.go +++ b/build/log.go @@ -1,7 +1,9 @@ package build import ( + "fmt" "io" + "strings" "github.com/btcsuite/btclog" ) @@ -93,3 +95,106 @@ func NewSubLogger(subsystem string, // For any other configurations, we'll disable logging. return btclog.Disabled } + +// SubLoggers is a type that holds a map of subsystem loggers keyed by their +// subsystem name. +type SubLoggers map[string]btclog.Logger + +// LeveledSubLogger provides the ability to retrieve the subsystem loggers of +// a logger and set their log levels individually or all at once. +type LeveledSubLogger interface { + // SubLoggers returns the map of all registered subsystem loggers. + SubLoggers() SubLoggers + + // SupportedSubsystems returns a slice of strings containing the names + // of the supported subsystems. Should ideally correspond to the keys + // of the subsystem logger map and be sorted. + SupportedSubsystems() []string + + // SetLogLevel assigns an individual subsystem logger a new log level. 
+ SetLogLevel(subsystemID string, logLevel string) + + // SetLogLevels assigns all subsystem loggers the same new log level. + SetLogLevels(logLevel string) +} + +// ParseAndSetDebugLevels attempts to parse the specified debug level and set +// the levels accordingly on the given logger. An appropriate error is returned +// if anything is invalid. +func ParseAndSetDebugLevels(level string, logger LeveledSubLogger) error { + // When the specified string doesn't have any delimiters, treat it as + // the log level for all subsystems. + if !strings.Contains(level, ",") && !strings.Contains(level, "=") { + // Validate debug log level. + if !validLogLevel(level) { + str := "the specified debug level [%v] is invalid" + return fmt.Errorf(str, level) + } + + // Change the logging level for all subsystems. + logger.SetLogLevels(level) + + return nil + } + + // Split the specified string into subsystem/level pairs while detecting + // issues and update the log levels accordingly. + for _, logLevelPair := range strings.Split(level, ",") { + if !strings.Contains(logLevelPair, "=") { + str := "the specified debug level contains an " + + "invalid subsystem/level pair [%v]" + return fmt.Errorf(str, logLevelPair) + } + + // Extract the specified subsystem and log level. + fields := strings.Split(logLevelPair, "=") + if len(fields) != 2 { + str := "the specified debug level has an invalid " + + "format [%v] -- use format subsystem1=level1," + + "subsystem2=level2" + return fmt.Errorf(str, logLevelPair) + } + subsysID, logLevel := fields[0], fields[1] + subLoggers := logger.SubLoggers() + + // Validate subsystem. + if _, exists := subLoggers[subsysID]; !exists { + str := "the specified subsystem [%v] is invalid -- " + + "supported subsystems are %v" + return fmt.Errorf( + str, subsysID, logger.SupportedSubsystems(), + ) + } + + // Validate log level. 
+ if !validLogLevel(logLevel) { + str := "the specified debug level [%v] is invalid" + return fmt.Errorf(str, logLevel) + } + + logger.SetLogLevel(subsysID, logLevel) + } + + return nil +} + +// validLogLevel returns whether or not logLevel is a valid debug log level. +func validLogLevel(logLevel string) bool { + switch logLevel { + case "trace": + fallthrough + case "debug": + fallthrough + case "info": + fallthrough + case "warn": + fallthrough + case "error": + fallthrough + case "critical": + fallthrough + case "off": + return true + } + return false +} diff --git a/build/logrotator.go b/build/logrotator.go new file mode 100644 index 0000000000..734a3bd7e5 --- /dev/null +++ b/build/logrotator.go @@ -0,0 +1,151 @@ +package build + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + + "github.com/btcsuite/btclog" + "github.com/jrick/logrotate/rotator" +) + +// RotatingLogWriter is a wrapper around the LogWriter that supports log file +// rotation. +type RotatingLogWriter struct { + // GenSubLogger is a function that returns a new logger for a subsystem + // belonging to the current RotatingLogWriter. + GenSubLogger func(string) btclog.Logger + + logWriter *LogWriter + + backendLog *btclog.Backend + + logRotator *rotator.Rotator + + subsystemLoggers SubLoggers +} + +// A compile time check to ensure RotatingLogWriter implements the +// LeveledSubLogger interface. +var _ LeveledSubLogger = (*RotatingLogWriter)(nil) + +// NewRotatingLogWriter creates a new file rotating log writer. +// +// NOTE: `InitLogRotator` must be called to set up log rotation after creating +// the writer. +func NewRotatingLogWriter() *RotatingLogWriter { + logWriter := &LogWriter{} + backendLog := btclog.NewBackend(logWriter) + return &RotatingLogWriter{ + GenSubLogger: backendLog.Logger, + logWriter: logWriter, + backendLog: backendLog, + subsystemLoggers: SubLoggers{}, + } +} + +// RegisterSubLogger registers a new subsystem logger. 
+func (r *RotatingLogWriter) RegisterSubLogger(subsystem string, + logger btclog.Logger) { + + r.subsystemLoggers[subsystem] = logger +} + +// InitLogRotator initializes the log file rotator to write logs to logFile and +// create roll files in the same directory. It should be called as early on +// startup and possible and must be closed on shutdown by calling `Close`. +func (r *RotatingLogWriter) InitLogRotator(logFile string, maxLogFileSize int, + maxLogFiles int) error { + + logDir, _ := filepath.Split(logFile) + err := os.MkdirAll(logDir, 0700) + if err != nil { + return fmt.Errorf("failed to create log directory: %v", err) + } + r.logRotator, err = rotator.New( + logFile, int64(maxLogFileSize*1024), false, maxLogFiles, + ) + if err != nil { + return fmt.Errorf("failed to create file rotator: %v", err) + } + + // Run rotator as a goroutine now but make sure we catch any errors + // that happen in case something with the rotation goes wrong during + // runtime (like running out of disk space or not being allowed to + // create a new logfile for whatever reason). + pr, pw := io.Pipe() + go func() { + err := r.logRotator.Run(pr) + if err != nil { + _, _ = fmt.Fprintf(os.Stderr, + "failed to run file rotator: %v\n", err) + } + }() + + r.logWriter.RotatorPipe = pw + return nil +} + +// Close closes the underlying log rotator if it has already been created. +func (r *RotatingLogWriter) Close() error { + if r.logRotator != nil { + return r.logRotator.Close() + } + return nil +} + +// SubLoggers returns all currently registered subsystem loggers for this log +// writer. +// +// NOTE: This is part of the LeveledSubLogger interface. +func (r *RotatingLogWriter) SubLoggers() SubLoggers { + return r.subsystemLoggers +} + +// SupportedSubsystems returns a sorted string slice of all keys in the +// subsystems map, corresponding to the names of the subsystems. +// +// NOTE: This is part of the LeveledSubLogger interface. 
+func (r *RotatingLogWriter) SupportedSubsystems() []string { + // Convert the subsystemLoggers map keys to a string slice. + subsystems := make([]string, 0, len(r.subsystemLoggers)) + for subsysID := range r.subsystemLoggers { + subsystems = append(subsystems, subsysID) + } + + // Sort the subsystems for stable display. + sort.Strings(subsystems) + return subsystems +} + +// SetLogLevel sets the logging level for provided subsystem. Invalid +// subsystems are ignored. Uninitialized subsystems are dynamically created as +// needed. +// +// NOTE: This is part of the LeveledSubLogger interface. +func (r *RotatingLogWriter) SetLogLevel(subsystemID string, logLevel string) { + // Ignore invalid subsystems. + logger, ok := r.subsystemLoggers[subsystemID] + if !ok { + return + } + + // Defaults to info if the log level is invalid. + level, _ := btclog.LevelFromString(logLevel) + logger.SetLevel(level) +} + +// SetLogLevels sets the log level for all subsystem loggers to the passed +// level. It also dynamically creates the subsystem loggers as needed, so it +// can be used to initialize the logging system. +// +// NOTE: This is part of the LeveledSubLogger interface. +func (r *RotatingLogWriter) SetLogLevels(logLevel string) { + // Configure all sub-systems with the new logging level. Dynamically + // create loggers as needed. + for subsystemID := range r.subsystemLoggers { + r.SetLogLevel(subsystemID, logLevel) + } +} diff --git a/build/prefix_log.go b/build/prefix_log.go new file mode 100644 index 0000000000..926b3f58f3 --- /dev/null +++ b/build/prefix_log.go @@ -0,0 +1,112 @@ +package build + +import "github.com/btcsuite/btclog" + +// PrefixLog is a pass-through logger that adds a prefix to every logged line. +type PrefixLog struct { + log btclog.Logger + prefix string +} + +// NewPrefixLog instantiates a new prefixed logger. 
+func NewPrefixLog(prefix string, log btclog.Logger) *PrefixLog { + return &PrefixLog{ + prefix: prefix, + log: log, + } +} + +// addFormatPrefix prepends the prefix to a format string. +func (p *PrefixLog) addFormatPrefix(s string) string { + return p.prefix + " " + s +} + +// addArgsPrefix prepends the prefix to a list of arguments. +func (p *PrefixLog) addArgsPrefix(args []interface{}) []interface{} { + return append([]interface{}{p.prefix}, args...) +} + +// Tracef formats message according to format specifier and writes to to log +// with LevelTrace. +func (p *PrefixLog) Tracef(format string, params ...interface{}) { + p.log.Tracef(p.addFormatPrefix(format), params...) +} + +// Debugf formats message according to format specifier and writes to log with +// LevelDebug. +func (p *PrefixLog) Debugf(format string, params ...interface{}) { + p.log.Debugf(p.addFormatPrefix(format), params...) +} + +// Infof formats message according to format specifier and writes to log with +// LevelInfo. +func (p *PrefixLog) Infof(format string, params ...interface{}) { + p.log.Infof(p.addFormatPrefix(format), params...) +} + +// Warnf formats message according to format specifier and writes to to log with +// LevelWarn. +func (p *PrefixLog) Warnf(format string, params ...interface{}) { + p.log.Warnf(p.addFormatPrefix(format), params...) +} + +// Errorf formats message according to format specifier and writes to to log +// with LevelError. +func (p *PrefixLog) Errorf(format string, params ...interface{}) { + p.log.Errorf(p.addFormatPrefix(format), params...) +} + +// Criticalf formats message according to format specifier and writes to log +// with LevelCritical. +func (p *PrefixLog) Criticalf(format string, params ...interface{}) { + p.log.Criticalf(p.addFormatPrefix(format), params...) +} + +// Trace formats message using the default formats for its operands and writes +// to log with LevelTrace. +func (p *PrefixLog) Trace(v ...interface{}) { + p.log.Trace(p.addArgsPrefix(v)...) 
+} + +// Debug formats message using the default formats for its operands and writes +// to log with LevelDebug. +func (p *PrefixLog) Debug(v ...interface{}) { + p.log.Debug(p.addArgsPrefix(v)...) +} + +// Info formats message using the default formats for its operands and writes to +// log with LevelInfo. +func (p *PrefixLog) Info(v ...interface{}) { + p.log.Info(p.addArgsPrefix(v)...) +} + +// Warn formats message using the default formats for its operands and writes to +// log with LevelWarn. +func (p *PrefixLog) Warn(v ...interface{}) { + p.log.Warn(p.addArgsPrefix(v)...) +} + +// Error formats message using the default formats for its operands and writes +// to log with LevelError. +func (p *PrefixLog) Error(v ...interface{}) { + p.log.Error(p.addArgsPrefix(v)...) +} + +// Critical formats message using the default formats for its operands and +// writes to log with LevelCritical. +func (p *PrefixLog) Critical(v ...interface{}) { + p.log.Critical(p.addArgsPrefix(v)...) +} + +// Level returns the current logging level. +func (p *PrefixLog) Level() btclog.Level { + return p.log.Level() +} + +// SetLevel changes the logging level to the passed level. +func (p *PrefixLog) SetLevel(level btclog.Level) { + p.log.SetLevel(level) +} + +// Assert that PrefixLog fulfills the btclog.Logger interface. +var _ btclog.Logger = &PrefixLog{} diff --git a/build/release/README.md b/build/release/README.md index b90a2dc4f7..03c72496eb 100644 --- a/build/release/README.md +++ b/build/release/README.md @@ -19,8 +19,7 @@ the release binaries following these steps: 1. `git clone https://github.com/Groestlcoin/lnd.git` 2. `cd lnd` -3. `./build/release/release.sh # is the name of the next - release/tag` +3. 
`make release tag= # is the name of the next release/tag` This will then create a directory of the form `lnd-` containing archives of the release binaries for each supported operating system and architecture, @@ -64,7 +63,7 @@ and `go` (matching the same version used in the release): release with `git checkout `. 7. Proceed to verify the tag with `git verify-tag ` and compile the binaries from source for the intended operating system and architecture with - `LNDBUILDSYS=OS-ARCH ./build/release/release.sh `. + `make release sys=OS-ARCH tag=`. 8. Extract the archive found in the `lnd-` directory created by the release script and recompute the `SHA256` hash of the release binaries (lnd and lncli) with `shasum -a 256 `. These should match __exactly__ diff --git a/build/release/release.sh b/build/release/release.sh index bf78cb3e14..ca650180ed 100755 --- a/build/release/release.sh +++ b/build/release/release.sh @@ -9,102 +9,152 @@ set -e -# If no tag specified, use date + version otherwise use tag. -if [[ $1x = x ]]; then - DATE=`date +%Y%m%d` - VERSION="01" - TAG=$DATE-$VERSION -else - TAG=$1 -fi +LND_VERSION_REGEX="lnd version (.+) commit" +PKG="github.com/lightningnetwork/lnd" +PACKAGE=lnd -go mod vendor -tar -cvzf vendor.tar.gz vendor +# green prints one line of green text (if the terminal supports it). +function green() { + echo -e "\e[0;32m${1}\e[0m" +} + +# red prints one line of red text (if the terminal supports it). +function red() { + echo -e "\e[0;31m${1}\e[0m" +} + +# check_tag_correct makes sure the given git tag is checked out and the git tree +# is not dirty. +# arguments: +function check_tag_correct() { + local tag=$1 + + # If a tag is specified, ensure that that tag is present and checked out. + if [[ $tag != $(git describe) ]]; then + red "tag $tag not checked out" + exit 1 + fi + + # Build lnd to extract version. + go build ${PKG}/cmd/lnd + + # Extract version command output. 
+ lnd_version_output=$(./lnd --version) + + # Use a regex to isolate the version string. + if [[ $lnd_version_output =~ $LND_VERSION_REGEX ]]; then + # Prepend 'v' to match git tag naming scheme. + lnd_version="v${BASH_REMATCH[1]}" + green "version: $lnd_version" + + # If tag contains a release candidate suffix, append this suffix to the + # lnd reported version before we compare. + RC_REGEX="-rc[0-9]+$" + if [[ $tag =~ $RC_REGEX ]]; then + lnd_version+=${BASH_REMATCH[0]} + fi -PACKAGE=lnd -MAINDIR=$PACKAGE-$TAG -mkdir -p $MAINDIR - -cp vendor.tar.gz $MAINDIR/ -rm vendor.tar.gz -rm -r vendor - -PACKAGESRC="$MAINDIR/$PACKAGE-source-$TAG.tar" -git archive -o $PACKAGESRC HEAD -gzip -f $PACKAGESRC > "$PACKAGESRC.gz" - -cd $MAINDIR - -# If LNDBUILDSYS is set the default list is ignored. Useful to release -# for a subset of systems/architectures. -SYS=${LNDBUILDSYS:-" - darwin-386 - darwin-amd64 - dragonfly-amd64 - freebsd-386 - freebsd-amd64 - freebsd-arm - illumos-amd64 - linux-386 - linux-amd64 - linux-armv6 - linux-armv7 - linux-arm64 - linux-ppc64 - linux-ppc64le - linux-mips - linux-mipsle - linux-mips64 - linux-mips64le - linux-s390x - netbsd-386 - netbsd-amd64 - netbsd-arm - netbsd-arm64 - openbsd-386 - openbsd-amd64 - openbsd-arm - openbsd-arm64 - solaris-amd64 - windows-386 - windows-amd64 - windows-arm -"} - -# Use the first element of $GOPATH in the case where GOPATH is a list -# (something that is totally allowed). -PKG="github.com/lightningnetwork/lnd" -COMMIT=$(git describe --abbrev=40 --dirty) -COMMITFLAGS="-X $PKG/build.Commit=$COMMIT" - -for i in $SYS; do - OS=$(echo $i | cut -f1 -d-) - ARCH=$(echo $i | cut -f2 -d-) - ARM= - - if [[ $ARCH = "armv6" ]]; then - ARCH=arm - ARM=6 - elif [[ $ARCH = "armv7" ]]; then - ARCH=arm - ARM=7 + # Match git tag with lnd version. 
+ if [[ $tag != "${lnd_version}" ]]; then + red "lnd version $lnd_version does not match tag $tag" + exit 1 + fi + else + red "malformed lnd version output" + exit 1 + fi +} + +# build_release builds the actual release binaries. +# arguments: +function build_release() { + local tag=$1 + local sys=$2 + local buildtags=$3 + local ldflags=$4 + + green " - Packaging vendor" + go mod vendor + tar -czf vendor.tar.gz vendor + + maindir=$PACKAGE-$tag + mkdir -p $maindir + + cp vendor.tar.gz $maindir/ + rm vendor.tar.gz + rm -r vendor + + package_source="${maindir}/${PACKAGE}-source-${tag}.tar" + git archive -o "${package_source}" HEAD + gzip -f "${package_source}" >"${package_source}.gz" + + cd "${maindir}" + + for i in $sys; do + os=$(echo $i | cut -f1 -d-) + arch=$(echo $i | cut -f2 -d-) + arm= + + if [[ $arch == "armv6" ]]; then + arch=arm + arm=6 + elif [[ $arch == "armv7" ]]; then + arch=arm + arm=7 fi - mkdir $PACKAGE-$i-$TAG - cd $PACKAGE-$i-$TAG + dir="${PACKAGE}-${i}-${tag}" + mkdir "${dir}" + pushd "${dir}" - echo "Building:" $OS $ARCH $ARM - env CGO_ENABLED=0 GOOS=$OS GOARCH=$ARCH GOARM=$ARM go build -v -trimpath -ldflags="-s -w -buildid= $COMMITFLAGS" -tags="autopilotrpc signrpc walletrpc chainrpc invoicesrpc routerrpc watchtowerrpc" github.com/lightningnetwork/lnd/cmd/lnd - env CGO_ENABLED=0 GOOS=$OS GOARCH=$ARCH GOARM=$ARM go build -v -trimpath -ldflags="-s -w -buildid= $COMMITFLAGS" -tags="autopilotrpc invoicesrpc walletrpc routerrpc watchtowerrpc" github.com/lightningnetwork/lnd/cmd/lncli - cd .. 
+ green " - Building: ${os} ${arch} ${arm} with build tags '${buildtags}'" + env CGO_ENABLED=0 GOOS=$os GOARCH=$arch GOARM=$arm go build -v -trimpath -ldflags="${ldflags}" -tags="${buildtags}" ${PKG}/cmd/lnd + env CGO_ENABLED=0 GOOS=$os GOARCH=$arch GOARM=$arm go build -v -trimpath -ldflags="${ldflags}" -tags="${buildtags}" ${PKG}/cmd/lncli + popd - if [[ $OS = "windows" ]]; then - zip -r $PACKAGE-$i-$TAG.zip $PACKAGE-$i-$TAG + if [[ $os == "windows" ]]; then + zip -r "${dir}.zip" "${dir}" else - tar -cvzf $PACKAGE-$i-$TAG.tar.gz $PACKAGE-$i-$TAG + tar -cvzf "${dir}.tar.gz" "${dir}" fi - rm -r $PACKAGE-$i-$TAG -done + rm -r "${dir}" + done + + shasum -a 256 * >manifest-$tag.txt +} + +# usage prints the usage of the whole script. +function usage() { + red "Usage: " + red "release.sh check-tag " + red "release.sh build-release " +} + +# Whatever sub command is passed in, we need at least 2 arguments. +if [ "$#" -lt 2 ]; then + usage + exit 1 +fi -shasum -a 256 * > manifest-$TAG.txt +# Extract the sub command and remove it from the list of parameters by shifting +# them to the left. +SUBCOMMAND=$1 +shift + +# Call the function corresponding to the specified sub command or print the +# usage if the sub command was not found. +case $SUBCOMMAND in +check-tag) + green "Checking if version tag exists" + check_tag_correct "$@" + ;; +build-release) + green "Building release" + build_release "$@" + ;; +*) + usage + exit 1 + ;; +esac diff --git a/build/version.go b/build/version.go index 48e734711b..13422a41e1 100644 --- a/build/version.go +++ b/build/version.go @@ -6,61 +6,85 @@ package build import ( - "bytes" "fmt" "strings" ) -// Commit stores the current commit hash of this build, this should be set using -// the -ldflags during compilation. -var Commit string +var ( + // Commit stores the current commit of this build, which includes the + // most recent tag, the number of commits since that tag (if non-zero), + // the commit hash, and a dirty marker. 
This should be set using the + // -ldflags during compilation. + Commit string -// semanticAlphabet -const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-" + // CommitHash stores the current commit hash of this build, this should + // be set using the -ldflags during compilation. + CommitHash string + + // RawTags contains the raw set of build tags, separated by commas. This + // should be set using -ldflags during compilation. + RawTags string + + // GoVersion stores the go version that the executable was compiled + // with. This hsould be set using -ldflags during compilation. + GoVersion string +) + +// semanticAlphabet is the set of characters that are permitted for use in an +// AppPreRelease. +const semanticAlphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-." // These constants define the application version and follow the semantic // versioning 2.0.0 spec (http://semver.org/). const ( - appMajor uint = 0 - appMinor uint = 8 - appPatch uint = 0 + // AppMajor defines the major version of this binary. + AppMajor uint = 0 - // appPreRelease MUST only contain characters from semanticAlphabet + // AppMinor defines the minor version of this binary. + AppMinor uint = 10 + + // AppPatch defines the application patch for this binary. + AppPatch uint = 0 + + // AppPreRelease MUST only contain characters from semanticAlphabet // per the semantic versioning spec. - appPreRelease = "beta" + AppPreRelease = "grs" ) +func init() { + // Assert that AppPreRelease is valid according to the semantic + // versioning guidelines for pre-release version and build metadata + // strings. In particular it MUST only contain characters in + // semanticAlphabet. 
+ for _, r := range AppPreRelease { + if !strings.ContainsRune(semanticAlphabet, r) { + panic(fmt.Errorf("rune: %v is not in the semantic "+ + "alphabet", r)) + } + } +} + // Version returns the application version as a properly formed string per the // semantic versioning 2.0.0 spec (http://semver.org/). func Version() string { // Start with the major, minor, and patch versions. - version := fmt.Sprintf("%d.%d.%d", appMajor, appMinor, appPatch) - - // Append pre-release version if there is one. The hyphen called for - // by the semantic versioning spec is automatically appended and should - // not be contained in the pre-release string. The pre-release version - // is not appended if it contains invalid characters. - preRelease := normalizeVerString(appPreRelease) - if preRelease != "" { - version = fmt.Sprintf("%s-%s", version, preRelease) - } + version := fmt.Sprintf("%d.%d.%d", AppMajor, AppMinor, AppPatch) - // Append commit hash of current build to version. - version = fmt.Sprintf("%s commit=%s", version, Commit) + // Append pre-release version if there is one. The hyphen called for by + // the semantic versioning spec is automatically appended and should not + // be contained in the pre-release string. + if AppPreRelease != "" { + version = fmt.Sprintf("%s-%s", version, AppPreRelease) + } return version } -// normalizeVerString returns the passed string stripped of all characters which -// are not valid according to the semantic versioning guidelines for pre-release -// version and build metadata strings. In particular they MUST only contain -// characters in semanticAlphabet. -func normalizeVerString(str string) string { - var result bytes.Buffer - for _, r := range str { - if strings.ContainsRune(semanticAlphabet, r) { - result.WriteRune(r) - } +// Tags returns the list of build tags that were compiled into the executable. 
+func Tags() []string {
+	if len(RawTags) == 0 {
+		return nil
 	}
-	return result.String()
+
+	return strings.Split(RawTags, ",")
 }
diff --git a/cert/go.mod b/cert/go.mod
new file mode 100644
index 0000000000..29aa7fe7ca
--- /dev/null
+++ b/cert/go.mod
@@ -0,0 +1,3 @@
+module github.com/lightningnetwork/lnd/cert
+
+go 1.13
diff --git a/cert/selfsigned.go b/cert/selfsigned.go
new file mode 100644
index 0000000000..244d64b798
--- /dev/null
+++ b/cert/selfsigned.go
@@ -0,0 +1,273 @@
+package cert
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+	"net"
+	"os"
+	"time"
+)
+
+const (
+	// DefaultAutogenValidity is the default validity of a self-signed
+	// certificate. The value corresponds to 14 months
+	// (14 months * 30 days * 24 hours).
+	DefaultAutogenValidity = 14 * 30 * 24 * time.Hour
+)
+
+var (
+	// End of ASN.1 time.
+	endOfTime = time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC)
+
+	// Max serial number.
+	serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128)
+)
+
+// ipAddresses returns the parsed IP addresses to use when creating the TLS
+// certificate.
+func ipAddresses(tlsExtraIPs []string) ([]net.IP, error) {
+	// Collect the host's IP addresses, including loopback, in a slice.
+	ipAddresses := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")}
+
+	// addIP appends an IP address only if it isn't already in the slice.
+	addIP := func(ipAddr net.IP) {
+		for _, ip := range ipAddresses {
+			if ip.Equal(ipAddr) {
+				return
+			}
+		}
+		ipAddresses = append(ipAddresses, ipAddr)
+	}
+
+	// Add all the interface IPs that aren't already in the slice.
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return nil, err
+	}
+	for _, a := range addrs {
+		ipAddr, _, err := net.ParseCIDR(a.String())
+		if err == nil {
+			addIP(ipAddr)
+		}
+	}
+
+	// Add extra IPs to the slice.
+	for _, ip := range tlsExtraIPs {
+		ipAddr := net.ParseIP(ip)
+		if ipAddr != nil {
+			addIP(ipAddr)
+		}
+	}
+
+	return ipAddresses, nil
+}
+
+// dnsNames returns the host and DNS names to use when creating the TLS
+// certificate.
+func dnsNames(tlsExtraDomains []string) (string, []string) {
+	// Collect the host's names into a slice.
+	host, err := os.Hostname()
+	if err != nil {
+		// Nothing much we can do here, other than falling back to
+		// localhost. A hostname can still be provided with
+		// the tlsExtraDomain parameter if the problem persists on a
+		// system.
+		host = "localhost"
+	}
+
+	dnsNames := []string{host}
+	if host != "localhost" {
+		dnsNames = append(dnsNames, "localhost")
+	}
+	dnsNames = append(dnsNames, tlsExtraDomains...)
+
+	// Also add fake hostnames for unix sockets, otherwise hostname
+	// verification will fail in the client.
+	dnsNames = append(dnsNames, "unix", "unixpacket")
+
+	// Also add hostnames for 'bufconn' which is the hostname used for the
+	// in-memory connections used on mobile.
+	dnsNames = append(dnsNames, "bufconn")
+
+	return host, dnsNames
+}
+
+// IsOutdated returns whether the given certificate is outdated w.r.t. the IPs
+// and domains given. The certificate is considered up to date if it was
+// created with _exactly_ the IPs and domains given.
+func IsOutdated(cert *x509.Certificate, tlsExtraIPs,
+	tlsExtraDomains []string) (bool, error) {
+
+	// Parse the slice of IP strings.
+	ips, err := ipAddresses(tlsExtraIPs)
+	if err != nil {
+		return false, err
+	}
+
+	// To not consider the certificate outdated if it has duplicate IPs or
+	// if only the order has changed, we create two maps from the slice of
+	// IPs to compare.
+ ips1 := make(map[string]net.IP) + for _, ip := range ips { + ips1[ip.String()] = ip + } + + ips2 := make(map[string]net.IP) + for _, ip := range cert.IPAddresses { + ips2[ip.String()] = ip + } + + // If the certificate has a different number of IP addresses, it is + // definitely out of date. + if len(ips1) != len(ips2) { + return true, nil + } + + // Go through each IP address, and check that they are equal. We expect + // both the string representation and the exact IP to match. + for s, ip1 := range ips1 { + // Assert the IP string is found in both sets. + ip2, ok := ips2[s] + if !ok { + return true, nil + } + + // And that the IPs are considered equal. + if !ip1.Equal(ip2) { + return true, nil + } + } + + // Get the full list of DNS names to use. + _, dnsNames := dnsNames(tlsExtraDomains) + + // We do the same kind of deduplication for the DNS names. + dns1 := make(map[string]struct{}) + for _, n := range cert.DNSNames { + dns1[n] = struct{}{} + } + + dns2 := make(map[string]struct{}) + for _, n := range dnsNames { + dns2[n] = struct{}{} + } + + // If the number of domains are different, it is out of date. + if len(dns1) != len(dns2) { + return true, nil + } + + // Similarly, check that each DNS name matches what is found in the + // certificate. + for k := range dns1 { + if _, ok := dns2[k]; !ok { + return true, nil + } + } + + // Certificate was up-to-date. + return false, nil +} + +// GenCertPair generates a key/cert pair to the paths provided. The +// auto-generated certificates should *not* be used in production for public +// access as they're self-signed and don't necessarily contain all of the +// desired hostnames for the service. For production/public use, consider a +// real PKI. 
+// +// This function is adapted from https://github.com/btcsuite/btcd and +// https://github.com/btcsuite/btcutil +func GenCertPair(org, certFile, keyFile string, tlsExtraIPs, + tlsExtraDomains []string, certValidity time.Duration) error { + + now := time.Now() + validUntil := now.Add(certValidity) + + // Check that the certificate validity isn't past the ASN.1 end of time. + if validUntil.After(endOfTime) { + validUntil = endOfTime + } + + // Generate a serial number that's below the serialNumberLimit. + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return fmt.Errorf("failed to generate serial number: %s", err) + } + + // Get all DNS names and IP addresses to use when creating the + // certificate. + host, dnsNames := dnsNames(tlsExtraDomains) + ipAddresses, err := ipAddresses(tlsExtraIPs) + if err != nil { + return err + } + + // Generate a private key for the certificate. + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return err + } + + // Construct the certificate template. + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{org}, + CommonName: host, + }, + NotBefore: now.Add(-time.Hour * 24), + NotAfter: validUntil, + + KeyUsage: x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + IsCA: true, // so can sign self. 
+ BasicConstraintsValid: true, + + DNSNames: dnsNames, + IPAddresses: ipAddresses, + } + + derBytes, err := x509.CreateCertificate(rand.Reader, &template, + &template, &priv.PublicKey, priv) + if err != nil { + return fmt.Errorf("failed to create certificate: %v", err) + } + + certBuf := &bytes.Buffer{} + err = pem.Encode(certBuf, &pem.Block{Type: "CERTIFICATE", + Bytes: derBytes}) + if err != nil { + return fmt.Errorf("failed to encode certificate: %v", err) + } + + keybytes, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return fmt.Errorf("unable to encode privkey: %v", err) + } + keyBuf := &bytes.Buffer{} + err = pem.Encode(keyBuf, &pem.Block{Type: "EC PRIVATE KEY", + Bytes: keybytes}) + if err != nil { + return fmt.Errorf("failed to encode private key: %v", err) + } + + // Write cert and key files. + if err = ioutil.WriteFile(certFile, certBuf.Bytes(), 0644); err != nil { + return err + } + if err = ioutil.WriteFile(keyFile, keyBuf.Bytes(), 0600); err != nil { + os.Remove(certFile) + return err + } + + return nil +} diff --git a/cert/selfsigned_test.go b/cert/selfsigned_test.go new file mode 100644 index 0000000000..0a5888a96e --- /dev/null +++ b/cert/selfsigned_test.go @@ -0,0 +1,135 @@ +package cert_test + +import ( + "io/ioutil" + "testing" + + "github.com/lightningnetwork/lnd/cert" +) + +var ( + extraIPs = []string{"1.1.1.1", "123.123.123.1", "199.189.12.12"} + extraDomains = []string{"home", "and", "away"} +) + +// TestIsOutdatedCert checks that we'll consider the TLS certificate outdated +// if the ip addresses or dns names don't match. +func TestIsOutdatedCert(t *testing.T) { + tempDir, err := ioutil.TempDir("", "certtest") + if err != nil { + t.Fatal(err) + } + + certPath := tempDir + "/tls.cert" + keyPath := tempDir + "/tls.key" + + // Generate TLS files with two extra IPs and domains. 
+	err = cert.GenCertPair(
+		"lnd autogenerated cert", certPath, keyPath, extraIPs[:2],
+		extraDomains[:2], cert.DefaultAutogenValidity,
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// We'll attempt to check up-to-date status for all variants of 1-3
+	// number of IPs and domains.
+	for numIPs := 1; numIPs <= len(extraIPs); numIPs++ {
+		for numDomains := 1; numDomains <= len(extraDomains); numDomains++ {
+			_, parsedCert, err := cert.LoadCert(
+				certPath, keyPath,
+			)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// Using the test case's number of IPs and domains, get
+			// the outdated status of the certificate we created
+			// above.
+			outdated, err := cert.IsOutdated(
+				parsedCert, extraIPs[:numIPs],
+				extraDomains[:numDomains],
+			)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// We expect it to be considered outdated if the IPs or
+			// domains don't match exactly what we created.
+			expected := numIPs != 2 || numDomains != 2
+			if outdated != expected {
+				t.Fatalf("expected certificate to be "+
+					"outdated=%v, got=%v", expected,
+					outdated)
+			}
+		}
+	}
+}
+
+// TestIsOutdatedPermutation tests that neither the order of listed IPs or
+// DNS names, nor duplicates in the lists, matters for whether we consider
+// the certificate outdated.
+func TestIsOutdatedPermutation(t *testing.T) {
+	tempDir, err := ioutil.TempDir("", "certtest")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	certPath := tempDir + "/tls.cert"
+	keyPath := tempDir + "/tls.key"
+
+	// Generate TLS files from the IPs and domains.
+	err = cert.GenCertPair(
+		"lnd autogenerated cert", certPath, keyPath, extraIPs[:],
+		extraDomains[:], cert.DefaultAutogenValidity,
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, parsedCert, err := cert.LoadCert(certPath, keyPath)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// If we have duplicate IPs or DNS names listed, that shouldn't matter.
+ dupIPs := make([]string, len(extraIPs)*2) + for i := range dupIPs { + dupIPs[i] = extraIPs[i/2] + } + + dupDNS := make([]string, len(extraDomains)*2) + for i := range dupDNS { + dupDNS[i] = extraDomains[i/2] + } + + outdated, err := cert.IsOutdated(parsedCert, dupIPs, dupDNS) + if err != nil { + t.Fatal(err) + } + + if outdated { + t.Fatalf("did not expect duplicate IPs or DNS names be " + + "considered outdated") + } + + // Similarly, the order of the lists shouldn't matter. + revIPs := make([]string, len(extraIPs)) + for i := range revIPs { + revIPs[i] = extraIPs[len(extraIPs)-1-i] + } + + revDNS := make([]string, len(extraDomains)) + for i := range revDNS { + revDNS[i] = extraDomains[len(extraDomains)-1-i] + } + + outdated, err = cert.IsOutdated(parsedCert, revIPs, revDNS) + if err != nil { + t.Fatal(err) + } + + if outdated { + t.Fatalf("did not expect reversed IPs or DNS names be " + + "considered outdated") + } +} diff --git a/cert/tls.go b/cert/tls.go new file mode 100644 index 0000000000..a8783158e1 --- /dev/null +++ b/cert/tls.go @@ -0,0 +1,60 @@ +package cert + +import ( + "crypto/tls" + "crypto/x509" +) + +var ( + /* + * tlsCipherSuites is the list of cipher suites we accept for TLS + * connections. 
These cipher suites fit the following criteria:
+	 * - Don't use outdated algorithms like SHA-1 and 3DES
+	 * - Don't use ECB mode or other insecure symmetric methods
+	 * - Included in the TLS v1.2 suite
+	 * - Are available in the Go 1.7.6 standard library (more are
+	 *   available in 1.8.3 and will be added after lnd no longer
+	 *   supports 1.7, including suites that support CBC mode)
+	 **/
+	tlsCipherSuites = []uint16{
+		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+		tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+		tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
+	}
+)
+
+// LoadCert loads a certificate and its corresponding private key from the
+// PEM files indicated and returns the certificate in the two formats in
+// which it is most commonly used.
+func LoadCert(certPath, keyPath string) (tls.Certificate, *x509.Certificate,
+	error) {
+
+	// The certData returned here is just a wrapper around the PEM blocks
+	// loaded from the file. The PEM is not yet fully parsed but a basic
+	// check is performed that the certificate and private key actually
+	// belong together.
+	certData, err := tls.LoadX509KeyPair(certPath, keyPath)
+	if err != nil {
+		return tls.Certificate{}, nil, err
+	}
+
+	// Now parse the PEM block of the certificate into its x509 data
+	// structure so it can be examined in more detail.
+	x509Cert, err := x509.ParseCertificate(certData.Certificate[0])
+	if err != nil {
+		return tls.Certificate{}, nil, err
+	}
+
+	return certData, x509Cert, nil
+}
+
+// TLSConfFromCert returns the default TLS configuration used for a server,
+// using the given certificate as identity.
+func TLSConfFromCert(certData tls.Certificate) *tls.Config { + return &tls.Config{ + Certificates: []tls.Certificate{certData}, + CipherSuites: tlsCipherSuites, + MinVersion: tls.VersionTLS12, + } +} diff --git a/chainntnfs/bitcoindnotify/bitcoind.go b/chainntnfs/bitcoindnotify/bitcoind.go index 850b63e69c..5956a0fb7c 100644 --- a/chainntnfs/bitcoindnotify/bitcoind.go +++ b/chainntnfs/bitcoindnotify/bitcoind.go @@ -262,6 +262,14 @@ out: return } + chainntnfs.Log.Infof("Historical "+ + "spend dispatch finished "+ + "for request %v (start=%v "+ + "end=%v) with details: %v", + msg.SpendRequest, + msg.StartHeight, msg.EndHeight, + spendDetails) + // If the historical dispatch finished // without error, we will invoke // UpdateSpendDetails even if none were diff --git a/chainntnfs/height_hint_cache.go b/chainntnfs/height_hint_cache.go index 25a9247925..b80d3ea72d 100644 --- a/chainntnfs/height_hint_cache.go +++ b/chainntnfs/height_hint_cache.go @@ -4,8 +4,8 @@ import ( "bytes" "errors" - bolt "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) var ( @@ -95,13 +95,13 @@ func NewHeightHintCache(db *channeldb.DB) (*HeightHintCache, error) { // initBuckets ensures that the primary buckets used by the circuit are // initialized so that we can assume their existence after startup. 
func (c *HeightHintCache) initBuckets() error { - return c.db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(spendHintBucket) + return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { + _, err := tx.CreateTopLevelBucket(spendHintBucket) if err != nil { return err } - _, err = tx.CreateBucketIfNotExists(confirmHintBucket) + _, err = tx.CreateTopLevelBucket(confirmHintBucket) return err }) } @@ -117,8 +117,8 @@ func (c *HeightHintCache) CommitSpendHint(height uint32, Log.Tracef("Updating spend hint to height %d for %v", height, spendRequests) - return c.db.Batch(func(tx *bolt.Tx) error { - spendHints := tx.Bucket(spendHintBucket) + return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { + spendHints := tx.ReadWriteBucket(spendHintBucket) if spendHints == nil { return ErrCorruptedHeightHintCache } @@ -148,8 +148,8 @@ func (c *HeightHintCache) CommitSpendHint(height uint32, // cache for the outpoint. func (c *HeightHintCache) QuerySpendHint(spendRequest SpendRequest) (uint32, error) { var hint uint32 - err := c.db.View(func(tx *bolt.Tx) error { - spendHints := tx.Bucket(spendHintBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + spendHints := tx.ReadBucket(spendHintBucket) if spendHints == nil { return ErrCorruptedHeightHintCache } @@ -180,8 +180,8 @@ func (c *HeightHintCache) PurgeSpendHint(spendRequests ...SpendRequest) error { Log.Tracef("Removing spend hints for %v", spendRequests) - return c.db.Batch(func(tx *bolt.Tx) error { - spendHints := tx.Bucket(spendHintBucket) + return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { + spendHints := tx.ReadWriteBucket(spendHintBucket) if spendHints == nil { return ErrCorruptedHeightHintCache } @@ -211,8 +211,8 @@ func (c *HeightHintCache) CommitConfirmHint(height uint32, Log.Tracef("Updating confirm hints to height %d for %v", height, confRequests) - return c.db.Batch(func(tx *bolt.Tx) error { - confirmHints := tx.Bucket(confirmHintBucket) + return kvdb.Batch(c.db.Backend, 
func(tx kvdb.RwTx) error { + confirmHints := tx.ReadWriteBucket(confirmHintBucket) if confirmHints == nil { return ErrCorruptedHeightHintCache } @@ -242,8 +242,8 @@ func (c *HeightHintCache) CommitConfirmHint(height uint32, // the cache for the transaction hash. func (c *HeightHintCache) QueryConfirmHint(confRequest ConfRequest) (uint32, error) { var hint uint32 - err := c.db.View(func(tx *bolt.Tx) error { - confirmHints := tx.Bucket(confirmHintBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + confirmHints := tx.ReadBucket(confirmHintBucket) if confirmHints == nil { return ErrCorruptedHeightHintCache } @@ -275,8 +275,8 @@ func (c *HeightHintCache) PurgeConfirmHint(confRequests ...ConfRequest) error { Log.Tracef("Removing confirm hints for %v", confRequests) - return c.db.Batch(func(tx *bolt.Tx) error { - confirmHints := tx.Bucket(confirmHintBucket) + return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error { + confirmHints := tx.ReadWriteBucket(confirmHintBucket) if confirmHints == nil { return ErrCorruptedHeightHintCache } diff --git a/chainntnfs/interface.go b/chainntnfs/interface.go index 88a389dd37..ace86e777d 100644 --- a/chainntnfs/interface.go +++ b/chainntnfs/interface.go @@ -241,6 +241,12 @@ type SpendDetail struct { SpendingHeight int32 } +// String returns a string representation of SpendDetail. +func (s *SpendDetail) String() string { + return fmt.Sprintf("%v[%d] spending %v at height=%v", s.SpenderTxHash, + s.SpenderInputIndex, s.SpentOutPoint, s.SpendingHeight) +} + // SpendEvent encapsulates a spentness notification. Its only field 'Spend' will // be sent upon once the target output passed into RegisterSpendNtfn has been // spent on the blockchain. 
diff --git a/chainntnfs/neutrinonotify/neutrino.go b/chainntnfs/neutrinonotify/neutrino.go index dfe6fca3aa..771c4b8627 100644 --- a/chainntnfs/neutrinonotify/neutrino.go +++ b/chainntnfs/neutrinonotify/neutrino.go @@ -523,11 +523,18 @@ func (n *NeutrinoNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequ scanHeight, err) } - // With the hash computed, we can now fetch the basic filter - // for this height. + // With the hash computed, we can now fetch the basic filter for this + // height. Since the range of required items is known we avoid + // roundtrips by requesting a batched response and save bandwidth by + // limiting the max number of items per batch. Since neutrino populates + // its underline filters cache with the batch response, the next call + // will execute a network query only once per batch and not on every + // iteration. regFilter, err := n.p2pNode.GetCFilter( *blockHash, wire.GCSFilterRegular, neutrino.NumRetries(5), + neutrino.OptimisticReverseBatch(), + neutrino.MaxBatchSize(int64(scanHeight-startHeight+1)), ) if err != nil { return nil, fmt.Errorf("unable to retrieve regular filter for "+ diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index becdb9b15a..94a959d503 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -103,6 +103,9 @@ type confNtfnSet struct { // details serves as a cache of the confirmation details of a // transaction that we'll use to determine if a transaction/output // script has already confirmed at the time of registration. + // details is also used to make sure that in case of an address reuse + // (funds sent to a previously confirmed script) no additional + // notification is registered which would lead to an inconsistent state. details *TxConfirmation } @@ -325,9 +328,10 @@ func NewSpendRequest(op *wire.OutPoint, pkScript []byte) (SpendRequest, error) { // String returns the string representation of the SpendRequest. 
func (r SpendRequest) String() string { if r.OutPoint != ZeroOutPoint { - return fmt.Sprintf("outpoint=%v", r.OutPoint) + return fmt.Sprintf("outpoint=%v, script=%v", r.OutPoint, + r.PkScript) } - return fmt.Sprintf("script=%v", r.PkScript) + return fmt.Sprintf("outpoint=, script=%v", r.PkScript) } // SpendHintKey returns the key that will be used to index the spend request's @@ -1080,8 +1084,8 @@ func (n *TxNotifier) RegisterSpend(outpoint *wire.OutPoint, pkScript []byte, // notifications don't also attempt a historical dispatch. spendSet.rescanStatus = rescanPending - Log.Debugf("Dispatching historical spend rescan for %v", - ntfn.SpendRequest) + Log.Infof("Dispatching historical spend rescan for %v, start=%d, "+ + "end=%d", ntfn.SpendRequest, startHeight, n.currentHeight) return &SpendRegistration{ Event: ntfn.Event, @@ -1241,6 +1245,8 @@ func (n *TxNotifier) updateSpendDetails(spendRequest SpendRequest, n.currentHeight, spendRequest, err) } + Log.Debugf("Updated spend hint to height=%v for unconfirmed "+ + "spend request %v", n.currentHeight, spendRequest) return nil } @@ -1266,6 +1272,9 @@ func (n *TxNotifier) updateSpendDetails(spendRequest SpendRequest, details.SpendingHeight, spendRequest, err) } + Log.Debugf("Updated spend hint to height=%v for confirmed spend "+ + "request %v", details.SpendingHeight, spendRequest) + spendSet.details = details for _, ntfn := range spendSet.ntfns { err := n.dispatchSpendDetails(ntfn, spendSet.details) @@ -1284,11 +1293,15 @@ func (n *TxNotifier) dispatchSpendDetails(ntfn *SpendNtfn, details *SpendDetail) // If there are no spend details to dispatch or if the notification has // already been dispatched, then we can skip dispatching to this client. 
if details == nil || ntfn.dispatched { + Log.Debugf("Skipping dispatch of spend details(%v) for "+ + "request %v, dispatched=%v", details, ntfn.SpendRequest, + ntfn.dispatched) return nil } - Log.Infof("Dispatching confirmed spend notification for %v at height=%d", - ntfn.SpendRequest, n.currentHeight) + Log.Infof("Dispatching confirmed spend notification for %v at "+ + "current height=%d: %v", ntfn.SpendRequest, n.currentHeight, + details) select { case ntfn.Event.Spend <- details: @@ -1341,6 +1354,8 @@ func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, blockHeight uint32, // First, we'll iterate over all the transactions found in this block to // determine if it includes any relevant transactions to the TxNotifier. + Log.Debugf("Filtering %d txns for %d spend requests at height %d", + len(txns), len(n.spendNotifications), blockHeight) for _, tx := range txns { n.filterTx( tx, blockHash, blockHeight, n.handleConfDetailsAtTip, @@ -1382,6 +1397,8 @@ func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, blockHeight uint32, } } + Log.Debugf("Deleting mature spend request %v at "+ + "height=%d", spendRequest, blockHeight) delete(n.spendNotifications, spendRequest) } delete(n.spendsByHeight, matureBlockHeight) @@ -1507,6 +1524,15 @@ func (n *TxNotifier) handleConfDetailsAtTip(confRequest ConfRequest, // TODO(wilmer): cancel pending historical rescans if any? confSet := n.confNotifications[confRequest] + + // If we already have details for this request, we don't want to add it + // again since we have already dispatched notifications for it. 
+ if confSet.details != nil { + Log.Warnf("Ignoring address reuse for %s at height %d.", + confRequest, details.BlockHeight) + return + } + confSet.rescanStatus = rescanComplete confSet.details = details @@ -1574,6 +1600,9 @@ func (n *TxNotifier) handleSpendDetailsAtTip(spendRequest SpendRequest, n.spendsByHeight[spendHeight] = opSet } opSet[spendRequest] = struct{}{} + + Log.Debugf("Spend request %v spent at tip=%d", spendRequest, + spendHeight) } // NotifyHeight dispatches confirmation and spend notifications to the clients diff --git a/chainntnfs/txnotifier_test.go b/chainntnfs/txnotifier_test.go index e7f9664dd2..1f4ff08541 100644 --- a/chainntnfs/txnotifier_test.go +++ b/chainntnfs/txnotifier_test.go @@ -604,6 +604,166 @@ func TestTxNotifierFutureSpendDispatch(t *testing.T) { } } +// TestTxNotifierFutureConfDispatchReuseSafe tests that the notifier does not +// misbehave even if two confirmation requests for the same script are issued +// at different block heights (which means funds are being sent to the same +// script multiple times). +func TestTxNotifierFutureConfDispatchReuseSafe(t *testing.T) { + t.Parallel() + + currentBlock := uint32(10) + hintCache := newMockHintCache() + n := chainntnfs.NewTxNotifier( + currentBlock, 2, hintCache, hintCache, + ) + + // We'll register a TX that sends to our test script and put it into a + // block. Additionally we register a notification request for just the + // script which should also be confirmed with that block. 
+ tx1 := wire.MsgTx{Version: 1} + tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript}) + tx1Hash := tx1.TxHash() + ntfn1, err := n.RegisterConf(&tx1Hash, testRawScript, 1, 1) + if err != nil { + t.Fatalf("unable to register ntfn: %v", err) + } + scriptNtfn1, err := n.RegisterConf(nil, testRawScript, 1, 1) + if err != nil { + t.Fatalf("unable to register ntfn: %v", err) + } + block := btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{&tx1}, + }) + currentBlock++ + err = n.ConnectTip(block.Hash(), currentBlock, block.Transactions()) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + if err := n.NotifyHeight(currentBlock); err != nil { + t.Fatalf("unable to dispatch notifications: %v", err) + } + + // Expect an update and confirmation of TX 1 at this point. We save the + // confirmation details because we expect to receive the same details + // for all further registrations. + var confDetails *chainntnfs.TxConfirmation + select { + case <-ntfn1.Event.Updates: + default: + t.Fatal("expected update of TX 1") + } + select { + case confDetails = <-ntfn1.Event.Confirmed: + if confDetails.BlockHeight != currentBlock { + t.Fatalf("expected TX to be confirmed in latest block") + } + default: + t.Fatal("expected confirmation of TX 1") + } + + // The notification for the script should also have received a + // confirmation. + select { + case <-scriptNtfn1.Event.Updates: + default: + t.Fatal("expected update of script ntfn") + } + select { + case details := <-scriptNtfn1.Event.Confirmed: + assertConfDetails(t, details, confDetails) + default: + t.Fatal("expected update of script ntfn") + } + + // Now register a second TX that spends to two outputs with the same + // script so we have a different TXID. And again register a confirmation + // for just the script. 
+ tx2 := wire.MsgTx{Version: 1} + tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript}) + tx2.AddTxOut(&wire.TxOut{PkScript: testRawScript}) + tx2Hash := tx2.TxHash() + ntfn2, err := n.RegisterConf(&tx2Hash, testRawScript, 1, 1) + if err != nil { + t.Fatalf("unable to register ntfn: %v", err) + } + scriptNtfn2, err := n.RegisterConf(nil, testRawScript, 1, 1) + if err != nil { + t.Fatalf("unable to register ntfn: %v", err) + } + block2 := btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{&tx2}, + }) + currentBlock++ + err = n.ConnectTip(block2.Hash(), currentBlock, block2.Transactions()) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + if err := n.NotifyHeight(currentBlock); err != nil { + t.Fatalf("unable to dispatch notifications: %v", err) + } + + // Transaction 2 should get a confirmation here too. Since it was + // a different TXID we wouldn't get the cached details here but the TX + // should be confirmed right away still. + select { + case <-ntfn2.Event.Updates: + default: + t.Fatal("expected update of TX 2") + } + select { + case details := <-ntfn2.Event.Confirmed: + if details.BlockHeight != currentBlock { + t.Fatalf("expected TX to be confirmed in latest block") + } + default: + t.Fatal("expected update of TX 2") + } + + // The second notification for the script should also have received a + // confirmation. Since it's the same script, we expect to get the cached + // details from the first TX back immediately. Nothing should be + // registered at the notifier for the current block height for that + // script any more. + select { + case <-scriptNtfn2.Event.Updates: + default: + t.Fatal("expected update of script ntfn") + } + select { + case details := <-scriptNtfn2.Event.Confirmed: + assertConfDetails(t, details, confDetails) + default: + t.Fatal("expected update of script ntfn") + } + + // Finally, mine a few empty blocks and expect both TXs to be confirmed. 
+ for currentBlock < 15 { + block := btcutil.NewBlock(&wire.MsgBlock{}) + currentBlock++ + err = n.ConnectTip( + block.Hash(), currentBlock, block.Transactions(), + ) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + if err := n.NotifyHeight(currentBlock); err != nil { + t.Fatalf("unable to dispatch notifications: %v", err) + } + } + + // Events for both confirmation requests should have been dispatched. + select { + case <-ntfn1.Event.Done: + default: + t.Fatal("expected notifications for TX 1 to be done") + } + select { + case <-ntfn2.Event.Done: + default: + t.Fatal("expected notifications for TX 2 to be done") + } +} + // TestTxNotifierHistoricalSpendDispatch tests that the TxNotifier dispatches // registered notifications when an outpoint is spent before registration. func TestTxNotifierHistoricalSpendDispatch(t *testing.T) { @@ -1995,6 +2155,210 @@ func TestTxNotifierSpendHintCache(t *testing.T) { } } +// TestTxNotifierSpendDuringHistoricalRescan checks that the height hints and +// spend notifications behave as expected when a spend is found at tip during a +// historical rescan. +func TestTxNotifierSpendDuringHistoricalRescan(t *testing.T) { + t.Parallel() + + const ( + startingHeight = 200 + reorgSafety = 10 + ) + + // Initialize our TxNotifier instance backed by a height hint cache. + hintCache := newMockHintCache() + n := chainntnfs.NewTxNotifier( + startingHeight, reorgSafety, hintCache, hintCache, + ) + + // Create a test outpoint and register it for spend notifications. + op1 := wire.OutPoint{Index: 1} + ntfn1, err := n.RegisterSpend(&op1, testRawScript, 1) + if err != nil { + t.Fatalf("unable to register spend for op1: %v", err) + } + + // A historical rescan should be initiated from the height hint to the + // current height.
+ if ntfn1.HistoricalDispatch.StartHeight != 1 { + t.Fatalf("expected historical dispatch to start at height hint") + } + + if ntfn1.HistoricalDispatch.EndHeight != startingHeight { + t.Fatalf("expected historical dispatch to end at current height") + } + + // It should not have a spend hint set upon registration, as we must + // first determine whether it has already been spent in the chain. + _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) + if err != chainntnfs.ErrSpendHintNotFound { + t.Fatalf("unexpected error when querying for height hint "+ + "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound, + err) + } + + // Create a new empty block and extend the chain. + height := uint32(startingHeight) + 1 + emptyBlock := btcutil.NewBlock(&wire.MsgBlock{}) + err = n.ConnectTip( + emptyBlock.Hash(), height, emptyBlock.Transactions(), + ) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + if err := n.NotifyHeight(height); err != nil { + t.Fatalf("unable to dispatch notifications: %v", err) + } + + // Since we haven't called UpdateSpendDetails yet, there should be no + // spend hint found. + _, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) + if err != chainntnfs.ErrSpendHintNotFound { + t.Fatalf("unexpected error when querying for height hint "+ + "expected: %v, got %v", chainntnfs.ErrSpendHintNotFound, + err) + } + + // Simulate a bunch of blocks being mined while the historical rescan + // is still in progress. We make sure to not mine more than reorgSafety + // blocks after the spend, since it will be forgotten then. + var spendHeight uint32 + for i := 0; i < reorgSafety; i++ { + height++ + + // Let the outpoint we are watching be spent midway. + var block *btcutil.Block + if i == 5 { + // We'll create a new block that only contains the + // spending transaction of the outpoint. 
+ spendTx1 := wire.NewMsgTx(2) + spendTx1.AddTxIn(&wire.TxIn{ + PreviousOutPoint: op1, + SignatureScript: testSigScript, + }) + block = btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{spendTx1}, + }) + spendHeight = height + } else { + // Otherwise we just create an empty block. + block = btcutil.NewBlock(&wire.MsgBlock{}) + } + + err = n.ConnectTip( + block.Hash(), height, block.Transactions(), + ) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + if err := n.NotifyHeight(height); err != nil { + t.Fatalf("unable to dispatch notifications: %v", err) + } + } + + // Check that the height hint was set to the spending block. + op1Hint, err := hintCache.QuerySpendHint( + ntfn1.HistoricalDispatch.SpendRequest, + ) + if err != nil { + t.Fatalf("unable to query for spend hint of op1: %v", err) + } + if op1Hint != spendHeight { + t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint) + } + + // We should be getting notified about the spend at this point. + select { + case <-ntfn1.Event.Spend: + default: + t.Fatal("expected to receive spend notification") + } + + // Now, we'll simulate that the historical rescan finished by + // calling UpdateSpendDetails. Since the spend actually happened at + // tip while the rescan was in progress, the height hint should not be + // updated to the latest height, but stay at the spend height. + err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil) + if err != nil { + t.Fatalf("unable to update spend details: %v", err) + } + + op1Hint, err = hintCache.QuerySpendHint( + ntfn1.HistoricalDispatch.SpendRequest, + ) + if err != nil { + t.Fatalf("unable to query for spend hint of op1: %v", err) + } + if op1Hint != spendHeight { + t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint) + } + + // Then, we'll create another block that spends a second outpoint.
+ op2 := wire.OutPoint{Index: 2} + spendTx2 := wire.NewMsgTx(2) + spendTx2.AddTxIn(&wire.TxIn{ + PreviousOutPoint: op2, + SignatureScript: testSigScript, + }) + height++ + block2 := btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{spendTx2}, + }) + err = n.ConnectTip(block2.Hash(), height, block2.Transactions()) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + if err := n.NotifyHeight(height); err != nil { + t.Fatalf("unable to dispatch notifications: %v", err) + } + + // The outpoint's spend hint should remain the same as it's already + // been spent before. + op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) + if err != nil { + t.Fatalf("unable to query for spend hint of op1: %v", err) + } + if op1Hint != spendHeight { + t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint) + } + + // Now mine enough blocks for the spend notification to be forgotten. + for i := 0; i < 2*reorgSafety; i++ { + height++ + block := btcutil.NewBlock(&wire.MsgBlock{}) + + err := n.ConnectTip( + block.Hash(), height, block.Transactions(), + ) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + if err := n.NotifyHeight(height); err != nil { + t.Fatalf("unable to dispatch notifications: %v", err) + } + } + + // Attempting to update spend details at this point should fail, since + // the spend request should be removed. This is to ensure the height + // hint won't be overwritten if the historical rescan finishes after + // the spend request has been notified and removed because it has + // matured. + err = n.UpdateSpendDetails(ntfn1.HistoricalDispatch.SpendRequest, nil) + if err == nil { + t.Fatalf("expected updating spend details to fail") + } + + // Finally, check that the height hint is still there, unchanged.
+ op1Hint, err = hintCache.QuerySpendHint(ntfn1.HistoricalDispatch.SpendRequest) + if err != nil { + t.Fatalf("unable to query for spend hint of op1: %v", err) + } + if op1Hint != spendHeight { + t.Fatalf("expected hint %d, got %d", spendHeight, op1Hint) + } +} + // TestTxNotifierNtfnDone ensures that a notification is sent to registered // clients through the Done channel once the notification request is no longer // under the risk of being reorged out of the chain. diff --git a/chainregistry.go b/chainregistry.go index 503b70cfd3..7f19877b80 100644 --- a/chainregistry.go +++ b/chainregistry.go @@ -30,12 +30,28 @@ import ( "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/btcwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/chainview" ) const ( - defaultBitcoinMinHTLCMSat = lnwire.MilliSatoshi(1000) + // defaultBitcoinMinHTLCMSat is the default smallest value htlc this + // node will accept. This value is proposed in the channel open sequence + // and cannot be changed during the life of the channel. It is 1 msat by + // default to allow maximum flexibility in deciding what size payments + // to forward. + // + // All forwarded payments are subjected to the min htlc constraint of + // the routing policy of the outgoing channel. This implicitly controls + // the minimum htlc value on the incoming channel too. + defaultBitcoinMinHTLCInMSat = lnwire.MilliSatoshi(1) + + // defaultBitcoinMinHTLCOutMSat is the default minimum htlc value that + // we require for sending out htlcs. Our channel peer may have a lower + // min htlc channel parameter, but we - by default - don't forward + // anything under the value defined here. + defaultBitcoinMinHTLCOutMSat = lnwire.MilliSatoshi(1000) // DefaultBitcoinBaseFeeMSat is the default forwarding base fee. 
DefaultBitcoinBaseFeeMSat = lnwire.MilliSatoshi(1000) @@ -47,19 +63,24 @@ const ( // delta. DefaultBitcoinTimeLockDelta = 40 - defaultLitecoinMinHTLCMSat = lnwire.MilliSatoshi(1000) - defaultLitecoinBaseFeeMSat = lnwire.MilliSatoshi(1000) - defaultLitecoinFeeRate = lnwire.MilliSatoshi(1) - defaultLitecoinTimeLockDelta = 576 - defaultLitecoinDustLimit = btcutil.Amount(54600) + defaultLitecoinMinHTLCInMSat = lnwire.MilliSatoshi(1) + defaultLitecoinMinHTLCOutMSat = lnwire.MilliSatoshi(1000) + defaultLitecoinBaseFeeMSat = lnwire.MilliSatoshi(1000) + defaultLitecoinFeeRate = lnwire.MilliSatoshi(1) + defaultLitecoinTimeLockDelta = 576 + defaultLitecoinDustLimit = btcutil.Amount(54600) // defaultBitcoinStaticFeePerKW is the fee rate of 50 sat/vbyte // expressed in sat/kw. - defaultBitcoinStaticFeePerKW = lnwallet.SatPerKWeight(12500) + defaultBitcoinStaticFeePerKW = chainfee.SatPerKWeight(12500) + + // defaultBitcoinStaticMinRelayFeeRate is the min relay fee used for + // static estimators. + defaultBitcoinStaticMinRelayFeeRate = chainfee.FeePerKwFloor // defaultLitecoinStaticFeePerKW is the fee rate of 200 sat/vbyte // expressed in sat/kw. - defaultLitecoinStaticFeePerKW = lnwallet.SatPerKWeight(50000) + defaultLitecoinStaticFeePerKW = chainfee.SatPerKWeight(50000) // btcToLtcConversionRate is a fixed ratio used in order to scale up // payments when running on the Litecoin chain. 
@@ -112,7 +133,7 @@ func (c chainCode) String() string { type chainControl struct { chainIO lnwallet.BlockChainIO - feeEstimator lnwallet.FeeEstimator + feeEstimator chainfee.Estimator signer input.Signer @@ -129,6 +150,8 @@ type chainControl struct { wallet *lnwallet.LightningWallet routingPolicy htlcswitch.ForwardingPolicy + + minHtlcIn lnwire.MilliSatoshi } // newChainControlFromConfig attempts to create a chainControl instance @@ -156,22 +179,25 @@ func newChainControlFromConfig(cfg *config, chanDB *channeldb.DB, switch registeredChains.PrimaryChain() { case bitcoinChain: cc.routingPolicy = htlcswitch.ForwardingPolicy{ - MinHTLC: cfg.Bitcoin.MinHTLC, + MinHTLCOut: cfg.Bitcoin.MinHTLCOut, BaseFee: cfg.Bitcoin.BaseFee, FeeRate: cfg.Bitcoin.FeeRate, TimeLockDelta: cfg.Bitcoin.TimeLockDelta, } - cc.feeEstimator = lnwallet.NewStaticFeeEstimator( - defaultBitcoinStaticFeePerKW, 0, + cc.minHtlcIn = cfg.Bitcoin.MinHTLCIn + cc.feeEstimator = chainfee.NewStaticEstimator( + defaultBitcoinStaticFeePerKW, + defaultBitcoinStaticMinRelayFeeRate, ) case litecoinChain: cc.routingPolicy = htlcswitch.ForwardingPolicy{ - MinHTLC: cfg.Litecoin.MinHTLC, + MinHTLCOut: cfg.Litecoin.MinHTLCOut, BaseFee: cfg.Litecoin.BaseFee, FeeRate: cfg.Litecoin.FeeRate, TimeLockDelta: cfg.Litecoin.TimeLockDelta, } - cc.feeEstimator = lnwallet.NewStaticFeeEstimator( + cc.minHtlcIn = cfg.Litecoin.MinHTLCIn + cc.feeEstimator = chainfee.NewStaticEstimator( defaultLitecoinStaticFeePerKW, 0, ) default: @@ -219,8 +245,8 @@ func newChainControlFromConfig(cfg *config, chanDB *channeldb.DB, if cfg.NeutrinoMode.FeeURL != "" { ltndLog.Infof("Using API fee estimator!") - estimator := lnwallet.NewWebAPIFeeEstimator( - lnwallet.SparseConfFeeSource{ + estimator := chainfee.NewWebAPIEstimator( + chainfee.SparseConfFeeSource{ URL: cfg.NeutrinoMode.FeeURL, }, defaultBitcoinStaticFeePerKW, @@ -294,7 +320,7 @@ func newChainControlFromConfig(cfg *config, chanDB *channeldb.DB, activeNetParams.Params, bitcoindHost, 
bitcoindMode.RPCUser, bitcoindMode.RPCPass, bitcoindMode.ZMQPubRawBlock, bitcoindMode.ZMQPubRawTx, - 100*time.Millisecond, + 5*time.Second, ) if err != nil { return nil, err @@ -323,15 +349,17 @@ func newChainControlFromConfig(cfg *config, chanDB *channeldb.DB, HTTPPostMode: true, } if cfg.Bitcoin.Active && !cfg.Bitcoin.RegTest { - ltndLog.Infof("Initializing groestlcoind backed fee estimator") + ltndLog.Infof("Initializing groestlcoind backed fee estimator in "+ + "%s mode", bitcoindMode.EstimateMode) // Finally, we'll re-initialize the fee estimator, as // if we're using bitcoind as a backend, then we can // use live fee estimates, rather than a statically // coded value. - fallBackFeeRate := lnwallet.SatPerKVByte(25 * 1000) - cc.feeEstimator, err = lnwallet.NewBitcoindFeeEstimator( - *rpcConfig, fallBackFeeRate.FeePerKWeight(), + fallBackFeeRate := chainfee.SatPerKVByte(25 * 1000) + cc.feeEstimator, err = chainfee.NewBitcoindEstimator( + *rpcConfig, bitcoindMode.EstimateMode, + fallBackFeeRate.FeePerKWeight(), ) if err != nil { return nil, err @@ -340,15 +368,17 @@ func newChainControlFromConfig(cfg *config, chanDB *channeldb.DB, return nil, err } } else if cfg.Litecoin.Active && !cfg.Litecoin.RegTest { - ltndLog.Infof("Initializing litecoind backed fee estimator") + ltndLog.Infof("Initializing litecoind backed fee estimator in "+ + "%s mode", bitcoindMode.EstimateMode) // Finally, we'll re-initialize the fee estimator, as // if we're using litecoind as a backend, then we can // use live fee estimates, rather than a statically // coded value. 
- fallBackFeeRate := lnwallet.SatPerKVByte(25 * 1000) - cc.feeEstimator, err = lnwallet.NewBitcoindFeeEstimator( - *rpcConfig, fallBackFeeRate.FeePerKWeight(), + fallBackFeeRate := chainfee.SatPerKVByte(25 * 1000) + cc.feeEstimator, err = chainfee.NewBitcoindEstimator( + *rpcConfig, bitcoindMode.EstimateMode, + fallBackFeeRate.FeePerKWeight(), ) if err != nil { return nil, err @@ -451,8 +481,8 @@ func newChainControlFromConfig(cfg *config, chanDB *channeldb.DB, // if we're using btcd as a backend, then we can use // live fee estimates, rather than a statically coded // value. - fallBackFeeRate := lnwallet.SatPerKVByte(25 * 1000) - cc.feeEstimator, err = lnwallet.NewBtcdFeeEstimator( + fallBackFeeRate := chainfee.SatPerKVByte(25 * 1000) + cc.feeEstimator, err = chainfee.NewBtcdEstimator( *rpcConfig, fallBackFeeRate.FeePerKWeight(), ) if err != nil { diff --git a/chanbackup/backup.go b/chanbackup/backup.go index ca3698a548..b956c6fa90 100644 --- a/chanbackup/backup.go +++ b/chanbackup/backup.go @@ -85,14 +85,14 @@ func FetchStaticChanBackups(chanSource LiveChannelSource) ([]Single, error) { // Now that we have all the channels, we'll use the chanSource to // obtain any auxiliary information we need to craft a backup for each // channel. 
- staticChanBackups := make([]Single, len(openChans)) - for i, openChan := range openChans { + staticChanBackups := make([]Single, 0, len(openChans)) + for _, openChan := range openChans { chanBackup, err := assembleChanBackup(chanSource, openChan) if err != nil { return nil, err } - staticChanBackups[i] = *chanBackup + staticChanBackups = append(staticChanBackups, *chanBackup) } return staticChanBackups, nil diff --git a/chanbackup/recover.go b/chanbackup/recover.go index 89c094583f..2340e97775 100644 --- a/chanbackup/recover.go +++ b/chanbackup/recover.go @@ -5,6 +5,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/keychain" ) @@ -47,6 +48,14 @@ func Recover(backups []Single, restorer ChannelRestorer, backup.FundingOutpoint) err := restorer.RestoreChansFromSingles(backup) + + // If a channel is already present in the channel DB, we can + // just continue. No reason to fail a whole set of multi backups + // for example. This allows resume of a restore in case another + // error happens. + if err == channeldb.ErrChanAlreadyExists { + continue + } if err != nil { return err } diff --git a/chanbackup/single.go b/chanbackup/single.go index 1bfb49ca30..490657b90d 100644 --- a/chanbackup/single.go +++ b/chanbackup/single.go @@ -31,6 +31,11 @@ const ( // implicitly denotes that this channel uses the new tweakless commit // format. TweaklessCommitVersion = 1 + + // AnchorsCommitVersion is the third SCB version. This version + // implicitly denotes that this channel uses the new anchor commitment + // format. + AnchorsCommitVersion = 2 ) // Single is a static description of an existing channel that can be used for @@ -66,6 +71,9 @@ type Single struct { // ShortChannelID encodes the exact location in the chain in which the // channel was initially confirmed. This includes: the block height, // transaction index, and the output within the target transaction. 
+ // Channels that were not confirmed at the time of backup creation will + // have the funding TX broadcast height set as their block height in + // the ShortChannelID. ShortChannelID lnwire.ShortChannelID // RemoteNodePub is the identity public key of the remote node this @@ -126,11 +134,21 @@ func NewSingle(channel *channeldb.OpenChannel, // key. _, shaChainPoint := btcec.PrivKeyFromBytes(btcec.S256(), b.Bytes()) + // If a channel is unconfirmed, the block height of the ShortChannelID + // is zero. This will lead to problems when trying to restore that + // channel as the spend notifier would get a height hint of zero. + // To work around that problem, we add the channel broadcast height + // to the channel ID so we can use that as height hint on restore. + chanID := channel.ShortChanID() + if chanID.BlockHeight == 0 { + chanID.BlockHeight = channel.FundingBroadcastHeight + } + single := Single{ IsInitiator: channel.IsInitiator, ChainHash: channel.ChainHash, FundingOutpoint: channel.FundingOutpoint, - ShortChannelID: channel.ShortChannelID, + ShortChannelID: chanID, RemoteNodePub: channel.IdentityPub, Addresses: nodeAddrs, Capacity: channel.Capacity, @@ -144,9 +162,14 @@ func NewSingle(channel *channeldb.OpenChannel, }, } - if channel.ChanType.IsTweakless() { + switch { + case channel.ChanType.HasAnchors(): + single.Version = AnchorsCommitVersion + + case channel.ChanType.IsTweakless(): single.Version = TweaklessCommitVersion - } else { + + default: single.Version = DefaultSingleVersion } @@ -161,6 +184,7 @@ func (s *Single) Serialize(w io.Writer) error { switch s.Version { case DefaultSingleVersion: case TweaklessCommitVersion: + case AnchorsCommitVersion: default: return fmt.Errorf("unable to serialize w/ unknown "+ "version: %v", s.Version) @@ -319,6 +343,7 @@ func (s *Single) Deserialize(r io.Reader) error { switch s.Version { case DefaultSingleVersion: case TweaklessCommitVersion: + case AnchorsCommitVersion: default: return fmt.Errorf("unable to 
de-serialize w/ unknown "+ "version: %v", s.Version) diff --git a/chanbackup/single_test.go b/chanbackup/single_test.go index ba56ece768..ee20d892f6 100644 --- a/chanbackup/single_test.go +++ b/chanbackup/single_test.go @@ -124,9 +124,9 @@ func genRandomOpenChannelShell() (*channeldb.OpenChannel, error) { isInitiator = true } - chanType := channeldb.SingleFunder + chanType := channeldb.SingleFunderBit if rand.Int63()%2 == 0 { - chanType = channeldb.SingleFunderTweakless + chanType = channeldb.SingleFunderTweaklessBit } return &channeldb.OpenChannel{ @@ -229,12 +229,20 @@ func TestSinglePackUnpack(t *testing.T) { valid: true, }, - // The new tweakless version, should pack/unpack with no problem. + // The new tweakless version, should pack/unpack with no + // problem. { version: TweaklessCommitVersion, valid: true, }, + // The new anchor version, should pack/unpack with no + // problem. + { + version: AnchorsCommitVersion, + valid: true, + }, + // A non-default version, atm this should result in a failure. { version: 99, @@ -410,4 +418,44 @@ func TestSinglePackStaticChanBackups(t *testing.T) { } } +// TestSingleUnconfirmedChannel tests that unconfirmed channels get serialized +// correctly by encoding the funding broadcast height as block height of the +// short channel ID. +func TestSingleUnconfirmedChannel(t *testing.T) { + t.Parallel() + + var fundingBroadcastHeight = uint32(1234) + + // Let's create an open channel shell that contains all the information + // we need to create a static channel backup but simulate an + // unconfirmed channel by setting the block height to 0. 
+ channel, err := genRandomOpenChannelShell() + if err != nil { + t.Fatalf("unable to gen open channel: %v", err) + } + channel.ShortChannelID.BlockHeight = 0 + channel.FundingBroadcastHeight = fundingBroadcastHeight + + singleChanBackup := NewSingle(channel, []net.Addr{addr1, addr2}) + keyRing := &mockKeyRing{} + + // Pack it and then unpack it again to make sure everything is written + // correctly, then check that the block height of the unpacked + // is the funding broadcast height we set before. + var b bytes.Buffer + if err := singleChanBackup.PackToWriter(&b, keyRing); err != nil { + t.Fatalf("unable to pack single: %v", err) + } + var unpackedSingle Single + err = unpackedSingle.UnpackFromReader(&b, keyRing) + if err != nil { + t.Fatalf("unable to unpack single: %v", err) + } + if unpackedSingle.ShortChannelID.BlockHeight != fundingBroadcastHeight { + t.Fatalf("invalid block height. got %d expected %d.", + unpackedSingle.ShortChannelID.BlockHeight, + fundingBroadcastHeight) + } +} + // TODO(roasbsef): fuzz parsing diff --git a/chancloser.go b/chancloser.go index 3c015a5b54..b261a36576 100644 --- a/chancloser.go +++ b/chancloser.go @@ -1,14 +1,15 @@ package lnd import ( + "bytes" "fmt" - "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" ) @@ -25,6 +26,12 @@ var ( // ErrInvalidState is returned when the closing state machine receives // a message while it is in an unknown state. ErrInvalidState = fmt.Errorf("invalid state") + + // errUpfrontShutdownScriptMismatch is returned when a peer or end user + // provides a script to cooperatively close out to which does not match + // the upfront shutdown script previously set for that party. 
+ errUpfrontShutdownScriptMismatch = fmt.Errorf("shutdown " + + "script does not match upfront shutdown script") ) // closeState represents all the possible states the channel closer state @@ -82,6 +89,9 @@ type chanCloseCfg struct { // forward payments. disableChannel func(wire.OutPoint) error + // disconnect will disconnect from the remote peer in this close. + disconnect func() error + // quit is a channel that should be sent upon in the occasion the state // machine should cease all progress and shutdown. quit chan struct{} @@ -144,14 +154,17 @@ type channelCloser struct { // remoteDeliveryScript is the script that we'll send the remote // party's settled channel funds to. remoteDeliveryScript []byte + + // locallyInitiated is true if we initiated the channel close. + locallyInitiated bool } // newChannelCloser creates a new instance of the channel closure given the // passed configuration, and delivery+fee preference. The final argument should // only be populated iff, we're the initiator of this closing request. func newChannelCloser(cfg chanCloseCfg, deliveryScript []byte, - idealFeePerKw lnwallet.SatPerKWeight, negotiationHeight uint32, - closeReq *htlcswitch.ChanClose) *channelCloser { + idealFeePerKw chainfee.SatPerKWeight, negotiationHeight uint32, + closeReq *htlcswitch.ChanClose, locallyInitiated bool) *channelCloser { // Given the target fee-per-kw, we'll compute what our ideal _total_ // fee will be starting at for this fee negotiation. @@ -187,6 +200,7 @@ func newChannelCloser(cfg chanCloseCfg, deliveryScript []byte, idealFeeSat: idealFeeSat, localDeliveryScript: deliveryScript, priorFeeOffers: make(map[btcutil.Amount]*lnwire.ClosingSigned), + locallyInitiated: locallyInitiated, } } @@ -200,12 +214,28 @@ func (c *channelCloser) initChanShutdown() (*lnwire.Shutdown, error) { // TODO(roasbeef): err if channel has htlc's? + // Before closing, we'll attempt to send a disable update for the + // channel. 
We do so before closing the channel as otherwise the current + // edge policy won't be retrievable from the graph. + if err := c.cfg.disableChannel(c.chanPoint); err != nil { + peerLog.Warnf("Unable to disable channel %v on "+ + "close: %v", c.chanPoint, err) + } + // Before returning the shutdown message, we'll unregister the channel // to ensure that it isn't seen as usable within the system. - // - // TODO(roasbeef): fail if err? c.cfg.unregisterChannel(c.cid) + // Before continuing, mark the channel as cooperatively closed with a + // nil txn. Even though we haven't negotiated the final txn, this + // guarantees that our listchannels rpc will be externally consistent, + // and reflect that the channel is being shutdown by the time the + // closing request returns. + err := c.cfg.channel.MarkCoopBroadcasted(nil, c.locallyInitiated) + if err != nil { + return nil, err + } + peerLog.Infof("ChannelPoint(%v): sending shutdown message", c.chanPoint) return shutdown, nil @@ -262,6 +292,40 @@ func (c *channelCloser) CloseRequest() *htlcswitch.ChanClose { return c.closeReq } +// maybeMatchScript attempts to match the script provided in our peer's +// shutdown message with the upfront shutdown script we have on record. +// If no upfront shutdown script was set, we do not need to enforce option +// upfront shutdown, so the function returns early. If an upfront script is +// set, we check whether it matches the script provided by our peer. If they +// do not match, we use the disconnect function provided to disconnect from +// the peer. +func maybeMatchScript(disconnect func() error, + upfrontScript, peerScript lnwire.DeliveryAddress) error { + + // If no upfront shutdown script was set, return early because we do not + // need to enforce closure to a specific script. + if len(upfrontScript) == 0 { + return nil + } + + // If an upfront shutdown script was provided, disconnect from the peer, as + // per BOLT 2, and return an error. 
+ if !bytes.Equal(upfrontScript, peerScript) { + peerLog.Warnf("peer's script: %x does not match upfront "+ + "shutdown script: %x", peerScript, upfrontScript) + + // Disconnect from the peer because they have violated option upfront + // shutdown. + if err := disconnect(); err != nil { + return err + } + + return errUpfrontShutdownScriptMismatch + } + + return nil +} + // ProcessCloseMsg attempts to process the next message in the closing series. // This method will update the state accordingly and return two primary values: // the next set of messages to be sent, and a bool indicating if the fee @@ -282,9 +346,33 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b "instead have %v", spew.Sdump(msg)) } - // Next, we'll note the other party's preference for their - // delivery address. We'll use this when we craft the closure - // transaction. + // As we're the responder to this shutdown (the other party + // wants to close), we'll check if this is a frozen channel or + // not. If the channel is frozen as we were also the initiator + // of the channel opening, then we'll deny their close attempt. + chanInitiator := c.cfg.channel.IsInitiator() + if !chanInitiator && c.cfg.channel.State().ChanType.IsFrozen() && + c.negotiationHeight < c.cfg.channel.State().ThawHeight { + + return nil, false, fmt.Errorf("initiator attempting "+ + "to co-op close frozen ChannelPoint(%v) "+ + "(current_height=%v, thaw_height=%v)", + c.chanPoint, c.negotiationHeight, + c.cfg.channel.State().ThawHeight) + } + + // If the remote node opened the channel with option upfront shutdown + // script, check that the script they provided matches. + if err := maybeMatchScript( + c.cfg.disconnect, c.cfg.channel.RemoteUpfrontShutdownScript(), + shutDownMsg.Address, + ); err != nil { + return nil, false, err + } + + // Once we have checked that the other party has not violated option + // upfront shutdown we set their preference for delivery address. 
We'll + // use this when we craft the closure transaction. c.remoteDeliveryScript = shutDownMsg.Address // We'll generate a shutdown message of our own to send across @@ -308,7 +396,7 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b // We'll also craft our initial close proposal in order to keep // the negotiation moving, but only if we're the negotiator. - if c.cfg.channel.IsInitiator() { + if chanInitiator { closeSigned, err := c.proposeCloseSigned(c.idealFeeSat) if err != nil { return nil, false, err @@ -332,7 +420,16 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b "instead have %v", spew.Sdump(msg)) } - // Now that we know this is a valid shutdown message, we'll + // If the remote node opened the channel with option upfront shutdown + // script, check that the script they provided matches. + if err := maybeMatchScript( + c.cfg.disconnect, c.cfg.channel.RemoteUpfrontShutdownScript(), + shutDownMsg.Address, + ); err != nil { + return nil, false, err + } + + // Now that we know this is a valid shutdown message and address, we'll // record their preferred delivery closing script. c.remoteDeliveryScript = shutDownMsg.Address @@ -412,11 +509,15 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b // transaction! We'll craft the final closing transaction so // we can broadcast it to the network. 
matchingSig := c.priorFeeOffers[remoteProposedFee].Signature - localSigBytes := matchingSig.ToSignatureBytes() - localSig := append(localSigBytes, byte(txscript.SigHashAll)) + localSig, err := matchingSig.ToSignature() + if err != nil { + return nil, false, err + } - remoteSigBytes := closeSignedMsg.Signature.ToSignatureBytes() - remoteSig := append(remoteSigBytes, byte(txscript.SigHashAll)) + remoteSig, err := closeSignedMsg.Signature.ToSignature() + if err != nil { + return nil, false, err + } closeTx, _, err := c.cfg.channel.CompleteCooperativeClose( localSig, remoteSig, c.localDeliveryScript, @@ -427,18 +528,12 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b } c.closingTx = closeTx - // Before closing, we'll attempt to send a disable update for - // the channel. We do so before closing the channel as otherwise - // the current edge policy won't be retrievable from the graph. - if err := c.cfg.disableChannel(c.chanPoint); err != nil { - peerLog.Warnf("Unable to disable channel %v on "+ - "close: %v", c.chanPoint, err) - } - // Before publishing the closing tx, we persist it to the // database, such that it can be republished if something goes // wrong. - err = c.cfg.channel.MarkCommitmentBroadcasted(closeTx) + err = c.cfg.channel.MarkCoopBroadcasted( + closeTx, c.locallyInitiated, + ) if err != nil { return nil, false, err } @@ -486,7 +581,6 @@ func (c *channelCloser) ProcessCloseMsg(msg lnwire.Message) ([]lnwire.Message, b // transaction for a channel based on the prior fee negotiations and our // current compromise fee. func (c *channelCloser) proposeCloseSigned(fee btcutil.Amount) (*lnwire.ClosingSigned, error) { - rawSig, _, _, err := c.cfg.channel.CreateCloseProposal( fee, c.localDeliveryScript, c.remoteDeliveryScript, ) @@ -498,7 +592,7 @@ func (c *channelCloser) proposeCloseSigned(fee btcutil.Amount) (*lnwire.ClosingS // party responds we'll be able to decide if we've agreed on fees or // not. 
c.lastFeeProposal = fee - parsedSig, err := lnwire.NewSigFromRawSignature(rawSig) + parsedSig, err := lnwire.NewSigFromSignature(rawSig) if err != nil { return nil, err } diff --git a/chancloser_test.go b/chancloser_test.go new file mode 100644 index 0000000000..f7c481d0c0 --- /dev/null +++ b/chancloser_test.go @@ -0,0 +1,76 @@ +package lnd + +import ( + "crypto/rand" + "testing" + + "github.com/lightningnetwork/lnd/lnwire" +) + +// randDeliveryAddress generates a random delivery address for testing. +func randDeliveryAddress(t *testing.T) lnwire.DeliveryAddress { + // Generate an address of maximum length. + da := lnwire.DeliveryAddress(make([]byte, 34)) + + _, err := rand.Read(da) + if err != nil { + t.Fatalf("cannot generate random address: %v", err) + } + + return da +} + +// TestMaybeMatchScript tests that the maybeMatchScript errors appropriately +// when an upfront shutdown script is set and the script provided does not +// match, and does not error in any other case. +func TestMaybeMatchScript(t *testing.T) { + addr1 := randDeliveryAddress(t) + addr2 := randDeliveryAddress(t) + + tests := []struct { + name string + shutdownScript lnwire.DeliveryAddress + upfrontScript lnwire.DeliveryAddress + expectedErr error + }{ + { + name: "no upfront shutdown set, script ok", + shutdownScript: addr1, + upfrontScript: []byte{}, + expectedErr: nil, + }, + { + name: "upfront shutdown set, script ok", + shutdownScript: addr1, + upfrontScript: addr1, + expectedErr: nil, + }, + { + name: "upfront shutdown set, script not ok", + shutdownScript: addr1, + upfrontScript: addr2, + expectedErr: errUpfrontShutdownScriptMismatch, + }, + { + name: "nil shutdown and empty upfront", + shutdownScript: nil, + upfrontScript: []byte{}, + expectedErr: nil, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + err := maybeMatchScript( + func() error { return nil }, test.upfrontScript, + test.shutdownScript, + ) + + if err != test.expectedErr { + 
t.Fatalf("Error: %v, expected error: %v", err, test.expectedErr) + } + }) + } +} diff --git a/chanfitness/chanevent.go b/chanfitness/chanevent.go new file mode 100644 index 0000000000..53048ee3b8 --- /dev/null +++ b/chanfitness/chanevent.go @@ -0,0 +1,218 @@ +package chanfitness + +import ( + "fmt" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/routing/route" +) + +type eventType int + +const ( + peerOnlineEvent eventType = iota + peerOfflineEvent +) + +// String provides string representations of channel events. +func (e eventType) String() string { + switch e { + case peerOnlineEvent: + return "peer_online" + + case peerOfflineEvent: + return "peer_offline" + } + + return "unknown" +} + +// channelEvent is a a timestamped event which is observed on a per channel +// basis. +type channelEvent struct { + timestamp time.Time + eventType eventType +} + +// chanEventLog stores all events that have occurred over a channel's lifetime. +type chanEventLog struct { + // channelPoint is the outpoint for the channel's funding transaction. + channelPoint wire.OutPoint + + // peer is the compressed public key of the peer being monitored. + peer route.Vertex + + // events is a log of timestamped events observed for the channel. + events []*channelEvent + + // now is expected to return the current time. It is supplied as an + // external function to enable deterministic unit tests. + now func() time.Time + + // openedAt tracks the first time this channel was seen. This is not + // necessarily the time that it confirmed on chain because channel events + // are not persisted at present. + openedAt time.Time + + // closedAt is the time that the channel was closed. If the channel has not + // been closed yet, it is zero. + closedAt time.Time +} + +// newEventLog creates an event log for a channel with the openedAt time set. 
+func newEventLog(channelPoint wire.OutPoint, peer route.Vertex, + now func() time.Time) *chanEventLog { + + eventlog := &chanEventLog{ + channelPoint: channelPoint, + peer: peer, + now: now, + openedAt: now(), + } + + return eventlog +} + +// close sets the closing time for an event log. +func (e *chanEventLog) close() { + e.closedAt = e.now() +} + +// add appends an event with the given type and current time to the event log. +// The open time for the eventLog will be set to the event's timestamp if it is +// not set yet. +func (e *chanEventLog) add(eventType eventType) { + // If the channel is already closed, return early without adding an event. + if !e.closedAt.IsZero() { + return + } + + // Add the event to the eventLog with the current timestamp. + event := &channelEvent{ + timestamp: e.now(), + eventType: eventType, + } + e.events = append(e.events, event) + + log.Debugf("Channel %v recording event: %v", e.channelPoint, eventType) +} + +// onlinePeriod represents a period of time over which a peer was online. +type onlinePeriod struct { + start, end time.Time +} + +// getOnlinePeriods returns a list of all the periods that the event log has +// recorded the remote peer as being online. In the unexpected case where there +// are no events, the function returns early. Online periods are defined as a +// peer online event which is terminated by a peer offline event. This function +// expects the event log provided to be ordered by ascending timestamp. +func (e *chanEventLog) getOnlinePeriods() []*onlinePeriod { + // Return early if there are no events, there are no online periods. + if len(e.events) == 0 { + return nil + } + + var ( + lastOnline time.Time + offline bool + onlinePeriods []*onlinePeriod + ) + + // Loop through all events to build a list of periods that the peer was + // online. Online periods are added when they are terminated with a peer + // offline event. 
If the log ends on an online event, the period between
+	// the online event and the present is not tracked. The type of the most
+	// recent event is tracked using the offline bool so that we can add a
+	// final online period if necessary.
+	for _, event := range e.events {
+
+		switch event.eventType {
+		case peerOnlineEvent:
+			lastOnline = event.timestamp
+			offline = false
+
+		case peerOfflineEvent:
+			offline = true
+
+			// Do not add to uptime if there is no previous online timestamp,
+			// the event log has started with an offline event.
+			if lastOnline.IsZero() {
+				continue
+			}
+
+			// The eventLog has recorded an offline event, having previously
+			// been online so we add an online period to the set of online periods.
+			onlinePeriods = append(onlinePeriods, &onlinePeriod{
+				start: lastOnline,
+				end:   event.timestamp,
+			})
+		}
+	}
+
+	// If the last event was a peer offline event, we do not need to calculate
+	// a final online period and can return online periods as is.
+	if offline {
+		return onlinePeriods
+	}
+
+	// The log ended on an online event, so we need to add a final online event.
+	// If the channel is closed, this period is until channel closure. If it is
+	// still open, we calculate it until the present.
+	endTime := e.closedAt
+	if endTime.IsZero() {
+		endTime = e.now()
+	}
+
+	// Add the final online period to the set and return.
+	return append(onlinePeriods, &onlinePeriod{
+		start: lastOnline,
+		end:   endTime,
+	})
+}
+
+// uptime calculates the total uptime we have recorded for a channel over the
+// inclusive range specified. An error is returned if the end of the range is
+// before the start or a zero end time is returned.
+func (e *chanEventLog) uptime(start, end time.Time) (time.Duration, error) {
+	// Error if we are provided with an invalid range to calculate uptime for.
+ if end.Before(start) { + return 0, fmt.Errorf("end time: %v before start time: %v", + end, start) + } + if end.IsZero() { + return 0, fmt.Errorf("zero end time") + } + + var uptime time.Duration + + for _, p := range e.getOnlinePeriods() { + // The online period ends before the range we're looking at, so we can + // skip over it. + if p.end.Before(start) { + continue + } + // The online period starts after the range we're looking at, so can + // stop calculating uptime. + if p.start.After(end) { + break + } + + // If the online period starts before our range, shift the start time up + // so that we only calculate uptime from the start of our range. + if p.start.Before(start) { + p.start = start + } + + // If the online period ends before our range, shift the end time + // forward so that we only calculate uptime until the end of the range. + if p.end.After(end) { + p.end = end + } + + uptime += p.end.Sub(p.start) + } + + return uptime, nil +} diff --git a/chanfitness/chanevent_test.go b/chanfitness/chanevent_test.go new file mode 100644 index 0000000000..72733dbef1 --- /dev/null +++ b/chanfitness/chanevent_test.go @@ -0,0 +1,395 @@ +package chanfitness + +import ( + "testing" + "time" +) + +// TestAdd tests adding events to an event log. It tests the case where the +// channel is open, and should have an event added, and the case where it is +// closed and the event should not be added. 
+func TestAdd(t *testing.T) { + tests := []struct { + name string + eventLog *chanEventLog + event eventType + expected []eventType + }{ + { + name: "Channel open", + eventLog: &chanEventLog{ + now: time.Now, + }, + event: peerOnlineEvent, + expected: []eventType{peerOnlineEvent}, + }, + { + name: "Channel closed, event not added", + eventLog: &chanEventLog{ + now: time.Now, + }, + event: peerOnlineEvent, + expected: []eventType{}, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + test.eventLog.add(test.event) + + for i, e := range test.expected { + if test.eventLog.events[i].eventType != e { + t.Fatalf("Expected event type: %v, got: %v", + e, test.eventLog.events[i].eventType) + } + } + }) + } +} + +// TestGetOnlinePeriod tests the getOnlinePeriod function. It tests the case +// where no events present, and the case where an additional online period +// must be added because the event log ends on an online event. +func TestGetOnlinePeriod(t *testing.T) { + // Set time for consistent testing. 
+ now := time.Now() + + fourHoursAgo := now.Add(time.Hour * -4) + threeHoursAgo := now.Add(time.Hour * -3) + twoHoursAgo := now.Add(time.Hour * -2) + oneHourAgo := now.Add(time.Hour * -1) + + tests := []struct { + name string + events []*channelEvent + expectedOnline []*onlinePeriod + openedAt time.Time + closedAt time.Time + }{ + { + name: "No events", + }, + { + name: "Start on online period", + events: []*channelEvent{ + { + timestamp: threeHoursAgo, + eventType: peerOnlineEvent, + }, + { + timestamp: twoHoursAgo, + eventType: peerOfflineEvent, + }, + }, + expectedOnline: []*onlinePeriod{ + { + start: threeHoursAgo, + end: twoHoursAgo, + }, + }, + }, + { + name: "Start on offline period", + events: []*channelEvent{ + { + timestamp: fourHoursAgo, + eventType: peerOfflineEvent, + }, + }, + }, + { + name: "End on an online period, channel not closed", + events: []*channelEvent{ + { + timestamp: fourHoursAgo, + eventType: peerOnlineEvent, + }, + }, + expectedOnline: []*onlinePeriod{ + { + start: fourHoursAgo, + end: now, + }, + }, + }, + { + name: "End on an online period, channel closed", + events: []*channelEvent{ + { + timestamp: fourHoursAgo, + eventType: peerOnlineEvent, + }, + }, + expectedOnline: []*onlinePeriod{ + { + start: fourHoursAgo, + end: oneHourAgo, + }, + }, + closedAt: oneHourAgo, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + score := &chanEventLog{ + events: test.events, + now: func() time.Time { + return now + }, + openedAt: test.openedAt, + closedAt: test.closedAt, + } + + online := score.getOnlinePeriods() + + if len(online) != len(test.expectedOnline) { + t.Fatalf("Expectd: %v online periods, got: %v", + len(test.expectedOnline), len(online)) + } + + for i, o := range test.expectedOnline { + if online[i].start != o.start { + t.Errorf("Expected start: %v, got %v", o.start, + online[i].start) + } + + if online[i].end != o.end { + t.Errorf("Expected end: %v, got %v", o.end, + online[i].end) + 
}
+			}
+		})
+
+	}
+}
+
+// TestUptime tests channel uptime calculation based on its event log.
+func TestUptime(t *testing.T) {
+	// Set time for consistent testing.
+	now := time.Now()
+
+	fourHoursAgo := now.Add(time.Hour * -4)
+	threeHoursAgo := now.Add(time.Hour * -3)
+	twoHoursAgo := now.Add(time.Hour * -2)
+	oneHourAgo := now.Add(time.Hour * -1)
+
+	tests := []struct {
+		name string
+
+		// opened at is the time the channel was recorded as being open, and is
+		// never expected to be zero.
+		openedAt time.Time
+
+		// closed at is the time the channel was recorded as being closed, and
+		// can have a zero value if the channel has not been closed yet.
+		closedAt time.Time
+
+		// events is the set of events that we are calculating uptime for.
+		events []*channelEvent
+
+		// startTime is the beginning of the period that we are calculating
+		// uptime for, it cannot have a zero value.
+		startTime time.Time
+
+		// endTime is the end of the period that we are calculating uptime for,
+		// it cannot have a zero value.
+		endTime time.Time
+
+		// expectedUptime is the amount of uptime we expect to be calculated
+		// over the period specified by startTime and endTime.
+ expectedUptime time.Duration + + // expectErr is set to true if we expect an error to be returned when + // calling the uptime function + expectErr bool + }{ + { + name: "End before start", + endTime: threeHoursAgo, + startTime: now, + expectErr: true, + }, + { + name: "Zero end time", + expectErr: true, + }, + { + name: "Online event and closed", + openedAt: fourHoursAgo, + closedAt: oneHourAgo, + events: []*channelEvent{ + { + timestamp: fourHoursAgo, + eventType: peerOnlineEvent, + }, + }, + startTime: fourHoursAgo, + endTime: now, + expectedUptime: time.Hour * 3, + }, + { + name: "Online event and not closed", + openedAt: fourHoursAgo, + events: []*channelEvent{ + { + timestamp: fourHoursAgo, + eventType: peerOnlineEvent, + }, + }, + startTime: fourHoursAgo, + endTime: now, + expectedUptime: time.Hour * 4, + }, + { + name: "Offline event and closed", + openedAt: fourHoursAgo, + closedAt: threeHoursAgo, + events: []*channelEvent{ + { + timestamp: fourHoursAgo, + eventType: peerOfflineEvent, + }, + }, + startTime: fourHoursAgo, + endTime: now, + }, + { + name: "Online event before close", + openedAt: fourHoursAgo, + closedAt: oneHourAgo, + events: []*channelEvent{ + { + timestamp: twoHoursAgo, + eventType: peerOnlineEvent, + }, + }, + startTime: fourHoursAgo, + endTime: now, + expectedUptime: time.Hour, + }, + { + name: "Online then offline event", + openedAt: fourHoursAgo, + closedAt: oneHourAgo, + events: []*channelEvent{ + { + timestamp: threeHoursAgo, + eventType: peerOnlineEvent, + }, + { + timestamp: twoHoursAgo, + eventType: peerOfflineEvent, + }, + }, + startTime: fourHoursAgo, + endTime: now, + expectedUptime: time.Hour, + }, + { + name: "Online event before uptime period", + openedAt: fourHoursAgo, + closedAt: oneHourAgo, + events: []*channelEvent{ + { + timestamp: threeHoursAgo, + eventType: peerOnlineEvent, + }, + }, + startTime: twoHoursAgo, + endTime: now, + expectedUptime: time.Hour, + }, + { + name: "Offline event after uptime period", + 
openedAt: fourHoursAgo, + events: []*channelEvent{ + { + timestamp: fourHoursAgo, + eventType: peerOnlineEvent, + }, + { + timestamp: now.Add(time.Hour), + eventType: peerOfflineEvent, + }, + }, + startTime: twoHoursAgo, + endTime: now, + expectedUptime: time.Hour * 2, + }, + { + name: "All events within period", + openedAt: fourHoursAgo, + events: []*channelEvent{ + { + timestamp: twoHoursAgo, + eventType: peerOnlineEvent, + }, + }, + startTime: threeHoursAgo, + endTime: oneHourAgo, + expectedUptime: time.Hour, + }, + { + name: "Multiple online and offline", + openedAt: now.Add(time.Hour * -8), + events: []*channelEvent{ + { + timestamp: now.Add(time.Hour * -7), + eventType: peerOnlineEvent, + }, + { + timestamp: now.Add(time.Hour * -6), + eventType: peerOfflineEvent, + }, + { + timestamp: now.Add(time.Hour * -5), + eventType: peerOnlineEvent, + }, + { + timestamp: now.Add(time.Hour * -4), + eventType: peerOfflineEvent, + }, + { + timestamp: now.Add(time.Hour * -3), + eventType: peerOnlineEvent, + }, + }, + startTime: now.Add(time.Hour * -8), + endTime: oneHourAgo, + expectedUptime: time.Hour * 4, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + score := &chanEventLog{ + events: test.events, + now: func() time.Time { + return now + }, + openedAt: test.openedAt, + closedAt: test.closedAt, + } + + uptime, err := score.uptime(test.startTime, test.endTime) + if test.expectErr && err == nil { + t.Fatal("Expected an error, got nil") + } + if !test.expectErr && err != nil { + t.Fatalf("Expcted no error, got: %v", err) + } + + if uptime != test.expectedUptime { + t.Errorf("Expected uptime: %v, got: %v", + test.expectedUptime, uptime) + } + }) + } +} diff --git a/chanfitness/chaneventstore.go b/chanfitness/chaneventstore.go new file mode 100644 index 0000000000..b54409bfce --- /dev/null +++ b/chanfitness/chaneventstore.go @@ -0,0 +1,398 @@ +// Package chanfitness monitors the behaviour of channels to provide insight +// 
into the health and performance of a channel. This is achieved by maintaining +// an event store which tracks events for each channel. +// +// Lifespan: the period that the channel has been known to the scoring system. +// Note that lifespan may not equal the channel's full lifetime because data is +// not currently persisted. +// +// Uptime: the total time within a given period that the channel's remote peer +// has been online. +package chanfitness + +import ( + "errors" + "sync" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channelnotifier" + "github.com/lightningnetwork/lnd/peernotifier" + "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/subscribe" +) + +var ( + // errShuttingDown is returned when the store cannot respond to a query because + // it has received the shutdown signal. + errShuttingDown = errors.New("channel event store shutting down") + + // ErrChannelNotFound is returned when a query is made for a channel that + // the event store does not have knowledge of. + ErrChannelNotFound = errors.New("channel not found in event store") +) + +// ChannelEventStore maintains a set of event logs for the node's channels to +// provide insight into the performance and health of channels. +type ChannelEventStore struct { + cfg *Config + + // channels maps channel points to event logs. + channels map[wire.OutPoint]*chanEventLog + + // peers tracks the current online status of peers based on online/offline + // events. + peers map[route.Vertex]bool + + // lifespanRequests serves requests for the lifespan of channels. + lifespanRequests chan lifespanRequest + + // uptimeRequests serves requests for the uptime of channels. + uptimeRequests chan uptimeRequest + + quit chan struct{} + + wg sync.WaitGroup +} + +// Config provides the event store with functions required to monitor channel +// activity. 
All elements of the config must be non-nil for the event store to +// operate. +type Config struct { + // SubscribeChannelEvents provides a subscription client which provides a + // stream of channel events. + SubscribeChannelEvents func() (*subscribe.Client, error) + + // SubscribePeerEvents provides a subscription client which provides a + // stream of peer online/offline events. + SubscribePeerEvents func() (*subscribe.Client, error) + + // GetOpenChannels provides a list of existing open channels which is used + // to populate the ChannelEventStore with a set of channels on startup. + GetOpenChannels func() ([]*channeldb.OpenChannel, error) +} + +// lifespanRequest contains the channel ID required to query the store for a +// channel's lifespan and a blocking response channel on which the result is +// sent. +type lifespanRequest struct { + channelPoint wire.OutPoint + responseChan chan lifespanResponse +} + +// lifespanResponse contains the response to a lifespanRequest and an error if +// one occurred. +type lifespanResponse struct { + start time.Time + end time.Time + err error +} + +// uptimeRequest contains the parameters required to query the store for a +// channel's uptime and a blocking response channel on which the result is sent. +type uptimeRequest struct { + channelPoint wire.OutPoint + startTime time.Time + endTime time.Time + responseChan chan uptimeResponse +} + +// uptimeResponse contains the response to an uptimeRequest and an error if one +// occurred. +type uptimeResponse struct { + uptime time.Duration + err error +} + +// NewChannelEventStore initializes an event store with the config provided. +// Note that this function does not start the main event loop, Start() must be +// called. 
+func NewChannelEventStore(config *Config) *ChannelEventStore { + store := &ChannelEventStore{ + cfg: config, + channels: make(map[wire.OutPoint]*chanEventLog), + peers: make(map[route.Vertex]bool), + lifespanRequests: make(chan lifespanRequest), + uptimeRequests: make(chan uptimeRequest), + quit: make(chan struct{}), + } + + return store +} + +// Start adds all existing open channels to the event store and starts the main +// loop which records channel and peer events, and serves requests for +// information from the store. If this function fails, it cancels its existing +// subscriptions and returns an error. +func (c *ChannelEventStore) Start() error { + // Create a subscription to channel events. + channelClient, err := c.cfg.SubscribeChannelEvents() + if err != nil { + return err + } + + // Create a subscription to peer events. If an error occurs, cancel the + // existing subscription to channel events and return. + peerClient, err := c.cfg.SubscribePeerEvents() + if err != nil { + channelClient.Cancel() + return err + } + + // cancel should be called to cancel all subscriptions if an error occurs. + cancel := func() { + channelClient.Cancel() + peerClient.Cancel() + } + + // Add the existing set of channels to the event store. This is required + // because channel events will not be triggered for channels that exist + // at startup time. + channels, err := c.cfg.GetOpenChannels() + if err != nil { + cancel() + return err + } + + log.Infof("Adding %v channels to event store", len(channels)) + + for _, ch := range channels { + peerKey, err := route.NewVertexFromBytes( + ch.IdentityPub.SerializeCompressed(), + ) + if err != nil { + cancel() + return err + } + + // Add existing channels to the channel store with an initial peer + // online or offline event. + c.addChannel(ch.FundingOutpoint, peerKey) + } + + // Start a goroutine that consumes events from all subscriptions. 
+	c.wg.Add(1)
+	go c.consume(&subscriptions{
+		channelUpdates: channelClient.Updates(),
+		peerUpdates:    peerClient.Updates(),
+		cancel:         cancel,
+	})
+
+	return nil
+}
+
+// Stop terminates all goroutines started by the event store.
+func (c *ChannelEventStore) Stop() {
+	log.Info("Stopping event store")
+
+	// Stop the consume goroutine.
+	close(c.quit)
+
+	c.wg.Wait()
+}
+
+// addChannel adds a new channel to the ChannelEventStore's map of channels with
+// an initial peer online state (if the peer is online). If the channel is
+// already present in the map, the function returns early. This function should
+// be called to add existing channels on startup and when open channel events
+// are observed.
+func (c *ChannelEventStore) addChannel(channelPoint wire.OutPoint,
+	peer route.Vertex) {
+
+	// Check for the unexpected case where the channel is already in the store.
+	_, ok := c.channels[channelPoint]
+	if ok {
+		log.Errorf("Channel %v duplicated in channel store", channelPoint)
+		return
+	}
+
+	// Create an event log for the channel.
+	eventLog := newEventLog(channelPoint, peer, time.Now)
+
+	// If the peer is already online, add a peer online event to record
+	// the starting state of the peer.
+	if c.peers[peer] {
+		eventLog.add(peerOnlineEvent)
+	}
+
+	c.channels[channelPoint] = eventLog
+}
+
+// closeChannel records a closed time for a channel, and returns early if the
+// channel is not known to the event store.
+func (c *ChannelEventStore) closeChannel(channelPoint wire.OutPoint) {
+	// Check for the unexpected case where the channel is unknown to the store.
+	eventLog, ok := c.channels[channelPoint]
+	if !ok {
+		log.Errorf("Close channel %v unknown to store", channelPoint)
+		return
+	}
+
+	eventLog.close()
+}
+
+// peerEvent adds a peer online or offline event to all channels we currently
+// have open with a peer.
+func (c *ChannelEventStore) peerEvent(peer route.Vertex, event eventType) { + // Track current online status of peers in the channelEventStore. + c.peers[peer] = event == peerOnlineEvent + + for _, eventLog := range c.channels { + if eventLog.peer == peer { + eventLog.add(event) + } + } +} + +// subscriptions abstracts away from subscription clients to allow for mocking. +type subscriptions struct { + channelUpdates <-chan interface{} + peerUpdates <-chan interface{} + cancel func() +} + +// consume is the event store's main loop. It consumes subscriptions to update +// the event store with channel and peer events, and serves requests for channel +// uptime and lifespan. +func (c *ChannelEventStore) consume(subscriptions *subscriptions) { + defer c.wg.Done() + defer subscriptions.cancel() + + // Consume events until the channel is closed. + for { + select { + // Process channel opened and closed events. + case e := <-subscriptions.channelUpdates: + switch event := e.(type) { + // A new channel has been opened, we must add the channel to the + // store and record a channel open event. + case channelnotifier.OpenChannelEvent: + peerKey, err := route.NewVertexFromBytes( + event.Channel.IdentityPub.SerializeCompressed(), + ) + if err != nil { + log.Errorf("Could not get vertex from: %v", + event.Channel.IdentityPub.SerializeCompressed()) + } + + c.addChannel(event.Channel.FundingOutpoint, peerKey) + + // A channel has been closed, we must remove the channel from the + // store and record a channel closed event. + case channelnotifier.ClosedChannelEvent: + c.closeChannel(event.CloseSummary.ChanPoint) + } + + // Process peer online and offline events. + case e := <-subscriptions.peerUpdates: + switch event := e.(type) { + // We have reestablished a connection with our peer, and should + // record an online event for any channels with that peer. 
+ case peernotifier.PeerOnlineEvent: + c.peerEvent(event.PubKey, peerOnlineEvent) + + // We have lost a connection with our peer, and should record an + // offline event for any channels with that peer. + case peernotifier.PeerOfflineEvent: + c.peerEvent(event.PubKey, peerOfflineEvent) + } + + // Serve all requests for channel lifetime. + case req := <-c.lifespanRequests: + var resp lifespanResponse + + channel, ok := c.channels[req.channelPoint] + if !ok { + resp.err = ErrChannelNotFound + } else { + resp.start = channel.openedAt + resp.end = channel.closedAt + } + + req.responseChan <- resp + + // Serve requests for channel uptime. + case req := <-c.uptimeRequests: + var resp uptimeResponse + + channel, ok := c.channels[req.channelPoint] + if !ok { + resp.err = ErrChannelNotFound + } else { + uptime, err := channel.uptime(req.startTime, req.endTime) + resp.uptime = uptime + resp.err = err + } + + req.responseChan <- resp + + // Exit if the store receives the signal to shutdown. + case <-c.quit: + return + } + } +} + +// GetLifespan returns the opening and closing time observed for a channel and +// a boolean to indicate whether the channel is known to the event store. If +// the channel is still open, a zero close time is returned. +func (c *ChannelEventStore) GetLifespan( + channelPoint wire.OutPoint) (time.Time, time.Time, error) { + + request := lifespanRequest{ + channelPoint: channelPoint, + responseChan: make(chan lifespanResponse), + } + + // Send a request for the channel's lifespan to the main event loop, or + // return early with an error if the store has already received a shutdown + // signal. + select { + case c.lifespanRequests <- request: + case <-c.quit: + return time.Time{}, time.Time{}, errShuttingDown + } + + // Return the response we receive on the response channel or exit early if + // the store is instructed to exit.
+ select { + case resp := <-request.responseChan: + return resp.start, resp.end, resp.err + + case <-c.quit: + return time.Time{}, time.Time{}, errShuttingDown + } +} + +// GetUptime returns the uptime of a channel over a period and an error if the +// channel cannot be found or the uptime calculation fails. +func (c *ChannelEventStore) GetUptime(channelPoint wire.OutPoint, startTime, + endTime time.Time) (time.Duration, error) { + + request := uptimeRequest{ + channelPoint: channelPoint, + startTime: startTime, + endTime: endTime, + responseChan: make(chan uptimeResponse), + } + + // Send a request for the channel's uptime to the main event loop, or + // return early with an error if the store has already received a shutdown + // signal. + select { + case c.uptimeRequests <- request: + case <-c.quit: + return 0, errShuttingDown + } + + // Return the response we receive on the response channel or exit early if + // the store is instructed to exit. + select { + case resp := <-request.responseChan: + return resp.uptime, resp.err + + case <-c.quit: + return 0, errShuttingDown + } +} diff --git a/chanfitness/chaneventstore_test.go b/chanfitness/chaneventstore_test.go new file mode 100644 index 0000000000..5a78588467 --- /dev/null +++ b/chanfitness/chaneventstore_test.go @@ -0,0 +1,529 @@ +package chanfitness + +import ( + "errors" + "testing" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channelnotifier" + "github.com/lightningnetwork/lnd/peernotifier" + "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/subscribe" +) + +// TestStartStoreError tests the starting of the store in cases where the setup +// functions fail. It does not test the mechanics of consuming events because +// these are covered in a separate set of tests. 
+func TestStartStoreError(t *testing.T) { + // Ok and erroring subscribe functions are defined here to de-clutter tests. + okSubscribeFunc := func() (*subscribe.Client, error) { + return &subscribe.Client{ + Cancel: func() {}, + }, nil + } + + errSubscribeFunc := func() (client *subscribe.Client, e error) { + return nil, errors.New("intentional test err") + } + + tests := []struct { + name string + ChannelEvents func() (*subscribe.Client, error) + PeerEvents func() (*subscribe.Client, error) + GetChannels func() ([]*channeldb.OpenChannel, error) + }{ + { + name: "Channel events fail", + ChannelEvents: errSubscribeFunc, + }, + { + name: "Peer events fail", + ChannelEvents: okSubscribeFunc, + PeerEvents: errSubscribeFunc, + }, + { + name: "Get open channels fails", + ChannelEvents: okSubscribeFunc, + PeerEvents: okSubscribeFunc, + GetChannels: func() (channels []*channeldb.OpenChannel, e error) { + return nil, errors.New("intentional test err") + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + store := NewChannelEventStore(&Config{ + SubscribeChannelEvents: test.ChannelEvents, + SubscribePeerEvents: test.PeerEvents, + GetOpenChannels: test.GetChannels, + }) + + err := store.Start() + // Check that we receive an error, because the test only checks for + // error cases. + if err == nil { + t.Fatalf("Expected error on startup, got: nil") + } + }) + } +} + +// getTestChannel returns a non-zero peer pubKey, serialized pubKey and channel +// outpoint for testing. 
+func getTestChannel(t *testing.T) (*btcec.PublicKey, route.Vertex, + wire.OutPoint) { + + privKey, err := btcec.NewPrivateKey(btcec.S256()) + if err != nil { + t.Fatalf("Error getting pubkey: %v", err) + } + + pubKey, err := route.NewVertexFromBytes( + privKey.PubKey().SerializeCompressed(), + ) + if err != nil { + t.Fatalf("Could not create vertex: %v", err) + } + + return privKey.PubKey(), pubKey, wire.OutPoint{ + Hash: [chainhash.HashSize]byte{1, 2, 3}, + Index: 0, + } +} + +// TestMonitorChannelEvents tests the store's handling of channel and peer +// events. It tests for the unexpected cases where we receive a channel open for +// an already known channel and but does not test for closing an unknown channel +// because it would require custom logic in the test to prevent iterating +// through an eventLog which does not exist. This test does not test handling +// of uptime and lifespan requests, as they are tested in their own tests. +func TestMonitorChannelEvents(t *testing.T) { + pubKey, vertex, chanPoint := getTestChannel(t) + + tests := []struct { + name string + + // generateEvents takes channels which represent the updates channels + // for subscription clients and passes events in the desired order. + // This function is intended to be blocking so that the test does not + // have a data race with event consumption, so the channels should not + // be buffered. + generateEvents func(channelEvents, peerEvents chan<- interface{}) + + // expectedEvents is the expected set of event types in the store. + expectedEvents []eventType + }{ + { + name: "Channel opened, peer comes online", + generateEvents: func(channelEvents, peerEvents chan<- interface{}) { + // Add an open channel event + channelEvents <- channelnotifier.OpenChannelEvent{ + Channel: &channeldb.OpenChannel{ + FundingOutpoint: chanPoint, + IdentityPub: pubKey, + }, + } + + // Add a peer online event. 
+ peerEvents <- peernotifier.PeerOnlineEvent{PubKey: vertex} + }, + expectedEvents: []eventType{peerOnlineEvent}, + }, + { + name: "Duplicate channel open events", + generateEvents: func(channelEvents, peerEvents chan<- interface{}) { + // Add an open channel event + channelEvents <- channelnotifier.OpenChannelEvent{ + Channel: &channeldb.OpenChannel{ + FundingOutpoint: chanPoint, + IdentityPub: pubKey, + }, + } + + // Add a peer online event. + peerEvents <- peernotifier.PeerOnlineEvent{PubKey: vertex} + + // Add a duplicate channel open event. + channelEvents <- channelnotifier.OpenChannelEvent{ + Channel: &channeldb.OpenChannel{ + FundingOutpoint: chanPoint, + IdentityPub: pubKey, + }, + } + }, + expectedEvents: []eventType{peerOnlineEvent}, + }, + { + name: "Channel opened, peer already online", + generateEvents: func(channelEvents, peerEvents chan<- interface{}) { + // Add a peer online event. + peerEvents <- peernotifier.PeerOnlineEvent{PubKey: vertex} + + // Add an open channel event + channelEvents <- channelnotifier.OpenChannelEvent{ + Channel: &channeldb.OpenChannel{ + FundingOutpoint: chanPoint, + IdentityPub: pubKey, + }, + } + }, + expectedEvents: []eventType{peerOnlineEvent}, + }, + + { + name: "Channel opened, peer offline, closed", + generateEvents: func(channelEvents, peerEvents chan<- interface{}) { + // Add an open channel event + channelEvents <- channelnotifier.OpenChannelEvent{ + Channel: &channeldb.OpenChannel{ + FundingOutpoint: chanPoint, + IdentityPub: pubKey, + }, + } + + // Add a peer online event. + peerEvents <- peernotifier.PeerOfflineEvent{PubKey: vertex} + + // Add a close channel event. 
+ channelEvents <- channelnotifier.ClosedChannelEvent{ + CloseSummary: &channeldb.ChannelCloseSummary{ + ChanPoint: chanPoint, + }, + } + }, + expectedEvents: []eventType{peerOfflineEvent}, + }, + { + name: "Event after channel close not recorded", + generateEvents: func(channelEvents, peerEvents chan<- interface{}) { + // Add an open channel event + channelEvents <- channelnotifier.OpenChannelEvent{ + Channel: &channeldb.OpenChannel{ + FundingOutpoint: chanPoint, + IdentityPub: pubKey, + }, + } + + // Add a close channel event. + channelEvents <- channelnotifier.ClosedChannelEvent{ + CloseSummary: &channeldb.ChannelCloseSummary{ + ChanPoint: chanPoint, + }, + } + + // Add a peer online event. + peerEvents <- peernotifier.PeerOfflineEvent{PubKey: vertex} + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + // Create a store with the channels and online peers specified + // by the test. + store := NewChannelEventStore(&Config{}) + + // Create channels which represent the subscriptions we have to peer + // and client events. + channelEvents := make(chan interface{}) + peerEvents := make(chan interface{}) + + store.wg.Add(1) + go store.consume(&subscriptions{ + channelUpdates: channelEvents, + peerUpdates: peerEvents, + cancel: func() {}, + }) + + // Add events to the store then kill the goroutine using store.Stop. + test.generateEvents(channelEvents, peerEvents) + store.Stop() + + // Retrieve the eventLog for the channel and check that its + // contents are as expected. + eventLog, ok := store.channels[chanPoint] + if !ok { + t.Fatalf("Expected to find event store") + } + + for i, e := range eventLog.events { + if test.expectedEvents[i] != e.eventType { + t.Fatalf("Expected type: %v, got: %v", + test.expectedEvents[i], e.eventType) + } + } + }) + } +} + +// TestGetLifetime tests the GetLifetime function for the cases where a channel +// is known and unknown to the store. 
+func TestGetLifetime(t *testing.T) { + now := time.Now() + + tests := []struct { + name string + channelFound bool + channelPoint wire.OutPoint + opened time.Time + closed time.Time + expectedError error + }{ + { + name: "Channel found", + channelFound: true, + opened: now, + closed: now.Add(time.Hour * -1), + expectedError: nil, + }, + { + name: "Channel not found", + expectedError: ErrChannelNotFound, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + // Create and empty events store for testing. + store := NewChannelEventStore(&Config{}) + + // Start goroutine which consumes GetLifespan requests. + store.wg.Add(1) + go store.consume(&subscriptions{ + channelUpdates: make(chan interface{}), + peerUpdates: make(chan interface{}), + cancel: func() {}, + }) + + // Stop the store's go routine. + defer store.Stop() + + // Add channel to eventStore if the test indicates that it should + // be present. + if test.channelFound { + store.channels[test.channelPoint] = &chanEventLog{ + openedAt: test.opened, + closedAt: test.closed, + } + } + + open, close, err := store.GetLifespan(test.channelPoint) + if test.expectedError != err { + t.Fatalf("Expected: %v, got: %v", test.expectedError, err) + } + + if open != test.opened { + t.Errorf("Expected: %v, got %v", test.opened, open) + } + + if close != test.closed { + t.Errorf("Expected: %v, got %v", test.closed, close) + } + }) + } +} + +// TestGetUptime tests the getUptime call for channels known to the event store. +// It does not test the trivial case where a channel is unknown to the store, +// because this is simply a zero return if an item is not found in a map. It +// tests the unexpected edge cases where a tracked channel does not have any +// events recorded, and when a zero time is specified for the uptime range. +func TestGetUptime(t *testing.T) { + // Set time for deterministic unit tests. 
+ now := time.Now() + + twoHoursAgo := now.Add(time.Hour * -2) + fourHoursAgo := now.Add(time.Hour * -4) + + tests := []struct { + name string + + channelPoint wire.OutPoint + + // events is the set of events we expect to find in the channel store. + events []*channelEvent + + // openedAt is the time the channel is recorded as open by the store. + openedAt time.Time + + // closedAt is the time the channel is recorded as closed by the store. + // If the channel is still open, this value is zero. + closedAt time.Time + + // channelFound is true if we expect to find the channel in the store. + channelFound bool + + // startTime specifies the beginning of the uptime range we want to + // calculate. + startTime time.Time + + // endTime specified the end of the uptime range we want to calculate. + endTime time.Time + + expectedUptime time.Duration + + expectedError error + }{ + { + name: "No events", + startTime: twoHoursAgo, + endTime: now, + channelFound: true, + expectedError: nil, + }, + { + name: "50% Uptime", + events: []*channelEvent{ + { + timestamp: fourHoursAgo, + eventType: peerOnlineEvent, + }, + { + timestamp: twoHoursAgo, + eventType: peerOfflineEvent, + }, + }, + openedAt: fourHoursAgo, + expectedUptime: time.Hour * 2, + startTime: fourHoursAgo, + endTime: now, + channelFound: true, + expectedError: nil, + }, + { + name: "Zero start time", + events: []*channelEvent{ + { + timestamp: fourHoursAgo, + eventType: peerOnlineEvent, + }, + }, + openedAt: fourHoursAgo, + expectedUptime: time.Hour * 4, + endTime: now, + channelFound: true, + expectedError: nil, + }, + { + name: "Channel not found", + startTime: twoHoursAgo, + endTime: now, + channelFound: false, + expectedError: ErrChannelNotFound, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + // Set up event store with the events specified for the test and + // mocked time. 
+ store := NewChannelEventStore(&Config{}) + + // Start goroutine which consumes GetUptime requests. + store.wg.Add(1) + go store.consume(&subscriptions{ + channelUpdates: make(chan interface{}), + peerUpdates: make(chan interface{}), + cancel: func() {}, + }) + + // Stop the store's goroutine. + defer store.Stop() + + // Add the channel to the store if it is intended to be found. + if test.channelFound { + store.channels[test.channelPoint] = &chanEventLog{ + events: test.events, + now: func() time.Time { return now }, + openedAt: test.openedAt, + closedAt: test.closedAt, + } + } + + uptime, err := store.GetUptime(test.channelPoint, test.startTime, test.endTime) + if test.expectedError != err { + t.Fatalf("Expected: %v, got: %v", test.expectedError, err) + } + + if uptime != test.expectedUptime { + t.Fatalf("Expected uptime percentage: %v, got %v", + test.expectedUptime, uptime) + } + + }) + } +} + +// TestAddChannel tests that channels are added to the event store with +// appropriate timestamps. This test addresses a bug where offline channels +// did not have an opened time set, and checks that an online event is set for +// peers that are online at the time that a channel is opened. +func TestAddChannel(t *testing.T) { + _, vertex, chanPoint := getTestChannel(t) + + tests := []struct { + name string + + // peers maps peers to an online state. + peers map[route.Vertex]bool + + expectedEvents []eventType + }{ + { + name: "peer offline", + peers: make(map[route.Vertex]bool), + expectedEvents: []eventType{}, + }, + { + name: "peer online", + peers: map[route.Vertex]bool{ + vertex: true, + }, + expectedEvents: []eventType{peerOnlineEvent}, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + store := NewChannelEventStore(&Config{}) + store.peers = test.peers + + // Add channel to the store. + store.addChannel(chanPoint, vertex) + + // Check that the eventLog is successfully added. 
+ eventLog, ok := store.channels[chanPoint] + if !ok { + t.Fatalf("channel should be in store") + } + + // Check that the eventLog contains the events we + // expect. + for i, e := range test.expectedEvents { + if e != eventLog.events[i].eventType { + t.Fatalf("expected: %v, got: %v", + e, eventLog.events[i].eventType) + } + } + + // Ensure that open time is always set. + if eventLog.openedAt.IsZero() { + t.Fatalf("channel should have opened at set") + } + }) + } +} diff --git a/chanfitness/log.go b/chanfitness/log.go new file mode 100644 index 0000000000..626f7538dc --- /dev/null +++ b/chanfitness/log.go @@ -0,0 +1,32 @@ +package chanfitness + +import ( + "github.com/btcsuite/btclog" + "github.com/lightningnetwork/lnd/build" +) + +// Subsystem defines the logging code for this subsystem. +const Subsystem = "CHFT" + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log btclog.Logger + +// The default amount of logging is none. +func init() { + UseLogger(build.NewSubLogger(Subsystem, nil)) +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until UseLogger is called. +func DisableLog() { + UseLogger(btclog.Disabled) +} + +// UseLogger uses a specified Logger to output package logging info. +// This should be used in preference to SetLogWriter if the caller is also +// using btclog. 
+func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/channel_notifier.go b/channel_notifier.go index 36a2224a0b..960ba0d9da 100644 --- a/channel_notifier.go +++ b/channel_notifier.go @@ -7,6 +7,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/chanbackup" + "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channelnotifier" ) @@ -51,6 +52,35 @@ func (c *channelNotifier) SubscribeChans(startingChans map[wire.OutPoint]struct{ quit := make(chan struct{}) chanUpdates := make(chan chanbackup.ChannelEvent, 1) + // sendChanOpenUpdate is a closure that sends a ChannelEvent to the + // chanUpdates channel to inform subscribers about new pending or + // confirmed channels. + sendChanOpenUpdate := func(newOrPendingChan *channeldb.OpenChannel) { + nodeAddrs, err := c.addrs.AddrsForNode( + newOrPendingChan.IdentityPub, + ) + if err != nil { + pub := newOrPendingChan.IdentityPub + ltndLog.Errorf("unable to fetch addrs for %x: %v", + pub.SerializeCompressed(), err) + } + + chanEvent := chanbackup.ChannelEvent{ + NewChans: []chanbackup.ChannelWithAddrs{ + { + OpenChannel: newOrPendingChan, + Addrs: nodeAddrs, + }, + }, + } + + select { + case chanUpdates <- chanEvent: + case <-quit: + return + } + } + // In order to adhere to the interface, we'll proxy the events from the // channel notifier to the sub-swapper in a format it understands. go func() { @@ -74,37 +104,18 @@ func (c *channelNotifier) SubscribeChans(startingChans map[wire.OutPoint]struct{ // TODO(roasbeef): batch dispatch ntnfs switch event := e.(type) { - - // A new channel has been opened, we'll obtain - // the node address, then send to the + // A new channel has been opened and is still + // pending. We can still create a backup, even + // if the final channel ID is not yet available. 
+ case channelnotifier.PendingOpenChannelEvent: + pendingChan := event.PendingChannel + sendChanOpenUpdate(pendingChan) + + // A new channel has been confirmed, we'll + // obtain the node address, then send to the // sub-swapper. case channelnotifier.OpenChannelEvent: - nodeAddrs, err := c.addrs.AddrsForNode( - event.Channel.IdentityPub, - ) - if err != nil { - pub := event.Channel.IdentityPub - ltndLog.Errorf("unable to "+ - "fetch addrs for %x: %v", - pub.SerializeCompressed(), - err) - } - - channel := event.Channel - chanEvent := chanbackup.ChannelEvent{ - NewChans: []chanbackup.ChannelWithAddrs{ - { - OpenChannel: channel, - Addrs: nodeAddrs, - }, - }, - } - - select { - case chanUpdates <- chanEvent: - case <-quit: - return - } + sendChanOpenUpdate(event.Channel) // An existing channel has been closed, we'll // send only the chanPoint of the closed diff --git a/channeldb/channel.go b/channeldb/channel.go index 7bc9e48067..2031b7b337 100644 --- a/channeldb/channel.go +++ b/channeldb/channel.go @@ -2,6 +2,7 @@ package channeldb import ( "bytes" + "crypto/sha256" "encoding/binary" "errors" "fmt" @@ -15,7 +16,7 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwire" @@ -37,12 +38,27 @@ var ( // TODO(roasbeef): flesh out comment openChannelBucket = []byte("open-chan-bucket") + // historicalChannelBucket stores all channels that have seen their + // commitment tx confirm. All information from their previous open state + // is retained. + historicalChannelBucket = []byte("historical-chan-bucket") + // chanInfoKey can be accessed within the bucket for a channel // (identified by its chanPoint). This key stores all the static // information for a channel which is decided at the end of the // funding flow. 
chanInfoKey = []byte("chan-info-key") + // localUpfrontShutdownKey can be accessed within the bucket for a channel + // (identified by its chanPoint). This key stores an optional upfront + // shutdown script for the local peer. + localUpfrontShutdownKey = []byte("local-upfront-shutdown-key") + + // remoteUpfrontShutdownKey can be accessed within the bucket for a channel + // (identified by its chanPoint). This key stores an optional upfront + // shutdown script for the remote peer. + remoteUpfrontShutdownKey = []byte("remote-upfront-shutdown-key") + // chanCommitmentKey can be accessed within the sub-bucket for a // particular channel. This key stores the up to date commitment state // for a particular channel party. Appending a 0 to the end of this key @@ -51,6 +67,11 @@ var ( // party. chanCommitmentKey = []byte("chan-commitment-key") + // unsignedAckedUpdatesKey is an entry in the channel bucket that + // contains the remote updates that we have acked, but not yet signed + // for in one of our remote commits. + unsignedAckedUpdatesKey = []byte("unsigned-acked-updates-key") + // revocationStateKey stores their current revocation hash, our // preimage producer and their preimage store. revocationStateKey = []byte("revocation-state-key") @@ -59,9 +80,13 @@ var ( // remote peer during a channel sync in case we have lost channel state. dataLossCommitPointKey = []byte("data-loss-commit-point-key") - // closingTxKey points to a the closing tx that we broadcasted when - // moving the channel to state CommitBroadcasted. - closingTxKey = []byte("closing-tx-key") + // forceCloseTxKey points to a the unilateral closing tx that we + // broadcasted when moving the channel to state CommitBroadcasted. + forceCloseTxKey = []byte("closing-tx-key") + + // coopCloseTxKey points to a the cooperative closing tx that we + // broadcasted when moving the channel to state CoopBroadcasted. 
+ coopCloseTxKey = []byte("coop-closing-tx-key") // commitDiffKey stores the current pending commitment state we've // extended to the remote party (if any). Each time we propose a new @@ -78,6 +103,11 @@ var ( // channel closure. This key should be accessed from within the // sub-bucket of a target channel, identified by its channel point. revocationLogBucket = []byte("revocation-log-key") + + // frozenChanKey is the key where we store the information for any + // active "frozen" channels. This key is present only in the leaf + // bucket for a given channel. + frozenChanKey = []byte("frozen-chans") ) var ( @@ -120,44 +150,94 @@ var ( // ErrChanBorked is returned when a caller attempts to mutate a borked // channel. ErrChanBorked = fmt.Errorf("cannot mutate borked channel") + + // errLogEntryNotFound is returned when we cannot find a log entry at + // the height requested in the revocation log. + errLogEntryNotFound = fmt.Errorf("log entry not found") + + // errHeightNotFound is returned when a query for channel balances at + // a height that we have not reached yet is made. + errHeightNotReached = fmt.Errorf("height requested greater than " + + "current commit height") ) // ChannelType is an enum-like type that describes one of several possible // channel types. Each open channel is associated with a particular type as the // channel type may determine how higher level operations are conducted such as -// fee negotiation, channel closing, the format of HTLCs, etc. -// TODO(roasbeef): split up per-chain? +// fee negotiation, channel closing, the format of HTLCs, etc. Structure-wise, +// a ChannelType is a bit field, with each bit denoting a modification from the +// base channel type of single funder. type ChannelType uint8 const ( // NOTE: iota isn't used here for this enum needs to be stable // long-term as it will be persisted to the database. - // SingleFunder represents a channel wherein one party solely funds the - // entire capacity of the channel. 
- SingleFunder ChannelType = 0 + // SingleFunderBit represents a channel wherein one party solely funds + // the entire capacity of the channel. + SingleFunderBit ChannelType = 0 - // DualFunder represents a channel wherein both parties contribute + // DualFunderBit represents a channel wherein both parties contribute // funds towards the total capacity of the channel. The channel may be // funded symmetrically or asymmetrically. - DualFunder ChannelType = 1 + DualFunderBit ChannelType = 1 << 0 // SingleFunderTweakless is similar to the basic SingleFunder channel // type, but it omits the tweak for one's key in the commitment // transaction of the remote party. - SingleFunderTweakless ChannelType = 2 + SingleFunderTweaklessBit ChannelType = 1 << 1 + + // NoFundingTxBit denotes if we have the funding transaction locally on + // disk. This bit may be on if the funding transaction was crafted by a + // wallet external to the primary daemon. + NoFundingTxBit ChannelType = 1 << 2 + + // AnchorOutputsBit indicates that the channel makes use of anchor + // outputs to bump the commitment transaction's effective feerate. This + // channel type also uses a delayed to_remote output script. If bit is + // set, we'll find the size of the anchor outputs in the database. + AnchorOutputsBit ChannelType = 1 << 3 + + // FrozenBit indicates that the channel is a frozen channel, meaning + // that only the responder can decide to cooperatively close the + // channel. + FrozenBit ChannelType = 1 << 4 ) // IsSingleFunder returns true if the channel type if one of the known single // funder variants. func (c ChannelType) IsSingleFunder() bool { - return c == SingleFunder || c == SingleFunderTweakless + return c&DualFunderBit == 0 +} + +// IsDualFunder returns true if the ChannelType has the DualFunderBit set. 
+func (c ChannelType) IsDualFunder() bool { + return c&DualFunderBit == DualFunderBit } // IsTweakless returns true if the target channel uses a commitment that // doesn't tweak the key for the remote party. func (c ChannelType) IsTweakless() bool { - return c == SingleFunderTweakless + return c&SingleFunderTweaklessBit == SingleFunderTweaklessBit +} + +// HasFundingTx returns true if this channel type is one that has a funding +// transaction stored locally. +func (c ChannelType) HasFundingTx() bool { + return c&NoFundingTxBit == 0 +} + +// HasAnchors returns true if this channel type has anchor outputs on its +// commitment. +func (c ChannelType) HasAnchors() bool { + return c&AnchorOutputsBit == AnchorOutputsBit +} + +// IsFrozen returns true if the channel is considered to be "frozen". A frozen +// channel means that only the responder can initiate a cooperative channel +// closure. +func (c ChannelType) IsFrozen() bool { + return c&FrozenBit == FrozenBit } // ChannelConstraints represents a set of constraints meant to allow a node to @@ -284,10 +364,16 @@ type ChannelCommitment struct { // LocalBalance is the current available settled balance within the // channel directly spendable by us. + // + // NOTE: This is the balance *after* subtracting any commitment fee, + // AND anchor output values. LocalBalance lnwire.MilliSatoshi // RemoteBalance is the current available settled balance within the // channel directly spendable by the remote node. + // + // NOTE: This is the balance *after* subtracting any commitment fee, + // AND anchor output values. RemoteBalance lnwire.MilliSatoshi // CommitFee is the amount calculated to be paid in fees for the @@ -355,31 +441,53 @@ var ( // has been restored, and doesn't have all the fields a typical channel // will have. ChanStatusRestored ChannelStatus = 1 << 3 + + // ChanStatusCoopBroadcasted indicates that a cooperative close for + // this channel has been broadcasted.
Older cooperatively closed + // channels will only have this status set. Newer ones will also have + // close initiator information stored using the local/remote initiator + // status. This status is set in conjunction with the initiator status + // so that we do not need to check multiple channel statuses for + // cooperative closes. + ChanStatusCoopBroadcasted ChannelStatus = 1 << 4 + + // ChanStatusLocalCloseInitiator indicates that we initiated closing + // the channel. + ChanStatusLocalCloseInitiator ChannelStatus = 1 << 5 + + // ChanStatusRemoteCloseInitiator indicates that the remote node + // initiated closing the channel. + ChanStatusRemoteCloseInitiator ChannelStatus = 1 << 6 ) // chanStatusStrings maps a ChannelStatus to a human friendly string that // describes that status. var chanStatusStrings = map[ChannelStatus]string{ - ChanStatusDefault: "ChanStatusDefault", - ChanStatusBorked: "ChanStatusBorked", - ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted", - ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss", - ChanStatusRestored: "ChanStatusRestored", + ChanStatusDefault: "ChanStatusDefault", + ChanStatusBorked: "ChanStatusBorked", + ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted", + ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss", + ChanStatusRestored: "ChanStatusRestored", + ChanStatusCoopBroadcasted: "ChanStatusCoopBroadcasted", + ChanStatusLocalCloseInitiator: "ChanStatusLocalCloseInitiator", + ChanStatusRemoteCloseInitiator: "ChanStatusRemoteCloseInitiator", } // orderedChanStatusFlags is an in-order list of all that channel status flags. var orderedChanStatusFlags = []ChannelStatus{ - ChanStatusDefault, ChanStatusBorked, ChanStatusCommitBroadcasted, ChanStatusLocalDataLoss, ChanStatusRestored, + ChanStatusCoopBroadcasted, + ChanStatusLocalCloseInitiator, + ChanStatusRemoteCloseInitiator, } // String returns a human-readable representation of the ChannelStatus.
func (c ChannelStatus) String() string { // If no flags are set, then this is the default case. - if c == 0 { + if c == ChanStatusDefault { return chanStatusStrings[ChanStatusDefault] } @@ -529,9 +637,26 @@ type OpenChannel struct { // is found to be pending. // // NOTE: This value will only be populated for single-funder channels - // for which we are the initiator. + // for which we are the initiator, and that we also have the funding + // transaction for. One can check this by using the HasFundingTx() + // method on the ChanType field. FundingTxn *wire.MsgTx + // LocalShutdownScript is set to a pre-set script if the channel was opened + // by the local node with option_upfront_shutdown_script set. If the option + // was not set, the field is empty. + LocalShutdownScript lnwire.DeliveryAddress + + // RemoteShutdownScript is set to a pre-set script if the channel was opened + // by the remote node with option_upfront_shutdown_script set. If the option + // was not set, the field is empty. + RemoteShutdownScript lnwire.DeliveryAddress + + // ThawHeight is the height when a frozen channel once again becomes a + // normal channel. If this is zero, then there're no restrictions on + // this channel. + ThawHeight uint32 + // TODO(roasbeef): eww Db *DB @@ -585,17 +710,26 @@ func (c *OpenChannel) HasChanStatus(status ChannelStatus) bool { } func (c *OpenChannel) hasChanStatus(status ChannelStatus) bool { + // Special case ChanStatusDefault since it isn't actually a flag, but a + // particular combination (or lack thereof) of flags. + if status == ChanStatusDefault { + return c.chanStatus == ChanStatusDefault + } + return c.chanStatus&status == status } -// RefreshShortChanID updates the in-memory short channel ID using the latest +// RefreshShortChanID updates the in-memory channel state using the latest // value observed on disk.
+// +// TODO: the name of this function should be changed to reflect the fact that +// it is not only refreshing the short channel id but all the channel state. +// maybe Refresh/Reload? func (c *OpenChannel) RefreshShortChanID() error { c.Lock() defer c.Unlock() - var sid lnwire.ShortChannelID - err := c.Db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { chanBucket, err := fetchChanBucket( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) @@ -603,49 +737,48 @@ func (c *OpenChannel) RefreshShortChanID() error { return err } - channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint) - if err != nil { - return err + // We'll re-populate the in-memory channel with the info + // fetched from disk. + if err := fetchChanInfo(chanBucket, c); err != nil { + return fmt.Errorf("unable to fetch chan info: %v", err) } - sid = channel.ShortChannelID - return nil }) if err != nil { return err } - c.ShortChannelID = sid - c.Packager = NewChannelPackager(sid) - return nil } // fetchChanBucket is a helper function that returns the bucket where a // channel's data resides in given: the public key for the node, the outpoint, // and the chainhash that the channel resides on. -func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey, - outPoint *wire.OutPoint, chainHash chainhash.Hash) (*bbolt.Bucket, error) { +func fetchChanBucket(tx kvdb.ReadTx, nodeKey *btcec.PublicKey, + outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.ReadBucket, error) { // First fetch the top level bucket which stores all data related to // current, active channels.
- openChanBucket := tx.Bucket(openChannelBucket) + openChanBucket := tx.ReadBucket(openChannelBucket) if openChanBucket == nil { return nil, ErrNoChanDBExists } + // TODO(roasbeef): CreateTopLevelBucket on the interface isn't like + // CreateIfNotExists, will return error + // Within this top level bucket, fetch the bucket dedicated to storing // open channel data specific to the remote node. nodePub := nodeKey.SerializeCompressed() - nodeChanBucket := openChanBucket.Bucket(nodePub) + nodeChanBucket := openChanBucket.NestedReadBucket(nodePub) if nodeChanBucket == nil { return nil, ErrNoActiveChannels } // We'll then recurse down an additional layer in order to fetch the // bucket for this particular chain. - chainBucket := nodeChanBucket.Bucket(chainHash[:]) + chainBucket := nodeChanBucket.NestedReadBucket(chainHash[:]) if chainBucket == nil { return nil, ErrNoActiveChannels } @@ -656,7 +789,7 @@ func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey, if err := writeOutpoint(&chanPointBuf, outPoint); err != nil { return nil, err } - chanBucket := chainBucket.Bucket(chanPointBuf.Bytes()) + chanBucket := chainBucket.NestedReadBucket(chanPointBuf.Bytes()) if chanBucket == nil { return nil, ErrChannelNotFound } @@ -664,12 +797,27 @@ func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey, return chanBucket, nil } +// fetchChanBucketRw is a helper function that returns the bucket where a +// channel's data resides in given: the public key for the node, the outpoint, +// and the chainhash that the channel resides on. This differs from +// fetchChanBucket in that it returns a writeable bucket. 
+func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey, // nolint:interfacer + outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket, error) { + + readBucket, err := fetchChanBucket(tx, nodeKey, outPoint, chainHash) + if err != nil { + return nil, err + } + + return readBucket.(kvdb.RwBucket), nil +} + // fullSync syncs the contents of an OpenChannel while re-using an existing // database transaction. -func (c *OpenChannel) fullSync(tx *bbolt.Tx) error { +func (c *OpenChannel) fullSync(tx kvdb.RwTx) error { // First fetch the top level bucket which stores all data related to // current, active channels. - openChanBucket, err := tx.CreateBucketIfNotExists(openChannelBucket) + openChanBucket, err := tx.CreateTopLevelBucket(openChannelBucket) if err != nil { return err } @@ -699,7 +847,7 @@ func (c *OpenChannel) fullSync(tx *bbolt.Tx) error { chanPointBuf.Bytes(), ) switch { - case err == bbolt.ErrBucketExists: + case err == kvdb.ErrBucketExists: // If this channel already exists, then in order to avoid // overriding it, we'll return an error back up to the caller. 
return ErrChanAlreadyExists @@ -716,7 +864,7 @@ func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error { c.Lock() defer c.Unlock() - if err := c.Db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error { chanBucket, err := fetchChanBucket( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) @@ -732,7 +880,7 @@ func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error { channel.IsPending = false channel.ShortChannelID = openLoc - return putOpenChannel(chanBucket, channel) + return putOpenChannel(chanBucket.(kvdb.RwBucket), channel) }); err != nil { return err } @@ -756,7 +904,7 @@ func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error { return err } - putCommitPoint := func(chanBucket *bbolt.Bucket) error { + putCommitPoint := func(chanBucket kvdb.RwBucket) error { return chanBucket.Put(dataLossCommitPointKey, b.Bytes()) } @@ -768,7 +916,7 @@ func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error { func (c *OpenChannel) DataLossCommitPoint() (*btcec.PublicKey, error) { var commitPoint *btcec.PublicKey - err := c.Db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { chanBucket, err := fetchChanBucket( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) @@ -897,7 +1045,7 @@ func (c *OpenChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) { // active. // // NOTE: The primary mutex should already be held before this method is called. -func (c *OpenChannel) isBorked(chanBucket *bbolt.Bucket) (bool, error) { +func (c *OpenChannel) isBorked(chanBucket kvdb.ReadBucket) (bool, error) { channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint) if err != nil { return false, err @@ -912,28 +1060,85 @@ func (c *OpenChannel) isBorked(chanBucket *bbolt.Bucket) (bool, error) { // closing tx _we believe_ will appear in the chain. 
This is only used to // republish this tx at startup to ensure propagation, and we should still // handle the case where a different tx actually hits the chain. -func (c *OpenChannel) MarkCommitmentBroadcasted(closeTx *wire.MsgTx) error { +func (c *OpenChannel) MarkCommitmentBroadcasted(closeTx *wire.MsgTx, + locallyInitiated bool) error { + + return c.markBroadcasted( + ChanStatusCommitBroadcasted, forceCloseTxKey, closeTx, + locallyInitiated, + ) +} + +// MarkCoopBroadcasted marks the channel to indicate that a cooperative close +// transaction has been broadcast, either our own or the remote, and that we +// should watch the chain for it to confirm before taking further action. It +// takes as argument a cooperative close tx that could appear on chain, and +// should be rebroadcast upon startup. This is only used to republish and +// ensure propagation, and we should still handle the case where a different tx +// actually hits the chain. +func (c *OpenChannel) MarkCoopBroadcasted(closeTx *wire.MsgTx, + locallyInitiated bool) error { + + return c.markBroadcasted( + ChanStatusCoopBroadcasted, coopCloseTxKey, closeTx, + locallyInitiated, + ) +} + +// markBroadcasted is a helper function which modifies the channel status of the +// receiving channel and inserts a close transaction under the requested key, +// which should specify either a coop or force close. It adds a status which +// indicates the party that initiated the channel close. +func (c *OpenChannel) markBroadcasted(status ChannelStatus, key []byte, + closeTx *wire.MsgTx, locallyInitiated bool) error { + c.Lock() defer c.Unlock() - var b bytes.Buffer - if err := WriteElement(&b, closeTx); err != nil { - return err + // If a closing tx is provided, we'll generate a closure to write the + // transaction in the appropriate bucket under the given key. 
+ var putClosingTx func(kvdb.RwBucket) error + if closeTx != nil { + var b bytes.Buffer + if err := WriteElement(&b, closeTx); err != nil { + return err + } + + putClosingTx = func(chanBucket kvdb.RwBucket) error { + return chanBucket.Put(key, b.Bytes()) + } } - putClosingTx := func(chanBucket *bbolt.Bucket) error { - return chanBucket.Put(closingTxKey, b.Bytes()) + // Add the initiator status to the status provided. These statuses are + // set in addition to the broadcast status so that we do not need to + // migrate the original logic which does not store initiator. + if locallyInitiated { + status |= ChanStatusLocalCloseInitiator + } else { + status |= ChanStatusRemoteCloseInitiator } - return c.putChanStatus(ChanStatusCommitBroadcasted, putClosingTx) + return c.putChanStatus(status, putClosingTx) } -// BroadcastedCommitment retrieves the stored closing tx set during +// BroadcastedCommitment retrieves the stored unilateral closing tx set during // MarkCommitmentBroadcasted. If not found ErrNoCloseTx is returned. func (c *OpenChannel) BroadcastedCommitment() (*wire.MsgTx, error) { + return c.getClosingTx(forceCloseTxKey) +} + +// BroadcastedCooperative retrieves the stored cooperative closing tx set during +// MarkCoopBroadcasted. If not found ErrNoCloseTx is returned. +func (c *OpenChannel) BroadcastedCooperative() (*wire.MsgTx, error) { + return c.getClosingTx(coopCloseTxKey) +} + +// getClosingTx is a helper method which returns the stored closing transaction +// for key. The caller should use either the force or coop closing keys. 
+func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) { var closeTx *wire.MsgTx - err := c.Db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { chanBucket, err := fetchChanBucket( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) @@ -945,7 +1150,7 @@ func (c *OpenChannel) BroadcastedCommitment() (*wire.MsgTx, error) { return err } - bs := chanBucket.Get(closingTxKey) + bs := chanBucket.Get(key) if bs == nil { return ErrNoCloseTx } @@ -963,10 +1168,10 @@ func (c *OpenChannel) BroadcastedCommitment() (*wire.MsgTx, error) { // list of closures that are given the chanBucket in order to atomically add // extra information together with the new status. func (c *OpenChannel) putChanStatus(status ChannelStatus, - fs ...func(*bbolt.Bucket) error) error { + fs ...func(kvdb.RwBucket) error) error { - if err := c.Db.Update(func(tx *bbolt.Tx) error { - chanBucket, err := fetchChanBucket( + if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error { + chanBucket, err := fetchChanBucketRw( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) if err != nil { @@ -987,6 +1192,11 @@ func (c *OpenChannel) putChanStatus(status ChannelStatus, } for _, f := range fs { + // Skip execution of nil closures. + if f == nil { + continue + } + if err := f(chanBucket); err != nil { return err } @@ -1004,8 +1214,8 @@ func (c *OpenChannel) putChanStatus(status ChannelStatus, } func (c *OpenChannel) clearChanStatus(status ChannelStatus) error { - if err := c.Db.Update(func(tx *bbolt.Tx) error { - chanBucket, err := fetchChanBucket( + if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error { + chanBucket, err := fetchChanBucketRw( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) if err != nil { @@ -1034,7 +1244,7 @@ func (c *OpenChannel) clearChanStatus(status ChannelStatus) error { // putChannel serializes, and stores the current state of the channel in its // entirety. 
-func putOpenChannel(chanBucket *bbolt.Bucket, channel *OpenChannel) error { +func putOpenChannel(chanBucket kvdb.RwBucket, channel *OpenChannel) error { // First, we'll write out all the relatively static fields, that are // decided upon initial channel creation. if err := putChanInfo(chanBucket, channel); err != nil { @@ -1047,6 +1257,17 @@ func putOpenChannel(chanBucket *bbolt.Bucket, channel *OpenChannel) error { return fmt.Errorf("unable to store chan commitments: %v", err) } + // Next, if this is a frozen channel, we'll add in the auxiliary + // information we need to store. + if channel.ChanType.IsFrozen() { + err := storeThawHeight( + chanBucket, channel.ThawHeight, + ) + if err != nil { + return fmt.Errorf("unable to store thaw height: %v", err) + } + } + // Finally, we'll write out the revocation state for both parties // within a distinct key space. if err := putChanRevocationState(chanBucket, channel); err != nil { @@ -1058,7 +1279,7 @@ func putOpenChannel(chanBucket *bbolt.Bucket, channel *OpenChannel) error { return err } // fetchOpenChannel retrieves, and deserializes (including decrypting // sensitive) the complete channel currently active with the passed nodeID. -func fetchOpenChannel(chanBucket *bbolt.Bucket, +func fetchOpenChannel(chanBucket kvdb.ReadBucket, chanPoint *wire.OutPoint) (*OpenChannel, error) { channel := &OpenChannel{ @@ -1077,6 +1298,18 @@ func fetchOpenChannel(chanBucket *bbolt.Bucket, return nil, fmt.Errorf("unable to fetch chan commitments: %v", err) } + // Next, if this is a frozen channel, we'll add in the auxiliary + // information we need to store.
+ if channel.ChanType.IsFrozen() { + thawHeight, err := fetchThawHeight(chanBucket) + if err != nil { + return nil, fmt.Errorf("unable to store thaw "+ + "height: %v", err) + } + + channel.ThawHeight = thawHeight + } + // Finally, we'll retrieve the current revocation state so we can // properly if err := fetchChanRevocationState(chanBucket, channel); err != nil { @@ -1105,20 +1338,20 @@ func (c *OpenChannel) SyncPending(addr net.Addr, pendingHeight uint32) error { c.FundingBroadcastHeight = pendingHeight - return c.Db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(c.Db, func(tx kvdb.RwTx) error { return syncNewChannel(tx, c, []net.Addr{addr}) }) } // syncNewChannel will write the passed channel to disk, and also create a // LinkNode (if needed) for the channel peer. -func syncNewChannel(tx *bbolt.Tx, c *OpenChannel, addrs []net.Addr) error { +func syncNewChannel(tx kvdb.RwTx, c *OpenChannel, addrs []net.Addr) error { // First, sync all the persistent channel state to disk. if err := c.fullSync(tx); err != nil { return err } - nodeInfoBucket, err := tx.CreateBucketIfNotExists(nodeInfoBucket) + nodeInfoBucket, err := tx.CreateTopLevelBucket(nodeInfoBucket) if err != nil { return err } @@ -1140,12 +1373,17 @@ func syncNewChannel(tx *bbolt.Tx, c *OpenChannel, addrs []net.Addr) error { return putLinkNode(nodeInfoBucket, linkNode) } -// UpdateCommitment updates the commitment state for the specified party -// (remote or local). The commitment stat completely describes the balance -// state at this point in the commitment chain. This method its to be called on -// two occasions: when we revoke our prior commitment state, and when the -// remote party revokes their prior commitment state. -func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment) error { +// UpdateCommitment updates the local commitment state. It locks in the pending +// local updates that were received by us from the remote party. 
The commitment +// state completely describes the balance state at this point in the commitment +// chain. In addition to that, it persists all the remote log updates that we +// have acked, but not signed a remote commitment for yet. These need to be +// persisted to be able to produce a valid commit signature if a restart would +// occur. This method is to be called when we revoke our prior commitment +// state. +func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment, + unsignedAckedUpdates []LogUpdate) error { + c.Lock() defer c.Unlock() @@ -1156,8 +1394,8 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment) error { return ErrNoRestoredChannelMutation } - err := c.Db.Update(func(tx *bbolt.Tx) error { - chanBucket, err := fetchChanBucket( + err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error { + chanBucket, err := fetchChanBucketRw( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) if err != nil { @@ -1188,6 +1426,20 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment) error { "revocations: %v", err) } + // Persist unsigned but acked remote updates that need to be + // restored after a restart. + var b bytes.Buffer + err = serializeLogUpdates(&b, unsignedAckedUpdates) + if err != nil { + return err + } + + err = chanBucket.Put(unsignedAckedUpdatesKey, b.Bytes()) + if err != nil { + return fmt.Errorf("unable to store dangline remote "+ + "updates: %v", err) + } + return nil }) if err != nil { @@ -1199,6 +1451,74 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment) error { return nil } +// BalancesAtHeight returns the local and remote balances on our commitment +// transactions as of a given height. +// +// NOTE: these are our balances *after* subtracting the commitment fee and +// anchor outputs.
+func (c *OpenChannel) BalancesAtHeight(height uint64) (lnwire.MilliSatoshi, + lnwire.MilliSatoshi, error) { + + if height > c.LocalCommitment.CommitHeight && + height > c.RemoteCommitment.CommitHeight { + + return 0, 0, errHeightNotReached + } + + // If our current commit is as the desired height, we can return our + // current balances. + if c.LocalCommitment.CommitHeight == height { + return c.LocalCommitment.LocalBalance, + c.LocalCommitment.RemoteBalance, nil + } + + // If our current remote commit is at the desired height, we can return + // the current balances. + if c.RemoteCommitment.CommitHeight == height { + return c.RemoteCommitment.LocalBalance, + c.RemoteCommitment.RemoteBalance, nil + } + + // If we are not currently on the height requested, we need to look up + // the previous height to obtain our balances at the given height. + commit, err := c.FindPreviousState(height) + if err != nil { + return 0, 0, err + } + + return commit.LocalBalance, commit.RemoteBalance, nil +} + +// ActiveHtlcs returns a slice of HTLC's which are currently active on *both* +// commitment transactions. +func (c *OpenChannel) ActiveHtlcs() []HTLC { + c.RLock() + defer c.RUnlock() + + // We'll only return HTLC's that are locked into *both* commitment + // transactions. So we'll iterate through their set of HTLC's to note + // which ones are present on their commitment. + remoteHtlcs := make(map[[32]byte]struct{}) + for _, htlc := range c.RemoteCommitment.Htlcs { + onionHash := sha256.Sum256(htlc.OnionBlob) + remoteHtlcs[onionHash] = struct{}{} + } + + // Now that we know which HTLC's they have, we'll only mark the HTLC's + // as active if *we* know them as well. 
+ activeHtlcs := make([]HTLC, 0, len(remoteHtlcs)) + for _, htlc := range c.LocalCommitment.Htlcs { + onionHash := sha256.Sum256(htlc.OnionBlob) + if _, ok := remoteHtlcs[onionHash]; !ok { + continue + } + + activeHtlcs = append(activeHtlcs, htlc) + } + + return activeHtlcs +} + // HTLC is the on-disk representation of a hash time-locked contract. HTLCs are // contained within ChannelDeltas which encode the current state of the // commitment between state updates. @@ -1465,6 +1785,42 @@ type CommitDiff struct { SettleFailAcks []SettleFailRef } +// serializeLogUpdates serializes provided list of updates to a stream. +func serializeLogUpdates(w io.Writer, logUpdates []LogUpdate) error { + numUpdates := uint16(len(logUpdates)) + if err := binary.Write(w, byteOrder, numUpdates); err != nil { + return err + } + + for _, diff := range logUpdates { + err := WriteElements(w, diff.LogIndex, diff.UpdateMsg) + if err != nil { + return err + } + } + + return nil +} + +// deserializeLogUpdates deserializes a list of updates from a stream. 
+func deserializeLogUpdates(r io.Reader) ([]LogUpdate, error) { + var numUpdates uint16 + if err := binary.Read(r, byteOrder, &numUpdates); err != nil { + return nil, err + } + + logUpdates := make([]LogUpdate, numUpdates) + for i := 0; i < int(numUpdates); i++ { + err := ReadElements(r, + &logUpdates[i].LogIndex, &logUpdates[i].UpdateMsg, + ) + if err != nil { + return nil, err + } + } + return logUpdates, nil +} + func serializeCommitDiff(w io.Writer, diff *CommitDiff) error { if err := serializeChanCommit(w, &diff.Commitment); err != nil { return err @@ -1474,18 +1830,10 @@ func serializeCommitDiff(w io.Writer, diff *CommitDiff) error { return err } - numUpdates := uint16(len(diff.LogUpdates)) - if err := binary.Write(w, byteOrder, numUpdates); err != nil { + if err := serializeLogUpdates(w, diff.LogUpdates); err != nil { return err } - for _, diff := range diff.LogUpdates { - err := WriteElements(w, diff.LogIndex, diff.UpdateMsg) - if err != nil { - return err - } - } - numOpenRefs := uint16(len(diff.OpenedCircuitKeys)) if err := binary.Write(w, byteOrder, numOpenRefs); err != nil { return err @@ -1529,21 +1877,11 @@ func deserializeCommitDiff(r io.Reader) (*CommitDiff, error) { return nil, err } - var numUpdates uint16 - if err := binary.Read(r, byteOrder, &numUpdates); err != nil { + d.LogUpdates, err = deserializeLogUpdates(r) + if err != nil { return nil, err } - d.LogUpdates = make([]LogUpdate, numUpdates) - for i := 0; i < int(numUpdates); i++ { - err := ReadElements(r, - &d.LogUpdates[i].LogIndex, &d.LogUpdates[i].UpdateMsg, - ) - if err != nil { - return nil, err - } - } - var numOpenRefs uint16 if err := binary.Read(r, byteOrder, &numOpenRefs); err != nil { return nil, err @@ -1594,10 +1932,10 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error { return ErrNoRestoredChannelMutation } - return c.Db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(c.Db, func(tx kvdb.RwTx) error { // First, we'll grab the writable bucket where 
this channel's // data resides. - chanBucket, err := fetchChanBucket( + chanBucket, err := fetchChanBucketRw( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) if err != nil { @@ -1634,6 +1972,14 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error { return err } + // Clear unsigned acked remote updates. We are signing now for + // all that we've got. + err = chanBucket.Delete(unsignedAckedUpdatesKey) + if err != nil { + return fmt.Errorf("unable to clear dangling remote "+ + "updates: %v", err) + } + // TODO(roasbeef): use seqno to derive key for later LCP // With the bucket retrieved, we'll now serialize the commit @@ -1654,7 +2000,7 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error { // these pointers, causing the tip and the tail to point to the same entry. func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) { var cd *CommitDiff - err := c.Db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { chanBucket, err := fetchChanBucket( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) @@ -1687,6 +2033,38 @@ func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) { return cd, err } +// UnsignedAckedUpdates retrieves the persisted unsigned acked remote log +// updates that still need to be signed for. 
+func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, error) { + var updates []LogUpdate + err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { + chanBucket, err := fetchChanBucket( + tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, + ) + switch err { + case nil: + case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound: + return nil + default: + return err + } + + updateBytes := chanBucket.Get(unsignedAckedUpdatesKey) + if updateBytes == nil { + return nil + } + + r := bytes.NewReader(updateBytes) + updates, err = deserializeLogUpdates(r) + return err + }) + if err != nil { + return nil, err + } + + return updates, nil +} + // InsertNextRevocation inserts the _next_ commitment point (revocation) into // the database, and also modifies the internal RemoteNextRevocation attribute // to point to the passed key. This method is to be using during final channel @@ -1700,8 +2078,8 @@ func (c *OpenChannel) InsertNextRevocation(revKey *btcec.PublicKey) error { c.RemoteNextRevocation = revKey - err := c.Db.Update(func(tx *bbolt.Tx) error { - chanBucket, err := fetchChanBucket( + err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error { + chanBucket, err := fetchChanBucketRw( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) if err != nil { @@ -1737,8 +2115,8 @@ func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg) error { var newRemoteCommit *ChannelCommitment - err := c.Db.Update(func(tx *bbolt.Tx) error { - chanBucket, err := fetchChanBucket( + err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error { + chanBucket, err := fetchChanBucketRw( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) if err != nil { @@ -1829,7 +2207,7 @@ func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg) error { // NextLocalHtlcIndex returns the next unallocated local htlc index. 
To ensure // this always returns the next index that has been not been allocated, this // will first try to examine any pending commitments, before falling back to the -// last locked-in local commitment. +// last locked-in remote commitment. func (c *OpenChannel) NextLocalHtlcIndex() (uint64, error) { // First, load the most recent commit diff that we initiated for the // remote party. If no pending commit is found, this is not treated as @@ -1845,8 +2223,8 @@ func (c *OpenChannel) NextLocalHtlcIndex() (uint64, error) { return pendingRemoteCommit.Commitment.LocalHtlcIndex, nil } - // Otherwise, fallback to using the local htlc index of our commitment. - return c.LocalCommitment.LocalHtlcIndex, nil + // Otherwise, fallback to using the local htlc index of their commitment. + return c.RemoteCommitment.LocalHtlcIndex, nil } // LoadFwdPkgs scans the forwarding log for any packages that haven't been @@ -1857,7 +2235,7 @@ func (c *OpenChannel) LoadFwdPkgs() ([]*FwdPkg, error) { defer c.RUnlock() var fwdPkgs []*FwdPkg - if err := c.Db.View(func(tx *bbolt.Tx) error { + if err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { var err error fwdPkgs, err = c.Packager.LoadFwdPkgs(tx) return err @@ -1875,7 +2253,7 @@ func (c *OpenChannel) AckAddHtlcs(addRefs ...AddRef) error { c.Lock() defer c.Unlock() - return c.Db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(c.Db, func(tx kvdb.RwTx) error { return c.Packager.AckAddHtlcs(tx, addRefs...) }) } @@ -1888,7 +2266,7 @@ func (c *OpenChannel) AckSettleFails(settleFailRefs ...SettleFailRef) error { c.Lock() defer c.Unlock() - return c.Db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(c.Db, func(tx kvdb.RwTx) error { return c.Packager.AckSettleFails(tx, settleFailRefs...) 
}) } @@ -1899,7 +2277,7 @@ func (c *OpenChannel) SetFwdFilter(height uint64, fwdFilter *PkgFilter) error { c.Lock() defer c.Unlock() - return c.Db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(c.Db, func(tx kvdb.RwTx) error { return c.Packager.SetFwdFilter(tx, height, fwdFilter) }) } @@ -1912,7 +2290,7 @@ func (c *OpenChannel) RemoveFwdPkg(height uint64) error { c.Lock() defer c.Unlock() - return c.Db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(c.Db, func(tx kvdb.RwTx) error { return c.Packager.RemovePkg(tx, height) }) } @@ -1933,7 +2311,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) { } var commit ChannelCommitment - if err := c.Db.View(func(tx *bbolt.Tx) error { + if err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { chanBucket, err := fetchChanBucket( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) @@ -1941,7 +2319,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) { return err } - logBucket := chanBucket.Bucket(revocationLogBucket) + logBucket := chanBucket.NestedReadBucket(revocationLogBucket) if logBucket == nil { return ErrNoPastDeltas } @@ -1950,7 +2328,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) { // this channel, we'll jump to the _last_ key in bucket. As we // store the update number on disk in a big-endian format, // this will retrieve the latest entry. - cursor := logBucket.Cursor() + cursor := logBucket.ReadCursor() _, tailLogEntry := cursor.Last() logEntryReader := bytes.NewReader(tailLogEntry) @@ -1980,7 +2358,7 @@ func (c *OpenChannel) CommitmentHeight() (uint64, error) { defer c.RUnlock() var height uint64 - err := c.Db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { // Get the bucket dedicated to storing the metadata for open // channels. 
chanBucket, err := fetchChanBucket( @@ -2015,7 +2393,7 @@ func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, e defer c.RUnlock() var commit ChannelCommitment - err := c.Db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { chanBucket, err := fetchChanBucket( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) @@ -2023,7 +2401,7 @@ func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, e return err } - logBucket := chanBucket.Bucket(revocationLogBucket) + logBucket := chanBucket.NestedReadBucket(revocationLogBucket) if logBucket == nil { return ErrNoPastDeltas } @@ -2164,24 +2542,28 @@ type ChannelCloseSummary struct { // entails deleting all saved state within the database concerning this // channel. This method also takes a struct that summarizes the state of the // channel at closing, this compact representation will be the only component -// of a channel left over after a full closing. -func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary) error { +// of a channel left over after a full closing. It takes an optional set of +// channel statuses which will be written to the historical channel bucket. +// These statuses are used to record close initiators. 
+func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary, + statuses ...ChannelStatus) error { + c.Lock() defer c.Unlock() - return c.Db.Update(func(tx *bbolt.Tx) error { - openChanBucket := tx.Bucket(openChannelBucket) + return kvdb.Update(c.Db, func(tx kvdb.RwTx) error { + openChanBucket := tx.ReadWriteBucket(openChannelBucket) if openChanBucket == nil { return ErrNoChanDBExists } nodePub := c.IdentityPub.SerializeCompressed() - nodeChanBucket := openChanBucket.Bucket(nodePub) + nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub) if nodeChanBucket == nil { return ErrNoActiveChannels } - chainBucket := nodeChanBucket.Bucket(c.ChainHash[:]) + chainBucket := nodeChanBucket.NestedReadWriteBucket(c.ChainHash[:]) if chainBucket == nil { return ErrNoActiveChannels } @@ -2191,7 +2573,10 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary) error { if err != nil { return err } - chanBucket := chainBucket.Bucket(chanPointBuf.Bytes()) + chanKey := chanPointBuf.Bytes() + chanBucket := chainBucket.NestedReadWriteBucket( + chanKey, + ) if chanBucket == nil { return ErrNoActiveChannels } @@ -2208,22 +2593,55 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary) error { // Now that the index to this channel has been deleted, purge // the remaining channel metadata from the database. - err = deleteOpenChannel(chanBucket, chanPointBuf.Bytes()) + err = deleteOpenChannel(chanBucket) if err != nil { return err } + // We'll also remove the channel from the frozen channel bucket + // if we need to. + if c.ChanType.IsFrozen() { + err := deleteThawHeight(chanBucket) + if err != nil { + return err + } + } + // With the base channel data deleted, attempt to delete the // information stored within the revocation log. 
- logBucket := chanBucket.Bucket(revocationLogBucket) + logBucket := chanBucket.NestedReadWriteBucket(revocationLogBucket) if logBucket != nil { - err = chanBucket.DeleteBucket(revocationLogBucket) + err = chanBucket.DeleteNestedBucket(revocationLogBucket) if err != nil { return err } } - err = chainBucket.DeleteBucket(chanPointBuf.Bytes()) + err = chainBucket.DeleteNestedBucket(chanPointBuf.Bytes()) + if err != nil { + return err + } + + // Add channel state to the historical channel bucket. + historicalBucket, err := tx.CreateTopLevelBucket( + historicalChannelBucket, + ) + if err != nil { + return err + } + + historicalChanBucket, err := + historicalBucket.CreateBucketIfNotExists(chanKey) + if err != nil { + return err + } + + // Apply any additional statuses to the channel state. + for _, status := range statuses { + chanState.chanStatus |= status + } + + err = putOpenChannel(historicalChanBucket, chanState) if err != nil { return err } @@ -2309,7 +2727,7 @@ func (c *OpenChannel) Snapshot() *ChannelSnapshot { // latest fully committed state is returned. The first commitment returned is // the local commitment, and the second returned is the remote commitment. func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitment, error) { - err := c.Db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error { chanBucket, err := fetchChanBucket( tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, ) @@ -2331,7 +2749,7 @@ func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitmen // acting on a possible contract breach to ensure, that the caller has the most // up to date information required to deliver justice. 
func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
-	err := c.Db.View(func(tx *bbolt.Tx) error {
+	err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
 		chanBucket, err := fetchChanBucket(
 			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
 		)
@@ -2348,10 +2766,10 @@ func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
 	return c.RevocationStore, nil
 }
 
-func putChannelCloseSummary(tx *bbolt.Tx, chanID []byte,
+func putChannelCloseSummary(tx kvdb.RwTx, chanID []byte,
 	summary *ChannelCloseSummary, lastChanState *OpenChannel) error {
 
-	closedChanBucket, err := tx.CreateBucketIfNotExists(closedChannelBucket)
+	closedChanBucket, err := tx.CreateTopLevelBucket(closedChannelBucket)
 	if err != nil {
 		return err
 	}
@@ -2517,7 +2935,17 @@ func writeChanConfig(b io.Writer, c *ChannelConfig) error {
 	)
 }
 
-func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
+// fundingTxPresent returns true if we expect the funding transaction to be
+// found on disk or already populated within the passed open channel struct.
+func fundingTxPresent(channel *OpenChannel) bool {
+	chanType := channel.ChanType
+
+	return chanType.IsSingleFunder() && chanType.HasFundingTx() &&
+		channel.IsInitiator &&
+		!channel.hasChanStatus(ChanStatusRestored)
+}
+
+func putChanInfo(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
 	var w bytes.Buffer
 	if err := WriteElements(&w,
 		channel.ChanType, channel.ChainHash, channel.FundingOutpoint,
@@ -2530,10 +2958,9 @@ func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
 		return err
 	}
 
-	// For single funder channels that we initiated, write the funding txn.
-	if channel.ChanType.IsSingleFunder() && channel.IsInitiator &&
-		!channel.hasChanStatus(ChanStatusRestored) {
-
+	// For single funder channels that we initiated, and we have the
+	// funding transaction, then write the funding txn.
+	if fundingTxPresent(channel) {
 		if err := WriteElement(&w, channel.FundingTxn); err != nil {
 			return err
 		}
@@ -2546,7 +2973,60 @@ func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
 		return err
 	}
 
-	return chanBucket.Put(chanInfoKey, w.Bytes())
+	if err := chanBucket.Put(chanInfoKey, w.Bytes()); err != nil {
+		return err
+	}
+
+	// Finally, add optional shutdown scripts for the local and remote peer if
+	// they are present.
+	if err := putOptionalUpfrontShutdownScript(
+		chanBucket, localUpfrontShutdownKey, channel.LocalShutdownScript,
+	); err != nil {
+		return err
+	}
+
+	return putOptionalUpfrontShutdownScript(
+		chanBucket, remoteUpfrontShutdownKey, channel.RemoteShutdownScript,
+	)
+}
+
+// putOptionalUpfrontShutdownScript adds a shutdown script under the key
+// provided if it has a non-zero length.
+func putOptionalUpfrontShutdownScript(chanBucket kvdb.RwBucket, key []byte,
+	script []byte) error {
+	// If the script is empty, we do not need to add anything.
+	if len(script) == 0 {
+		return nil
+	}
+
+	var w bytes.Buffer
+	if err := WriteElement(&w, script); err != nil {
+		return err
+	}
+
+	return chanBucket.Put(key, w.Bytes())
+}
+
+// getOptionalUpfrontShutdownScript reads the shutdown script stored under the
+// key provided if it is present. Upfront shutdown scripts are optional, so the
+// function returns with no error if the key is not present.
+func getOptionalUpfrontShutdownScript(chanBucket kvdb.ReadBucket, key []byte,
+	script *lnwire.DeliveryAddress) error {
+
+	// Return early if the bucket does not exist, a shutdown script was not set.
+ bs := chanBucket.Get(key) + if bs == nil { + return nil + } + + var tempScript []byte + r := bytes.NewReader(bs) + if err := ReadElement(r, &tempScript); err != nil { + return err + } + *script = tempScript + + return nil } func serializeChanCommit(w io.Writer, c *ChannelCommitment) error { @@ -2562,7 +3042,7 @@ func serializeChanCommit(w io.Writer, c *ChannelCommitment) error { return SerializeHtlcs(w, c.Htlcs...) } -func putChanCommitment(chanBucket *bbolt.Bucket, c *ChannelCommitment, +func putChanCommitment(chanBucket kvdb.RwBucket, c *ChannelCommitment, local bool) error { var commitKey []byte @@ -2580,7 +3060,7 @@ func putChanCommitment(chanBucket *bbolt.Bucket, c *ChannelCommitment, return chanBucket.Put(commitKey, b.Bytes()) } -func putChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error { +func putChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) error { // If this is a restored channel, then we don't have any commitments to // write. if channel.hasChanStatus(ChanStatusRestored) { @@ -2599,7 +3079,7 @@ func putChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error { ) } -func putChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) error { +func putChanRevocationState(chanBucket kvdb.RwBucket, channel *OpenChannel) error { var b bytes.Buffer err := WriteElements( @@ -2634,7 +3114,7 @@ func readChanConfig(b io.Reader, c *ChannelConfig) error { ) } -func fetchChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error { +func fetchChanInfo(chanBucket kvdb.ReadBucket, channel *OpenChannel) error { infoBytes := chanBucket.Get(chanInfoKey) if infoBytes == nil { return ErrNoChanInfoFound @@ -2652,10 +3132,9 @@ func fetchChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error { return err } - // For single funder channels that we initiated, read the funding txn. 
- if channel.ChanType.IsSingleFunder() && channel.IsInitiator && - !channel.hasChanStatus(ChanStatusRestored) { - + // For single funder channels that we initiated and have the funding + // transaction to, read the funding txn. + if fundingTxPresent(channel) { if err := ReadElement(r, &channel.FundingTxn); err != nil { return err } @@ -2670,7 +3149,16 @@ func fetchChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error { channel.Packager = NewChannelPackager(channel.ShortChannelID) - return nil + // Finally, read the optional shutdown scripts. + if err := getOptionalUpfrontShutdownScript( + chanBucket, localUpfrontShutdownKey, &channel.LocalShutdownScript, + ); err != nil { + return err + } + + return getOptionalUpfrontShutdownScript( + chanBucket, remoteUpfrontShutdownKey, &channel.RemoteShutdownScript, + ) } func deserializeChanCommit(r io.Reader) (ChannelCommitment, error) { @@ -2693,7 +3181,7 @@ func deserializeChanCommit(r io.Reader) (ChannelCommitment, error) { return c, nil } -func fetchChanCommitment(chanBucket *bbolt.Bucket, local bool) (ChannelCommitment, error) { +func fetchChanCommitment(chanBucket kvdb.ReadBucket, local bool) (ChannelCommitment, error) { var commitKey []byte if local { commitKey = append(chanCommitmentKey, byte(0x00)) @@ -2710,7 +3198,7 @@ func fetchChanCommitment(chanBucket *bbolt.Bucket, local bool) (ChannelCommitmen return deserializeChanCommit(r) } -func fetchChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error { +func fetchChanCommitments(chanBucket kvdb.ReadBucket, channel *OpenChannel) error { var err error // If this is a restored channel, then we don't have any commitments to @@ -2731,7 +3219,7 @@ func fetchChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error return nil } -func fetchChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) error { +func fetchChanRevocationState(chanBucket kvdb.ReadBucket, channel *OpenChannel) error { revBytes := 
chanBucket.Get(revocationStateKey) if revBytes == nil { return ErrNoRevocationsFound @@ -2757,7 +3245,7 @@ func fetchChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) er return ReadElements(r, &channel.RemoteNextRevocation) } -func deleteOpenChannel(chanBucket *bbolt.Bucket, chanPointBytes []byte) error { +func deleteOpenChannel(chanBucket kvdb.RwBucket) error { if err := chanBucket.Delete(chanInfoKey); err != nil { return err @@ -2791,7 +3279,7 @@ func makeLogKey(updateNum uint64) [8]byte { return key } -func appendChannelLogEntry(log *bbolt.Bucket, +func appendChannelLogEntry(log kvdb.RwBucket, commit *ChannelCommitment) error { var b bytes.Buffer @@ -2803,15 +3291,41 @@ func appendChannelLogEntry(log *bbolt.Bucket, return log.Put(logEntrykey[:], b.Bytes()) } -func fetchChannelLogEntry(log *bbolt.Bucket, +func fetchChannelLogEntry(log kvdb.ReadBucket, updateNum uint64) (ChannelCommitment, error) { logEntrykey := makeLogKey(updateNum) commitBytes := log.Get(logEntrykey[:]) if commitBytes == nil { - return ChannelCommitment{}, fmt.Errorf("log entry not found") + return ChannelCommitment{}, errLogEntryNotFound } commitReader := bytes.NewReader(commitBytes) return deserializeChanCommit(commitReader) } + +func fetchThawHeight(chanBucket kvdb.ReadBucket) (uint32, error) { + var height uint32 + + heightBytes := chanBucket.Get(frozenChanKey) + heightReader := bytes.NewReader(heightBytes) + + if err := ReadElements(heightReader, &height); err != nil { + return 0, err + } + + return height, nil +} + +func storeThawHeight(chanBucket kvdb.RwBucket, height uint32) error { + var heightBuf bytes.Buffer + if err := WriteElements(&heightBuf, height); err != nil { + return err + } + + return chanBucket.Put(frozenChanKey, heightBuf.Bytes()) +} + +func deleteThawHeight(chanBucket kvdb.RwBucket) error { + return chanBucket.Delete(frozenChanKey) +} diff --git a/channeldb/channel_test.go b/channeldb/channel_test.go index 21bb738cf2..6b1e0ab8b1 100644 --- 
a/channeldb/channel_test.go +++ b/channeldb/channel_test.go @@ -10,12 +10,15 @@ import ( "runtime" "testing" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" _ "github.com/btcsuite/btcwallet/walletdb/bdb" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/shachain" @@ -68,6 +71,19 @@ var ( privKey, pubKey = btcec.PrivKeyFromBytes(btcec.S256(), key[:]) wireSig, _ = lnwire.NewSigFromSignature(testSig) + + testClock = clock.NewTestClock(testNow) + + // defaultPendingHeight is the default height at which we set + // channels to pending. + defaultPendingHeight = 100 + + // defaultAddr is the default address that we mark test channels pending + // with. + defaultAddr = &net.TCPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 18555, + } ) // makeTestDB creates a new instance of the ChannelDB for testing purposes. A @@ -82,7 +98,7 @@ func makeTestDB() (*DB, func(), error) { } // Next, create channeldb for the first time. - cdb, err := Open(tempDirName) + cdb, err := Open(tempDirName, OptionClock(testClock)) if err != nil { return nil, nil, err } @@ -95,21 +111,167 @@ func makeTestDB() (*DB, func(), error) { return cdb, cleanUp, nil } -func createTestChannelState(cdb *DB) (*OpenChannel, error) { +// testChannelParams is a struct which details the specifics of how a channel +// should be created. +type testChannelParams struct { + // channel is the channel that will be written to disk. + channel *OpenChannel + + // addr is the address that the channel will be synced pending with. + addr *net.TCPAddr + + // pendingHeight is the height that the channel should be recorded as + // pending. 
+	pendingHeight uint32
+
+	// openChannel is set to true if the channel should be fully marked as
+	// open. If this is false, the channel will be left in pending state.
+	openChannel bool
+}
+
+// testChannelOption is a functional option which can be used to alter the
+// default channel that is created for testing.
+type testChannelOption func(params *testChannelParams)
+
+// channelCommitmentOption is an option which allows overwriting of the default
+// commitment height and balances. The local boolean can be used to set these
+// balances on the local or remote commit.
+func channelCommitmentOption(height uint64, localBalance,
+	remoteBalance lnwire.MilliSatoshi, local bool) testChannelOption {
+
+	return func(params *testChannelParams) {
+		if local {
+			params.channel.LocalCommitment.CommitHeight = height
+			params.channel.LocalCommitment.LocalBalance = localBalance
+			params.channel.LocalCommitment.RemoteBalance = remoteBalance
+		} else {
+			params.channel.RemoteCommitment.CommitHeight = height
+			params.channel.RemoteCommitment.LocalBalance = localBalance
+			params.channel.RemoteCommitment.RemoteBalance = remoteBalance
+		}
+	}
+}
+
+// pendingHeightOption is an option which can be used to set the height the
+// channel is marked as pending at.
+func pendingHeightOption(height uint32) testChannelOption {
+	return func(params *testChannelParams) {
+		params.pendingHeight = height
+	}
+}
+
+// openChannelOption is an option which can be used to create a test channel
+// that is open.
+func openChannelOption() testChannelOption {
+	return func(params *testChannelParams) {
+		params.openChannel = true
+	}
+}
+
+// localHtlcsOption is an option which allows setting of htlcs on the local
+// commitment.
+func localHtlcsOption(htlcs []HTLC) testChannelOption {
+	return func(params *testChannelParams) {
+		params.channel.LocalCommitment.Htlcs = htlcs
+	}
+}
+
+// remoteHtlcsOption is an option which allows setting of htlcs on the remote
+// commitment.
+func remoteHtlcsOption(htlcs []HTLC) testChannelOption { + return func(params *testChannelParams) { + params.channel.RemoteCommitment.Htlcs = htlcs + } +} + +// localShutdownOption is an option which sets the local upfront shutdown +// script for the channel. +func localShutdownOption(addr lnwire.DeliveryAddress) testChannelOption { + return func(params *testChannelParams) { + params.channel.LocalShutdownScript = addr + } +} + +// remoteShutdownOption is an option which sets the remote upfront shutdown +// script for the channel. +func remoteShutdownOption(addr lnwire.DeliveryAddress) testChannelOption { + return func(params *testChannelParams) { + params.channel.RemoteShutdownScript = addr + } +} + +// fundingPointOption is an option which sets the funding outpoint of the +// channel. +func fundingPointOption(chanPoint wire.OutPoint) testChannelOption { + return func(params *testChannelParams) { + params.channel.FundingOutpoint = chanPoint + } +} + +// channelIDOption is an option which sets the short channel ID of the channel. +var channelIDOption = func(chanID lnwire.ShortChannelID) testChannelOption { + return func(params *testChannelParams) { + params.channel.ShortChannelID = chanID + } +} + +// createTestChannel writes a test channel to the database. It takes a set of +// functional options which can be used to overwrite the default of creating +// a pending channel that was broadcast at height 100. +func createTestChannel(t *testing.T, cdb *DB, + opts ...testChannelOption) *OpenChannel { + + // Create a default set of parameters. + params := &testChannelParams{ + channel: createTestChannelState(t, cdb), + addr: defaultAddr, + openChannel: false, + pendingHeight: uint32(defaultPendingHeight), + } + + // Apply all functional options to the test channel params. + for _, o := range opts { + o(params) + } + + // Mark the channel as pending. 
+ err := params.channel.SyncPending(params.addr, params.pendingHeight) + if err != nil { + t.Fatalf("unable to save and serialize channel "+ + "state: %v", err) + } + + // If the parameters do not specify that we should open the channel + // fully, we return the pending channel. + if !params.openChannel { + return params.channel + } + + // Mark the channel as open with the short channel id provided. + err = params.channel.MarkAsOpen(params.channel.ShortChannelID) + if err != nil { + t.Fatalf("unable to mark channel open: %v", err) + } + + return params.channel +} + +func createTestChannelState(t *testing.T, cdb *DB) *OpenChannel { // Simulate 1000 channel updates. producer, err := shachain.NewRevocationProducerFromBytes(key[:]) if err != nil { - return nil, err + t.Fatalf("could not get producer: %v", err) } store := shachain.NewRevocationStore() for i := 0; i < 1; i++ { preImage, err := producer.AtIndex(uint64(i)) if err != nil { - return nil, err + t.Fatalf("could not get "+ + "preimage: %v", err) } if err := store.AddNextEntry(preImage); err != nil { - return nil, err + t.Fatalf("could not add entry: %v", err) } } @@ -187,7 +349,7 @@ func createTestChannelState(cdb *DB) (*OpenChannel, error) { chanID := lnwire.NewShortChanIDFromInt(uint64(rand.Int63())) return &OpenChannel{ - ChanType: SingleFunder, + ChanType: SingleFunderBit | FrozenBit, ChainHash: key, FundingOutpoint: wire.OutPoint{Hash: key, Index: rand.Uint32()}, ShortChannelID: chanID, @@ -225,7 +387,8 @@ func createTestChannelState(cdb *DB) (*OpenChannel, error) { Db: cdb, Packager: NewChannelPackager(chanID), FundingTxn: testTx, - }, nil + ThawHeight: uint32(defaultPendingHeight), + } } func TestOpenChannelPutGetDelete(t *testing.T) { @@ -237,15 +400,10 @@ func TestOpenChannelPutGetDelete(t *testing.T) { } defer cleanUp() - // Create the test channel state, then add an additional fake HTLC - // before syncing to disk. 
- state, err := createTestChannelState(cdb) - if err != nil { - t.Fatalf("unable to create channel state: %v", err) - } - state.LocalCommitment.Htlcs = []HTLC{ - { - Signature: testSig.Serialize(), + // Create the test channel state, with additional htlcs on the local + // and remote commitment. + localHtlcs := []HTLC{ + {Signature: testSig.Serialize(), Incoming: true, Amt: 10, RHash: key, @@ -253,7 +411,8 @@ func TestOpenChannelPutGetDelete(t *testing.T) { OnionBlob: []byte("onionblob"), }, } - state.RemoteCommitment.Htlcs = []HTLC{ + + remoteHtlcs := []HTLC{ { Signature: testSig.Serialize(), Incoming: false, @@ -264,13 +423,11 @@ func TestOpenChannelPutGetDelete(t *testing.T) { }, } - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18556, - } - if err := state.SyncPending(addr, 101); err != nil { - t.Fatalf("unable to save and serialize channel state: %v", err) - } + state := createTestChannel( + t, cdb, + remoteHtlcsOption(remoteHtlcs), + localHtlcsOption(localHtlcs), + ) openChannels, err := cdb.FetchOpenChannels(state.IdentityPub) if err != nil { @@ -345,6 +502,93 @@ func TestOpenChannelPutGetDelete(t *testing.T) { } } +// TestOptionalShutdown tests the reading and writing of channels with and +// without optional shutdown script fields. 
+func TestOptionalShutdown(t *testing.T) { + local := lnwire.DeliveryAddress([]byte("local shutdown script")) + remote := lnwire.DeliveryAddress([]byte("remote shutdown script")) + + if _, err := rand.Read(remote); err != nil { + t.Fatalf("Could not create random script: %v", err) + } + + tests := []struct { + name string + localShutdown lnwire.DeliveryAddress + remoteShutdown lnwire.DeliveryAddress + }{ + { + name: "no shutdown scripts", + localShutdown: nil, + remoteShutdown: nil, + }, + { + name: "local shutdown script", + localShutdown: local, + remoteShutdown: nil, + }, + { + name: "remote shutdown script", + localShutdown: nil, + remoteShutdown: remote, + }, + { + name: "both scripts set", + localShutdown: local, + remoteShutdown: remote, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + cdb, cleanUp, err := makeTestDB() + if err != nil { + t.Fatalf("unable to make test database: %v", err) + } + defer cleanUp() + + // Create a channel with upfront scripts set as + // specified in the test. 
+ state := createTestChannel( + t, cdb, + localShutdownOption(test.localShutdown), + remoteShutdownOption(test.remoteShutdown), + ) + + openChannels, err := cdb.FetchOpenChannels( + state.IdentityPub, + ) + if err != nil { + t.Fatalf("unable to fetch open"+ + " channel: %v", err) + } + + if len(openChannels) != 1 { + t.Fatalf("Expected one channel open,"+ + " got: %v", len(openChannels)) + } + + if !bytes.Equal(openChannels[0].LocalShutdownScript, + test.localShutdown) { + + t.Fatalf("Expected local: %x, got: %x", + test.localShutdown, + openChannels[0].LocalShutdownScript) + } + + if !bytes.Equal(openChannels[0].RemoteShutdownScript, + test.remoteShutdown) { + + t.Fatalf("Expected remote: %x, got: %x", + test.remoteShutdown, + openChannels[0].RemoteShutdownScript) + } + }) + } +} + func assertCommitmentEqual(t *testing.T, a, b *ChannelCommitment) { if !reflect.DeepEqual(a, b) { _, _, line, _ := runtime.Caller(1) @@ -364,18 +608,7 @@ func TestChannelStateTransition(t *testing.T) { // First create a minimal channel, then perform a full sync in order to // persist the data. - channel, err := createTestChannelState(cdb) - if err != nil { - t.Fatalf("unable to create channel state: %v", err) - } - - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18556, - } - if err := channel.SyncPending(addr, 101); err != nil { - t.Fatalf("unable to save and serialize channel state: %v", err) - } + channel := createTestChannel(t, cdb) // Add some HTLCs which were added during this new state transition. // Half of the HTLCs are incoming, while the other half are outgoing. @@ -429,10 +662,34 @@ func TestChannelStateTransition(t *testing.T) { // First update the local node's broadcastable state and also add a // CommitDiff remote node's as well in order to simulate a proper state // transition. 
- if err := channel.UpdateCommitment(&commitment); err != nil { + unsignedAckedUpdates := []LogUpdate{ + { + LogIndex: 2, + UpdateMsg: &lnwire.UpdateAddHTLC{ + ChanID: lnwire.ChannelID{1, 2, 3}, + }, + }, + } + + err = channel.UpdateCommitment(&commitment, unsignedAckedUpdates) + if err != nil { t.Fatalf("unable to update commitment: %v", err) } + // Assert that update is correctly written to the database. + dbUnsignedAckedUpdates, err := channel.UnsignedAckedUpdates() + if err != nil { + t.Fatalf("unable to fetch dangling remote updates: %v", err) + } + if len(dbUnsignedAckedUpdates) != 1 { + t.Fatalf("unexpected number of dangling remote updates") + } + if !reflect.DeepEqual( + dbUnsignedAckedUpdates[0], unsignedAckedUpdates[0], + ) { + t.Fatalf("unexpected update") + } + // The balances, new update, the HTLCs and the changes to the fake // commitment transaction along with the modified signature should all // have been updated. @@ -654,21 +911,9 @@ func TestFetchPendingChannels(t *testing.T) { } defer cleanUp() - // Create first test channel state - state, err := createTestChannelState(cdb) - if err != nil { - t.Fatalf("unable to create channel state: %v", err) - } - - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - } - + // Create a pending channel that was broadcast at height 99. const broadcastHeight = 99 - if err := state.SyncPending(addr, broadcastHeight); err != nil { - t.Fatalf("unable to save and serialize channel state: %v", err) - } + createTestChannel(t, cdb, pendingHeightOption(broadcastHeight)) pendingChannels, err := cdb.FetchPendingChannels() if err != nil { @@ -745,35 +990,8 @@ func TestFetchClosedChannels(t *testing.T) { } defer cleanUp() - // First create a test channel, that we'll be closing within this pull - // request. 
- state, err := createTestChannelState(cdb) - if err != nil { - t.Fatalf("unable to create channel state: %v", err) - } - - // Next sync the channel to disk, marking it as being in a pending open - // state. - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - } - const broadcastHeight = 99 - if err := state.SyncPending(addr, broadcastHeight); err != nil { - t.Fatalf("unable to save and serialize channel state: %v", err) - } - - // Next, simulate the confirmation of the channel by marking it as - // pending within the database. - chanOpenLoc := lnwire.ShortChannelID{ - BlockHeight: 5, - TxIndex: 10, - TxPosition: 15, - } - err = state.MarkAsOpen(chanOpenLoc) - if err != nil { - t.Fatalf("unable to mark channel as open: %v", err) - } + // Create an open channel in the database. + state := createTestChannel(t, cdb, openChannelOption()) // Next, close the channel by including a close channel summary in the // database. @@ -853,7 +1071,6 @@ func TestFetchWaitingCloseChannels(t *testing.T) { const numChannels = 2 const broadcastHeight = 99 - addr := &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 18555} // We'll start by creating two channels within our test database. One of // them will have their funding transaction confirmed on-chain, while @@ -866,15 +1083,11 @@ func TestFetchWaitingCloseChannels(t *testing.T) { channels := make([]*OpenChannel, numChannels) for i := 0; i < numChannels; i++ { - channel, err := createTestChannelState(db) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - err = channel.SyncPending(addr, broadcastHeight) - if err != nil { - t.Fatalf("unable to sync channel: %v", err) - } - channels[i] = channel + // Create a pending channel in the database at the broadcast + // height. + channels[i] = createTestChannel( + t, db, pendingHeightOption(broadcastHeight), + ) } // We'll only confirm the first one. 
@@ -897,9 +1110,28 @@ func TestFetchWaitingCloseChannels(t *testing.T) { PreviousOutPoint: channel.FundingOutpoint, }, ) - if err := channel.MarkCommitmentBroadcasted(closeTx); err != nil { + + if err := channel.MarkCommitmentBroadcasted(closeTx, true); err != nil { t.Fatalf("unable to mark commitment broadcast: %v", err) } + + // Now try to marking a coop close with a nil tx. This should + // succeed, but it shouldn't exit when queried. + if err = channel.MarkCoopBroadcasted(nil, true); err != nil { + t.Fatalf("unable to mark nil coop broadcast: %v", err) + } + _, err := channel.BroadcastedCooperative() + if err != ErrNoCloseTx { + t.Fatalf("expected no closing tx error, got: %v", err) + } + + // Finally, modify the close tx deterministically and also mark + // it as coop closed. Later we will test that distinct + // transactions are returned for both coop and force closes. + closeTx.TxIn[0].PreviousOutPoint.Index ^= 1 + if err := channel.MarkCoopBroadcasted(closeTx, true); err != nil { + t.Fatalf("unable to mark coop broadcast: %v", err) + } } // Now, we'll fetch all the channels waiting to be closed from the @@ -909,7 +1141,7 @@ func TestFetchWaitingCloseChannels(t *testing.T) { if err != nil { t.Fatalf("unable to fetch all waiting close channels: %v", err) } - if len(waitingCloseChannels) != 2 { + if len(waitingCloseChannels) != numChannels { t.Fatalf("expected %d channels waiting to be closed, got %d", 2, len(waitingCloseChannels)) } @@ -923,24 +1155,38 @@ func TestFetchWaitingCloseChannels(t *testing.T) { channel.FundingOutpoint) } - // Finally, make sure we can retrieve the closing tx for the - // channel. - closeTx, err := channel.BroadcastedCommitment() + chanPoint := channel.FundingOutpoint + + // Assert that the force close transaction is retrievable. 
+ forceCloseTx, err := channel.BroadcastedCommitment() if err != nil { t.Fatalf("Unable to retrieve commitment: %v", err) } - if closeTx.TxIn[0].PreviousOutPoint != channel.FundingOutpoint { + if forceCloseTx.TxIn[0].PreviousOutPoint != chanPoint { + t.Fatalf("expected outpoint %v, got %v", + chanPoint, + forceCloseTx.TxIn[0].PreviousOutPoint) + } + + // Assert that the coop close transaction is retrievable. + coopCloseTx, err := channel.BroadcastedCooperative() + if err != nil { + t.Fatalf("unable to retrieve coop close: %v", err) + } + + chanPoint.Index ^= 1 + if coopCloseTx.TxIn[0].PreviousOutPoint != chanPoint { t.Fatalf("expected outpoint %v, got %v", - channel.FundingOutpoint, - closeTx.TxIn[0].PreviousOutPoint) + chanPoint, + coopCloseTx.TxIn[0].PreviousOutPoint) } } } // TestRefreshShortChanID asserts that RefreshShortChanID updates the in-memory -// short channel ID of another OpenChannel to reflect a preceding call to -// MarkOpen on a different OpenChannel. +// state of another OpenChannel to reflect a preceding call to MarkOpen on a +// different OpenChannel. func TestRefreshShortChanID(t *testing.T) { t.Parallel() @@ -951,21 +1197,7 @@ func TestRefreshShortChanID(t *testing.T) { defer cleanUp() // First create a test channel. - state, err := createTestChannelState(cdb) - if err != nil { - t.Fatalf("unable to create channel state: %v", err) - } - - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - } - - // Mark the channel as pending within the channeldb. - const broadcastHeight = 99 - if err := state.SyncPending(addr, broadcastHeight); err != nil { - t.Fatalf("unable to save and serialize channel state: %v", err) - } + state := createTestChannel(t, cdb) // Next, locate the pending channel with the database. 
pendingChannels, err := cdb.FetchPendingChannels() @@ -1038,4 +1270,373 @@ func TestRefreshShortChanID(t *testing.T) { "got %v", chanOpenLoc, pendingChannel.Packager.(*ChannelPackager).source) } + + // Check to ensure that this channel is no longer pending and this field + // is up to date. + if pendingChannel.IsPending { + t.Fatalf("channel pending state wasn't updated: want false got true") + } +} + +// TestCloseInitiator tests the setting of close initiator statuses for +// cooperative closes and local force closes. +func TestCloseInitiator(t *testing.T) { + tests := []struct { + name string + // updateChannel is called to update the channel as broadcast, + // cooperatively or not, based on the test's requirements. + updateChannel func(c *OpenChannel) error + expectedStatuses []ChannelStatus + }{ + { + name: "local coop close", + // Mark the channel as cooperatively closed, initiated + // by the local party. + updateChannel: func(c *OpenChannel) error { + return c.MarkCoopBroadcasted( + &wire.MsgTx{}, true, + ) + }, + expectedStatuses: []ChannelStatus{ + ChanStatusLocalCloseInitiator, + ChanStatusCoopBroadcasted, + }, + }, + { + name: "remote coop close", + // Mark the channel as cooperatively closed, initiated + // by the remote party. + updateChannel: func(c *OpenChannel) error { + return c.MarkCoopBroadcasted( + &wire.MsgTx{}, false, + ) + }, + expectedStatuses: []ChannelStatus{ + ChanStatusRemoteCloseInitiator, + ChanStatusCoopBroadcasted, + }, + }, + { + name: "local force close", + // Mark the channel's commitment as broadcast with + // local initiator. 
+ updateChannel: func(c *OpenChannel) error { + return c.MarkCommitmentBroadcasted( + &wire.MsgTx{}, true, + ) + }, + expectedStatuses: []ChannelStatus{ + ChanStatusLocalCloseInitiator, + ChanStatusCommitBroadcasted, + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + cdb, cleanUp, err := makeTestDB() + if err != nil { + t.Fatalf("unable to make test database: %v", + err) + } + defer cleanUp() + + // Create an open channel. + channel := createTestChannel( + t, cdb, openChannelOption(), + ) + + err = test.updateChannel(channel) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Lookup open channels in the database. + dbChans, err := fetchChannels( + cdb, pendingChannelFilter(false), + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(dbChans) != 1 { + t.Fatalf("expected 1 channel, got: %v", + len(dbChans)) + } + + // Check that the statuses that we expect were written + // to disk. + for _, status := range test.expectedStatuses { + if !dbChans[0].HasChanStatus(status) { + t.Fatalf("expected channel to have "+ + "status: %v, has status: %v", + status, dbChans[0].chanStatus) + } + } + }) + } +} + +// TestCloseChannelStatus tests setting of a channel status on the historical +// channel on channel close. +func TestCloseChannelStatus(t *testing.T) { + cdb, cleanUp, err := makeTestDB() + if err != nil { + t.Fatalf("unable to make test database: %v", + err) + } + defer cleanUp() + + // Create an open channel. 
+ channel := createTestChannel( + t, cdb, openChannelOption(), + ) + + if err := channel.CloseChannel( + &ChannelCloseSummary{ + ChanPoint: channel.FundingOutpoint, + RemotePub: channel.IdentityPub, + }, ChanStatusRemoteCloseInitiator, + ); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + histChan, err := channel.Db.FetchHistoricalChannel( + &channel.FundingOutpoint, + ) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if !histChan.HasChanStatus(ChanStatusRemoteCloseInitiator) { + t.Fatalf("channel should have status") + } +} + +// TestBalanceAtHeight tests lookup of our local and remote balance at a given +// height. +func TestBalanceAtHeight(t *testing.T) { + const ( + // Values that will be set on our current local commit in + // memory. + localHeight = 2 + localLocalBalance = 1000 + localRemoteBalance = 1500 + + // Values that will be set on our current remote commit in + // memory. + remoteHeight = 3 + remoteLocalBalance = 2000 + remoteRemoteBalance = 2500 + + // Values that will be written to disk in the revocation log. + oldHeight = 0 + oldLocalBalance = 200 + oldRemoteBalance = 300 + + // Heights to test error cases. + unknownHeight = 1 + unreachedHeight = 4 + ) + + // putRevokedState is a helper function used to put commitments is + // the revocation log bucket to test lookup of balances at heights that + // are not our current height. + putRevokedState := func(c *OpenChannel, height uint64, local, + remote lnwire.MilliSatoshi) error { + + err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error { + chanBucket, err := fetchChanBucketRw( + tx, c.IdentityPub, &c.FundingOutpoint, + c.ChainHash, + ) + if err != nil { + return err + } + + logKey := revocationLogBucket + logBucket, err := chanBucket.CreateBucketIfNotExists( + logKey, + ) + if err != nil { + return err + } + + // Make a copy of our current commitment so we do not + // need to re-fill all the required fields and copy in + // our new desired values. 
+ commit := c.LocalCommitment + commit.CommitHeight = height + commit.LocalBalance = local + commit.RemoteBalance = remote + + return appendChannelLogEntry(logBucket, &commit) + }) + + return err + } + + tests := []struct { + name string + targetHeight uint64 + expectedLocalBalance lnwire.MilliSatoshi + expectedRemoteBalance lnwire.MilliSatoshi + expectedError error + }{ + { + name: "target is current local height", + targetHeight: localHeight, + expectedLocalBalance: localLocalBalance, + expectedRemoteBalance: localRemoteBalance, + expectedError: nil, + }, + { + name: "target is current remote height", + targetHeight: remoteHeight, + expectedLocalBalance: remoteLocalBalance, + expectedRemoteBalance: remoteRemoteBalance, + expectedError: nil, + }, + { + name: "need to lookup commit", + targetHeight: oldHeight, + expectedLocalBalance: oldLocalBalance, + expectedRemoteBalance: oldRemoteBalance, + expectedError: nil, + }, + { + name: "height not found", + targetHeight: unknownHeight, + expectedLocalBalance: 0, + expectedRemoteBalance: 0, + expectedError: errLogEntryNotFound, + }, + { + name: "height not reached", + targetHeight: unreachedHeight, + expectedLocalBalance: 0, + expectedRemoteBalance: 0, + expectedError: errHeightNotReached, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + cdb, cleanUp, err := makeTestDB() + if err != nil { + t.Fatalf("unable to make test database: %v", + err) + } + defer cleanUp() + + // Create options to set the heights and balances of + // our local and remote commitments. + localCommitOpt := channelCommitmentOption( + localHeight, localLocalBalance, + localRemoteBalance, true, + ) + + remoteCommitOpt := channelCommitmentOption( + remoteHeight, remoteLocalBalance, + remoteRemoteBalance, false, + ) + + // Create an open channel. + channel := createTestChannel( + t, cdb, openChannelOption(), + localCommitOpt, remoteCommitOpt, + ) + + // Write an older commit to disk. 
+ err = putRevokedState(channel, oldHeight, + oldLocalBalance, oldRemoteBalance) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + local, remote, err := channel.BalancesAtHeight( + test.targetHeight, + ) + if err != test.expectedError { + t.Fatalf("expected: %v, got: %v", + test.expectedError, err) + } + + if local != test.expectedLocalBalance { + t.Fatalf("expected local: %v, got: %v", + test.expectedLocalBalance, local) + } + + if remote != test.expectedRemoteBalance { + t.Fatalf("expected remote: %v, got: %v", + test.expectedRemoteBalance, remote) + } + }) + } +} + +// TestHasChanStatus asserts the behavior of HasChanStatus by checking the +// behavior of various status flags in addition to the special case of +// ChanStatusDefault which is treated like a flag in the code base even though +// it isn't. +func TestHasChanStatus(t *testing.T) { + tests := []struct { + name string + status ChannelStatus + expHas map[ChannelStatus]bool + }{ + { + name: "default", + status: ChanStatusDefault, + expHas: map[ChannelStatus]bool{ + ChanStatusDefault: true, + ChanStatusBorked: false, + }, + }, + { + name: "single flag", + status: ChanStatusBorked, + expHas: map[ChannelStatus]bool{ + ChanStatusDefault: false, + ChanStatusBorked: true, + }, + }, + { + name: "multiple flags", + status: ChanStatusBorked | ChanStatusLocalDataLoss, + expHas: map[ChannelStatus]bool{ + ChanStatusDefault: false, + ChanStatusBorked: true, + ChanStatusLocalDataLoss: true, + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + c := &OpenChannel{ + chanStatus: test.status, + } + + for status, expHas := range test.expHas { + has := c.HasChanStatus(status) + if has == expHas { + continue + } + + t.Fatalf("expected chan status to "+ + "have %s? 
%t, got: %t", + status, expHas, has) + } + }) + } } diff --git a/channeldb/db.go b/channeldb/db.go index 7e8f94791e..c2842b1815 100644 --- a/channeldb/db.go +++ b/channeldb/db.go @@ -11,8 +11,12 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" "github.com/go-errors/errors" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/channeldb/migration12" + "github.com/lightningnetwork/lnd/channeldb/migration13" + "github.com/lightningnetwork/lnd/channeldb/migration_01_to_11" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lnwire" ) @@ -24,7 +28,7 @@ const ( // migration is a function which takes a prior outdated version of the database // instances and mutates the key/bucket structure to arrive at a more // up-to-date version of the database. -type migration func(tx *bbolt.Tx) error +type migration func(tx kvdb.RwTx) error type version struct { number uint32 @@ -47,19 +51,19 @@ var ( // for the update time of node and channel updates were // added. number: 1, - migration: migrateNodeAndEdgeUpdateIndex, + migration: migration_01_to_11.MigrateNodeAndEdgeUpdateIndex, }, { // The DB version that added the invoice event time // series. number: 2, - migration: migrateInvoiceTimeSeries, + migration: migration_01_to_11.MigrateInvoiceTimeSeries, }, { // The DB version that updated the embedded invoice in // outgoing payments to match the new format. number: 3, - migration: migrateInvoiceTimeSeriesOutgoingPayments, + migration: migration_01_to_11.MigrateInvoiceTimeSeriesOutgoingPayments, }, { // The version of the database where every channel @@ -67,53 +71,64 @@ var ( // a policy is unknown, this will be represented // by a special byte sequence. 
number: 4, - migration: migrateEdgePolicies, + migration: migration_01_to_11.MigrateEdgePolicies, }, { // The DB version where we persist each attempt to send // an HTLC to a payment hash, and track whether the // payment is in-flight, succeeded, or failed. number: 5, - migration: paymentStatusesMigration, + migration: migration_01_to_11.PaymentStatusesMigration, }, { // The DB version that properly prunes stale entries // from the edge update index. number: 6, - migration: migratePruneEdgeUpdateIndex, + migration: migration_01_to_11.MigratePruneEdgeUpdateIndex, }, { // The DB version that migrates the ChannelCloseSummary // to a format where optional fields are indicated with // boolean flags. number: 7, - migration: migrateOptionalChannelCloseSummaryFields, + migration: migration_01_to_11.MigrateOptionalChannelCloseSummaryFields, }, { // The DB version that changes the gossiper's message // store keys to account for the message's type and // ShortChannelID. number: 8, - migration: migrateGossipMessageStoreKeys, + migration: migration_01_to_11.MigrateGossipMessageStoreKeys, }, { // The DB version where the payments and payment // statuses are moved to being stored in a combined // bucket. number: 9, - migration: migrateOutgoingPayments, + migration: migration_01_to_11.MigrateOutgoingPayments, }, { // The DB version where we started to store legacy // payload information for all routes, as well as the // optional TLV records. number: 10, - migration: migrateRouteSerialization, + migration: migration_01_to_11.MigrateRouteSerialization, }, { // Add invoice htlc and cltv delta fields. number: 11, - migration: migrateInvoices, + migration: migration_01_to_11.MigrateInvoices, + }, + { + // Migrate to TLV invoice bodies, add payment address + // and features, remove receipt. + number: 12, + migration: migration12.MigrateInvoiceTLV, + }, + { + // Migrate to multi-path payments. 
+ number: 13, + migration: migration13.MigrateMPP, }, } @@ -126,10 +141,10 @@ var ( // information related to nodes, routing data, open/closed channels, fee // schedules, and reputation data. type DB struct { - *bbolt.DB + kvdb.Backend dbPath string graph *ChannelGraph - now func() time.Time + clock clock.Clock } // Open opens an existing channeldb. Any necessary schemas migrations due to @@ -150,20 +165,15 @@ func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) { // Specify bbolt freelist options to reduce heap pressure in case the // freelist grows to be very large. - options := &bbolt.Options{ - NoFreelistSync: opts.NoFreelistSync, - FreelistType: bbolt.FreelistMapType, - } - - bdb, err := bbolt.Open(path, dbFilePermission, options) + bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync) if err != nil { return nil, err } chanDB := &DB{ - DB: bdb, - dbPath: dbPath, - now: time.Now, + Backend: bdb, + dbPath: dbPath, + clock: opts.clock, } chanDB.graph = newChannelGraph( chanDB, opts.RejectCacheSize, opts.ChannelCacheSize, @@ -187,41 +197,41 @@ func (d *DB) Path() string { // database. The deletion is done in a single transaction, therefore this // operation is fully atomic. 
func (d *DB) Wipe() error { - return d.Update(func(tx *bbolt.Tx) error { - err := tx.DeleteBucket(openChannelBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + return kvdb.Update(d, func(tx kvdb.RwTx) error { + err := tx.DeleteTopLevelBucket(openChannelBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } - err = tx.DeleteBucket(closedChannelBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(closedChannelBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } - err = tx.DeleteBucket(invoiceBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(invoiceBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } - err = tx.DeleteBucket(nodeInfoBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(nodeInfoBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } - err = tx.DeleteBucket(nodeBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(nodeBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } - err = tx.DeleteBucket(edgeBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(edgeBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } - err = tx.DeleteBucket(edgeIndexBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(edgeIndexBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } - err = tx.DeleteBucket(graphMetaBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(graphMetaBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } @@ -241,40 +251,36 @@ func createChannelDB(dbPath string) error { } path := filepath.Join(dbPath, dbName) - bdb, err := bbolt.Open(path, dbFilePermission, nil) + bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true) if err != nil { 
return err } - err = bdb.Update(func(tx *bbolt.Tx) error { - if _, err := tx.CreateBucket(openChannelBucket); err != nil { - return err - } - if _, err := tx.CreateBucket(closedChannelBucket); err != nil { + err = kvdb.Update(bdb, func(tx kvdb.RwTx) error { + if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil { return err } - - if _, err := tx.CreateBucket(forwardingLogBucket); err != nil { + if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil { return err } - if _, err := tx.CreateBucket(fwdPackagesKey); err != nil { + if _, err := tx.CreateTopLevelBucket(forwardingLogBucket); err != nil { return err } - if _, err := tx.CreateBucket(invoiceBucket); err != nil { + if _, err := tx.CreateTopLevelBucket(fwdPackagesKey); err != nil { return err } - if _, err := tx.CreateBucket(paymentBucket); err != nil { + if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil { return err } - if _, err := tx.CreateBucket(nodeInfoBucket); err != nil { + if _, err := tx.CreateTopLevelBucket(nodeInfoBucket); err != nil { return err } - nodes, err := tx.CreateBucket(nodeBucket) + nodes, err := tx.CreateTopLevelBucket(nodeBucket) if err != nil { return err } @@ -287,7 +293,7 @@ func createChannelDB(dbPath string) error { return err } - edges, err := tx.CreateBucket(edgeBucket) + edges, err := tx.CreateTopLevelBucket(edgeBucket) if err != nil { return err } @@ -304,7 +310,7 @@ func createChannelDB(dbPath string) error { return err } - graphMeta, err := tx.CreateBucket(graphMetaBucket) + graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket) if err != nil { return err } @@ -313,7 +319,7 @@ func createChannelDB(dbPath string) error { return err } - if _, err := tx.CreateBucket(metaBucket); err != nil { + if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil { return err } @@ -346,7 +352,7 @@ func fileExists(path string) bool { // zero-length slice is returned. 
func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) { var channels []*OpenChannel - err := d.View(func(tx *bbolt.Tx) error { + err := kvdb.View(d, func(tx kvdb.ReadTx) error { var err error channels, err = d.fetchOpenChannels(tx, nodeID) return err @@ -359,11 +365,11 @@ func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) // stored currently active/open channels associated with the target nodeID. In // the case that no active channels are known to have been created with this // node, then a zero-length slice is returned. -func (d *DB) fetchOpenChannels(tx *bbolt.Tx, +func (d *DB) fetchOpenChannels(tx kvdb.ReadTx, nodeID *btcec.PublicKey) ([]*OpenChannel, error) { // Get the bucket dedicated to storing the metadata for open channels. - openChanBucket := tx.Bucket(openChannelBucket) + openChanBucket := tx.ReadBucket(openChannelBucket) if openChanBucket == nil { return nil, nil } @@ -371,7 +377,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx, // Within this top level bucket, fetch the bucket dedicated to storing // open channel data specific to the remote node. pub := nodeID.SerializeCompressed() - nodeChanBucket := openChanBucket.Bucket(pub) + nodeChanBucket := openChanBucket.NestedReadBucket(pub) if nodeChanBucket == nil { return nil, nil } @@ -387,7 +393,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx, // If we've found a valid chainhash bucket, then we'll retrieve // that so we can extract all the channels. - chainBucket := nodeChanBucket.Bucket(chainHash) + chainBucket := nodeChanBucket.NestedReadBucket(chainHash) if chainBucket == nil { return fmt.Errorf("unable to read bucket for chain=%x", chainHash[:]) @@ -412,7 +418,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx, // fetchNodeChannels retrieves all active channels from the target chainBucket // which is under a node's dedicated channel bucket. This function is typically // used to fetch all the active channels related to a particular node. 
-func (d *DB) fetchNodeChannels(chainBucket *bbolt.Bucket) ([]*OpenChannel, error) { +func (d *DB) fetchNodeChannels(chainBucket kvdb.ReadBucket) ([]*OpenChannel, error) { var channels []*OpenChannel @@ -426,7 +432,7 @@ func (d *DB) fetchNodeChannels(chainBucket *bbolt.Bucket) ([]*OpenChannel, error // Once we've found a valid channel bucket, we'll extract it // from the node's chain bucket. - chanBucket := chainBucket.Bucket(chanPoint) + chanBucket := chainBucket.NestedReadBucket(chanPoint) var outPoint wire.OutPoint err := readOutpoint(bytes.NewReader(chanPoint), &outPoint) @@ -471,10 +477,10 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) { // structure and skipping fully decoding each channel, we save a good // bit of CPU as we don't need to do things like decompress public // keys. - chanScan := func(tx *bbolt.Tx) error { + chanScan := func(tx kvdb.ReadTx) error { // Get the bucket dedicated to storing the metadata for open // channels. - openChanBucket := tx.Bucket(openChannelBucket) + openChanBucket := tx.ReadBucket(openChannelBucket) if openChanBucket == nil { return ErrNoActiveChannels } @@ -489,7 +495,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) { return nil } - nodeChanBucket := openChanBucket.Bucket(nodePub) + nodeChanBucket := openChanBucket.NestedReadBucket(nodePub) if nodeChanBucket == nil { return nil } @@ -503,7 +509,9 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) { return nil } - chainBucket := nodeChanBucket.Bucket(chainHash) + chainBucket := nodeChanBucket.NestedReadBucket( + chainHash, + ) if chainBucket == nil { return fmt.Errorf("unable to read "+ "bucket for chain=%x", chainHash[:]) @@ -511,7 +519,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) { // Finally we reach the leaf bucket that stores // all the chanPoints for this node. 
- chanBucket := chainBucket.Bucket( + chanBucket := chainBucket.NestedReadBucket( targetChanPoint.Bytes(), ) if chanBucket == nil { @@ -533,7 +541,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) { }) } - err := d.View(chanScan) + err := kvdb.View(d, chanScan) if err != nil { return nil, err } @@ -551,42 +559,28 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) { // within the database, including pending open, fully open and channels waiting // for a closing transaction to confirm. func (d *DB) FetchAllChannels() ([]*OpenChannel, error) { - var channels []*OpenChannel - - // TODO(halseth): fetch all in one db tx. - openChannels, err := d.FetchAllOpenChannels() - if err != nil { - return nil, err - } - channels = append(channels, openChannels...) - - pendingChannels, err := d.FetchPendingChannels() - if err != nil { - return nil, err - } - channels = append(channels, pendingChannels...) - - waitingClose, err := d.FetchWaitingCloseChannels() - if err != nil { - return nil, err - } - channels = append(channels, waitingClose...) - - return channels, nil + return fetchChannels(d) } // FetchAllOpenChannels will return all channels that have the funding // transaction confirmed, and is not waiting for a closing transaction to be // confirmed. func (d *DB) FetchAllOpenChannels() ([]*OpenChannel, error) { - return fetchChannels(d, false, false) + return fetchChannels( + d, + pendingChannelFilter(false), + waitingCloseFilter(false), + ) } // FetchPendingChannels will return channels that have completed the process of // generating and broadcasting funding transactions, but whose funding // transactions have yet to be confirmed on the blockchain. 
func (d *DB) FetchPendingChannels() ([]*OpenChannel, error) { - return fetchChannels(d, true, false) + return fetchChannels(d, + pendingChannelFilter(true), + waitingCloseFilter(false), + ) } // FetchWaitingCloseChannels will return all channels that have been opened, @@ -594,31 +588,55 @@ func (d *DB) FetchPendingChannels() ([]*OpenChannel, error) { // // NOTE: This includes channels that are also pending to be opened. func (d *DB) FetchWaitingCloseChannels() ([]*OpenChannel, error) { - waitingClose, err := fetchChannels(d, false, true) - if err != nil { - return nil, err - } - pendingWaitingClose, err := fetchChannels(d, true, true) - if err != nil { - return nil, err + return fetchChannels( + d, waitingCloseFilter(true), + ) +} + +// fetchChannelsFilter applies a filter to channels retrieved in fetchchannels. +// A set of filters can be combined to filter across multiple dimensions. +type fetchChannelsFilter func(channel *OpenChannel) bool + +// pendingChannelFilter returns a filter based on whether channels are pending +// (ie, their funding transaction still needs to confirm). If pending is false, +// channels with confirmed funding transactions are returned. +func pendingChannelFilter(pending bool) fetchChannelsFilter { + return func(channel *OpenChannel) bool { + return channel.IsPending == pending } +} - return append(waitingClose, pendingWaitingClose...), nil +// waitingCloseFilter returns a filter which filters channels based on whether +// they are awaiting the confirmation of their closing transaction. If waiting +// close is true, channels that have had their closing tx broadcast are +// included. If it is false, channels that are not awaiting confirmation of +// their close transaction are returned. +func waitingCloseFilter(waitingClose bool) fetchChannelsFilter { + return func(channel *OpenChannel) bool { + // If the channel is in any other state than Default, + // then it means it is waiting to be closed. 
+ channelWaitingClose := + channel.ChanStatus() != ChanStatusDefault + + // Include the channel if it matches the value for + // waiting close that we are filtering on. + return channelWaitingClose == waitingClose + } } // fetchChannels attempts to retrieve channels currently stored in the -// database. The pending parameter determines whether only pending channels -// will be returned, or only open channels will be returned. The waitingClose -// parameter determines whether only channels waiting for a closing transaction -// to be confirmed should be returned. If no active channels exist within the -// network, then ErrNoActiveChannels is returned. -func fetchChannels(d *DB, pending, waitingClose bool) ([]*OpenChannel, error) { +// database. It takes a set of filters which are applied to each channel to +// obtain a set of channels with the desired set of properties. Only channels +// which have a true value returned for *all* of the filters will be returned. +// If no filters are provided, every channel in the open channels bucket will +// be returned. +func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error) { var channels []*OpenChannel - err := d.View(func(tx *bbolt.Tx) error { + err := kvdb.View(d, func(tx kvdb.ReadTx) error { // Get the bucket dedicated to storing the metadata for open // channels. - openChanBucket := tx.Bucket(openChannelBucket) + openChanBucket := tx.ReadBucket(openChannelBucket) if openChanBucket == nil { return ErrNoActiveChannels } @@ -626,7 +644,7 @@ func fetchChannels(d *DB, pending, waitingClose bool) ([]*OpenChannel, error) { // Next, fetch the bucket dedicated to storing metadata related // to all nodes. All keys within this bucket are the serialized // public keys of all our direct counterparties. 
- nodeMetaBucket := tx.Bucket(nodeInfoBucket) + nodeMetaBucket := tx.ReadBucket(nodeInfoBucket) if nodeMetaBucket == nil { return fmt.Errorf("node bucket not created") } @@ -634,7 +652,7 @@ func fetchChannels(d *DB, pending, waitingClose bool) ([]*OpenChannel, error) { // Finally for each node public key in the bucket, fetch all // the channels related to this particular node. return nodeMetaBucket.ForEach(func(k, v []byte) error { - nodeChanBucket := openChanBucket.Bucket(k) + nodeChanBucket := openChanBucket.NestedReadBucket(k) if nodeChanBucket == nil { return nil } @@ -649,7 +667,9 @@ func fetchChannels(d *DB, pending, waitingClose bool) ([]*OpenChannel, error) { // If we've found a valid chainhash bucket, // then we'll retrieve that so we can extract // all the channels. - chainBucket := nodeChanBucket.Bucket(chainHash) + chainBucket := nodeChanBucket.NestedReadBucket( + chainHash, + ) if chainBucket == nil { return fmt.Errorf("unable to read "+ "bucket for chain=%x", chainHash[:]) @@ -662,24 +682,27 @@ func fetchChannels(d *DB, pending, waitingClose bool) ([]*OpenChannel, error) { "node_key=%x: %v", chainHash[:], k, err) } for _, channel := range nodeChans { - if channel.IsPending != pending { - continue + // includeChannel indicates whether the channel + // meets the criteria specified by our filters. + includeChannel := true + + // Run through each filter and check whether the + // channel should be included. + for _, f := range filters { + // If the channel fails the filter, set + // includeChannel to false and don't bother + // checking the remaining filters. + if !f(channel) { + includeChannel = false + break + } } - // If the channel is in any other state - // than Default, then it means it is - // waiting to be closed. - channelWaitingClose := - channel.ChanStatus() != ChanStatusDefault - - // Only include it if we requested - // channels with the same waitingClose - // status. 
- if channelWaitingClose != waitingClose { - continue + // If the channel passed every filter, include it in + // our set of channels. + if includeChannel { + channels = append(channels, channel) } - - channels = append(channels, channel) } return nil }) @@ -702,8 +725,8 @@ func fetchChannels(d *DB, pending, waitingClose bool) ([]*OpenChannel, error) { func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) { var chanSummaries []*ChannelCloseSummary - if err := d.View(func(tx *bbolt.Tx) error { - closeBucket := tx.Bucket(closedChannelBucket) + if err := kvdb.View(d, func(tx kvdb.ReadTx) error { + closeBucket := tx.ReadBucket(closedChannelBucket) if closeBucket == nil { return ErrNoClosedChannels } @@ -740,8 +763,8 @@ var ErrClosedChannelNotFound = errors.New("unable to find closed channel summary // point of the channel in question. func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, error) { var chanSummary *ChannelCloseSummary - if err := d.View(func(tx *bbolt.Tx) error { - closeBucket := tx.Bucket(closedChannelBucket) + if err := kvdb.View(d, func(tx kvdb.ReadTx) error { + closeBucket := tx.ReadBucket(closedChannelBucket) if closeBucket == nil { return ErrClosedChannelNotFound } @@ -774,15 +797,15 @@ func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) ( *ChannelCloseSummary, error) { var chanSummary *ChannelCloseSummary - if err := d.View(func(tx *bbolt.Tx) error { - closeBucket := tx.Bucket(closedChannelBucket) + if err := kvdb.View(d, func(tx kvdb.ReadTx) error { + closeBucket := tx.ReadBucket(closedChannelBucket) if closeBucket == nil { return ErrClosedChannelNotFound } // The first 30 bytes of the channel ID and outpoint will be // equal. - cursor := closeBucket.Cursor() + cursor := closeBucket.ReadCursor() op, c := cursor.Seek(cid[:30]) // We scan over all possible candidates for this channel ID. 
@@ -822,7 +845,7 @@ func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) ( // the pending funds in a channel that has been forcibly closed have been // swept. func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error { - return d.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(d, func(tx kvdb.RwTx) error { var b bytes.Buffer if err := writeOutpoint(&b, chanPoint); err != nil { return err @@ -830,7 +853,7 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error { chanID := b.Bytes() - closedChanBucket, err := tx.CreateBucketIfNotExists( + closedChanBucket, err := tx.CreateTopLevelBucket( closedChannelBucket, ) if err != nil { @@ -875,7 +898,7 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error { // pruneLinkNode determines whether we should garbage collect a link node from // the database due to no longer having any open channels with it. If there are // any left, then this acts as a no-op. -func (d *DB) pruneLinkNode(tx *bbolt.Tx, remotePub *btcec.PublicKey) error { +func (d *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error { openChannels, err := d.fetchOpenChannels(tx, remotePub) if err != nil { return fmt.Errorf("unable to fetch open channels for peer %x: "+ @@ -895,7 +918,7 @@ func (d *DB) pruneLinkNode(tx *bbolt.Tx, remotePub *btcec.PublicKey) error { // PruneLinkNodes attempts to prune all link nodes found within the databse with // whom we no longer have any open channels with. 
func (d *DB) PruneLinkNodes() error { - return d.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(d, func(tx kvdb.RwTx) error { linkNodes, err := d.fetchAllLinkNodes(tx) if err != nil { return err @@ -939,7 +962,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error { defer chanGraph.cacheMu.Unlock() var chansRestored []uint64 - err := d.Update(func(tx *bbolt.Tx) error { + err := kvdb.Update(d, func(tx kvdb.RwTx) error { for _, channelShell := range channelShells { channel := channelShell.Chan @@ -976,7 +999,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error { Capacity: channel.Capacity, } - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadWriteBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -1050,7 +1073,7 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) { graphNode LightningNode ) - dbErr := d.View(func(tx *bbolt.Tx) error { + dbErr := kvdb.View(d, func(tx kvdb.ReadTx) error { var err error linkNode, err = fetchLinkNode(tx, nodePub) @@ -1061,7 +1084,7 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) { // We'll also query the graph for this peer to see if they have // any addresses that we don't currently have stored within the // link node database. - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -1097,6 +1120,55 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) { return dedupedAddrs, nil } +// AbandonChannel attempts to remove the target channel from the open channel +// database. If the channel was already removed (has a closed channel entry), +// then we'll return a nil error. Otherwise, we'll insert a new close summary +// into the database. +func (d *DB) AbandonChannel(chanPoint *wire.OutPoint, bestHeight uint32) error { + // With the chanPoint constructed, we'll attempt to find the target + // channel in the database. 
If we can't find the channel, then we'll
+	// return the error back to the caller.
+	dbChan, err := d.FetchChannel(*chanPoint)
+	switch {
+	// If the channel wasn't found, then it's possible that it was already
+	// abandoned from the database.
+	case err == ErrChannelNotFound:
+		_, closedErr := d.FetchClosedChannel(chanPoint)
+		if closedErr != nil {
+			return closedErr
+		}
+
+		// If the channel was already closed, then we don't return an
+		// error as we'd like for this step to be repeatable.
+		return nil
+	case err != nil:
+		return err
+	}
+
+	// Now that we've found the channel, we'll populate a close summary for
+	// the channel, so we can store as much information for this abandoned
+	// channel as possible. We also ensure that we set Pending to false, to
+	// indicate that this channel has been "fully" closed.
+	summary := &ChannelCloseSummary{
+		CloseType:               Abandoned,
+		ChanPoint:               *chanPoint,
+		ChainHash:               dbChan.ChainHash,
+		CloseHeight:             bestHeight,
+		RemotePub:               dbChan.IdentityPub,
+		Capacity:                dbChan.Capacity,
+		SettledBalance:          dbChan.LocalCommitment.LocalBalance.ToSatoshis(),
+		ShortChanID:             dbChan.ShortChanID(),
+		RemoteCurrentRevocation: dbChan.RemoteCurrentRevocation,
+		RemoteNextRevocation:    dbChan.RemoteNextRevocation,
+		LocalChanConfig:         dbChan.LocalChanCfg,
+	}
+
+	// Finally, we'll close the channel in the DB, and return back to the
+	// caller. We set ourselves as the close initiator because we abandoned
+	// the channel.
+	return dbChan.CloseChannel(summary, ChanStatusLocalCloseInitiator)
+}
+
 // syncVersions function is used for safe db version synchronization. It
 // applies migration functions to the current database and recovers the
 // previous state of db if at least one error/panic appeared during migration.
@@ -1139,7 +1211,7 @@ func (d *DB) syncVersions(versions []version) error { migrations, migrationVersions := getMigrationsToApply( versions, meta.DbVersionNumber, ) - return d.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(d, func(tx kvdb.RwTx) error { for i, migration := range migrations { if migration == nil { continue @@ -1183,3 +1255,50 @@ func getMigrationsToApply(versions []version, version uint32) ([]migration, []ui return migrations, migrationVersions } + +// fetchHistoricalChanBucket returns the channel bucket for a given outpoint +// from the historical channel bucket. If the bucket does not exist, +// ErrNoHistoricalBucket is returned. +func fetchHistoricalChanBucket(tx kvdb.ReadTx, + outPoint *wire.OutPoint) (kvdb.ReadBucket, error) { + + // First fetch the top level bucket which stores all data related to + // historically stored channels. + historicalChanBucket := tx.ReadBucket(historicalChannelBucket) + if historicalChanBucket == nil { + return nil, ErrNoHistoricalBucket + } + + // With the bucket for the node and chain fetched, we can now go down + // another level, for the channel itself. + var chanPointBuf bytes.Buffer + if err := writeOutpoint(&chanPointBuf, outPoint); err != nil { + return nil, err + } + chanBucket := historicalChanBucket.NestedReadBucket(chanPointBuf.Bytes()) + if chanBucket == nil { + return nil, ErrChannelNotFound + } + + return chanBucket, nil +} + +// FetchHistoricalChannel fetches open channel data from the historical channel +// bucket. 
+func (d *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) { + var channel *OpenChannel + err := kvdb.View(d, func(tx kvdb.ReadTx) error { + chanBucket, err := fetchHistoricalChanBucket(tx, outPoint) + if err != nil { + return err + } + + channel, err = fetchOpenChannel(chanBucket, outPoint) + return err + }) + if err != nil { + return nil, err + } + + return channel, nil +} diff --git a/channeldb/db_test.go b/channeldb/db_test.go index 198f6e2b23..e678d2a504 100644 --- a/channeldb/db_test.go +++ b/channeldb/db_test.go @@ -100,10 +100,7 @@ func TestFetchClosedChannelForID(t *testing.T) { // Create the test channel state, that we will mutate the index of the // funding point. - state, err := createTestChannelState(cdb) - if err != nil { - t.Fatalf("unable to create channel state: %v", err) - } + state := createTestChannelState(t, cdb) // Now run through the number of channels, and modify the outpoint index // to create new channel IDs. @@ -111,14 +108,12 @@ func TestFetchClosedChannelForID(t *testing.T) { // Save the open channel to disk. state.FundingOutpoint.Index = i - addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18556, - } - if err := state.SyncPending(addr, 101); err != nil { - t.Fatalf("unable to save and serialize channel "+ - "state: %v", err) - } + // Write the channel to disk in a pending state. + createTestChannel( + t, cdb, + fundingPointOption(state.FundingOutpoint), + openChannelOption(), + ) // Close the channel. To make sure we retrieve the correct // summary later, we make them differ in the SettledBalance. @@ -235,26 +230,8 @@ func TestFetchChannel(t *testing.T) { } defer cleanUp() - // Create the test channel state that we'll sync to the database - // shortly. - channelState, err := createTestChannelState(cdb) - if err != nil { - t.Fatalf("unable to create channel state: %v", err) - } - - // Mark the channel as pending, then immediately mark it as open to it - // can be fully visible. 
- addr := &net.TCPAddr{ - IP: net.ParseIP("127.0.0.1"), - Port: 18555, - } - if err := channelState.SyncPending(addr, 9); err != nil { - t.Fatalf("unable to save and serialize channel state: %v", err) - } - err = channelState.MarkAsOpen(lnwire.NewShortChanIDFromInt(99)) - if err != nil { - t.Fatalf("unable to mark channel open: %v", err) - } + // Create an open channel. + channelState := createTestChannel(t, cdb, openChannelOption()) // Next, attempt to fetch the channel by its chan point. dbChannel, err := cdb.FetchChannel(channelState.FundingOutpoint) @@ -271,7 +248,7 @@ func TestFetchChannel(t *testing.T) { // If we attempt to query for a non-exist ante channel, then we should // get an error. - channelState2, err := createTestChannelState(cdb) + channelState2 := createTestChannelState(t, cdb) if err != nil { t.Fatalf("unable to create channel state: %v", err) } @@ -403,7 +380,7 @@ func TestRestoreChannelShells(t *testing.T) { // Ensure that it isn't possible to modify the commitment state machine // of this restored channel. channel := nodeChans[0] - err = channel.UpdateCommitment(nil) + err = channel.UpdateCommitment(nil, nil) if err != ErrNoRestoredChannelMutation { t.Fatalf("able to mutate restored channel") } @@ -469,3 +446,304 @@ func TestRestoreChannelShells(t *testing.T) { t.Fatalf("only a single edge should be inserted: %v", err) } } + +// TestAbandonChannel tests that the AbandonChannel method is able to properly +// remove a channel from the database and add a close channel summary. If +// called after a channel has already been removed, the method shouldn't return +// an error. +func TestAbandonChannel(t *testing.T) { + t.Parallel() + + cdb, cleanUp, err := makeTestDB() + if err != nil { + t.Fatalf("unable to make test database: %v", err) + } + defer cleanUp() + + // If we attempt to abandon the state of a channel that doesn't exist + // in the open or closed channel bucket, then we should receive an + // error. 
+ err = cdb.AbandonChannel(&wire.OutPoint{}, 0) + if err == nil { + t.Fatalf("removing non-existent channel should have failed") + } + + // We'll now create a new channel in a pending state to abandon + // shortly. + chanState := createTestChannel(t, cdb) + + // We should now be able to abandon the channel without any errors. + closeHeight := uint32(11) + err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight) + if err != nil { + t.Fatalf("unable to abandon channel: %v", err) + } + + // At this point, the channel should no longer be found in the set of + // open channels. + _, err = cdb.FetchChannel(chanState.FundingOutpoint) + if err != ErrChannelNotFound { + t.Fatalf("channel should not have been found: %v", err) + } + + // However we should be able to retrieve a close channel summary for + // the channel. + _, err = cdb.FetchClosedChannel(&chanState.FundingOutpoint) + if err != nil { + t.Fatalf("unable to fetch closed channel: %v", err) + } + + // Finally, if we attempt to abandon the channel again, we should get a + // nil error as the channel has already been abandoned. + err = cdb.AbandonChannel(&chanState.FundingOutpoint, closeHeight) + if err != nil { + t.Fatalf("unable to abandon channel: %v", err) + } +} + +// TestFetchChannels tests the filtering of open channels in fetchChannels. +// It tests the case where no filters are provided (which is equivalent to +// FetchAllOpenChannels) and every combination of pending and waiting close. +func TestFetchChannels(t *testing.T) { + // Create static channel IDs for each kind of channel retrieved by + // fetchChannels so that the expected channel IDs can be set in tests. + var ( + // Pending is a channel that is pending open, and has not had + // a close initiated. + pendingChan = lnwire.NewShortChanIDFromInt(1) + + // pendingWaitingClose is a channel that is pending open and + // has has its closing transaction broadcast. 
+ pendingWaitingChan = lnwire.NewShortChanIDFromInt(2) + + // openChan is a channel that has confirmed on chain. + openChan = lnwire.NewShortChanIDFromInt(3) + + // openWaitingChan is a channel that has confirmed on chain, + // and it waiting for its close transaction to confirm. + openWaitingChan = lnwire.NewShortChanIDFromInt(4) + ) + + tests := []struct { + name string + filters []fetchChannelsFilter + expectedChannels map[lnwire.ShortChannelID]bool + }{ + { + name: "get all channels", + filters: []fetchChannelsFilter{}, + expectedChannels: map[lnwire.ShortChannelID]bool{ + pendingChan: true, + pendingWaitingChan: true, + openChan: true, + openWaitingChan: true, + }, + }, + { + name: "pending channels", + filters: []fetchChannelsFilter{ + pendingChannelFilter(true), + }, + expectedChannels: map[lnwire.ShortChannelID]bool{ + pendingChan: true, + pendingWaitingChan: true, + }, + }, + { + name: "open channels", + filters: []fetchChannelsFilter{ + pendingChannelFilter(false), + }, + expectedChannels: map[lnwire.ShortChannelID]bool{ + openChan: true, + openWaitingChan: true, + }, + }, + { + name: "waiting close channels", + filters: []fetchChannelsFilter{ + waitingCloseFilter(true), + }, + expectedChannels: map[lnwire.ShortChannelID]bool{ + pendingWaitingChan: true, + openWaitingChan: true, + }, + }, + { + name: "not waiting close channels", + filters: []fetchChannelsFilter{ + waitingCloseFilter(false), + }, + expectedChannels: map[lnwire.ShortChannelID]bool{ + pendingChan: true, + openChan: true, + }, + }, + { + name: "pending waiting", + filters: []fetchChannelsFilter{ + pendingChannelFilter(true), + waitingCloseFilter(true), + }, + expectedChannels: map[lnwire.ShortChannelID]bool{ + pendingWaitingChan: true, + }, + }, + { + name: "pending, not waiting", + filters: []fetchChannelsFilter{ + pendingChannelFilter(true), + waitingCloseFilter(false), + }, + expectedChannels: map[lnwire.ShortChannelID]bool{ + pendingChan: true, + }, + }, + { + name: "open waiting", + 
filters: []fetchChannelsFilter{ + pendingChannelFilter(false), + waitingCloseFilter(true), + }, + expectedChannels: map[lnwire.ShortChannelID]bool{ + openWaitingChan: true, + }, + }, + { + name: "open, not waiting", + filters: []fetchChannelsFilter{ + pendingChannelFilter(false), + waitingCloseFilter(false), + }, + expectedChannels: map[lnwire.ShortChannelID]bool{ + openChan: true, + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + cdb, cleanUp, err := makeTestDB() + if err != nil { + t.Fatalf("unable to make test "+ + "database: %v", err) + } + defer cleanUp() + + // Create a pending channel that is not awaiting close. + createTestChannel( + t, cdb, channelIDOption(pendingChan), + ) + + // Create a pending channel which has has been marked as + // broadcast, indicating that its closing transaction is + // waiting to confirm. + pendingClosing := createTestChannel( + t, cdb, + channelIDOption(pendingWaitingChan), + ) + + err = pendingClosing.MarkCoopBroadcasted(nil, true) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + // Create a open channel that is not awaiting close. + createTestChannel( + t, cdb, + channelIDOption(openChan), + openChannelOption(), + ) + + // Create a open channel which has has been marked as + // broadcast, indicating that its closing transaction is + // waiting to confirm. + openClosing := createTestChannel( + t, cdb, + channelIDOption(openWaitingChan), + openChannelOption(), + ) + err = openClosing.MarkCoopBroadcasted(nil, true) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + channels, err := fetchChannels(cdb, test.filters...) 
+ if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if len(channels) != len(test.expectedChannels) { + t.Fatalf("expected: %v channels, "+ + "got: %v", len(test.expectedChannels), + len(channels)) + } + + for _, ch := range channels { + _, ok := test.expectedChannels[ch.ShortChannelID] + if !ok { + t.Fatalf("fetch channels unexpected "+ + "channel: %v", ch.ShortChannelID) + } + } + }) + } +} + +// TestFetchHistoricalChannel tests lookup of historical channels. +func TestFetchHistoricalChannel(t *testing.T) { + cdb, cleanUp, err := makeTestDB() + if err != nil { + t.Fatalf("unable to make test database: %v", err) + } + defer cleanUp() + + // Create a an open channel in the database. + channel := createTestChannel(t, cdb, openChannelOption()) + + // First, try to lookup a channel when the bucket does not + // exist. + _, err = cdb.FetchHistoricalChannel(&channel.FundingOutpoint) + if err != ErrNoHistoricalBucket { + t.Fatalf("expected no bucket, got: %v", err) + } + + // Close the channel so that it will be written to the historical + // bucket. The values provided in the channel close summary are the + // minimum required for this call to run without panicking. + if err := channel.CloseChannel(&ChannelCloseSummary{ + ChanPoint: channel.FundingOutpoint, + RemotePub: channel.IdentityPub, + SettledBalance: btcutil.Amount(500), + }); err != nil { + t.Fatalf("unexpected error closing channel: %v", err) + } + + histChannel, err := cdb.FetchHistoricalChannel(&channel.FundingOutpoint) + if err != nil { + t.Fatalf("unexepected error getting channel: %v", err) + } + + // Set the db on our channel to nil so that we can check that all other + // fields on the channel equal those on the historical channel. + channel.Db = nil + + if !reflect.DeepEqual(histChannel, channel) { + t.Fatalf("expected: %v, got: %v", channel, histChannel) + } + + // Create an outpoint that will not be in the db and look it up. 
+ badOutpoint := &wire.OutPoint{ + Hash: channel.FundingOutpoint.Hash, + Index: channel.FundingOutpoint.Index + 1, + } + _, err = cdb.FetchHistoricalChannel(badOutpoint) + if err != ErrChannelNotFound { + t.Fatalf("expected chan not found, got: %v", err) + } + +} diff --git a/channeldb/duplicate_payments.go b/channeldb/duplicate_payments.go new file mode 100644 index 0000000000..c78e2804b7 --- /dev/null +++ b/channeldb/duplicate_payments.go @@ -0,0 +1,246 @@ +package channeldb + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +var ( + // duplicatePaymentsBucket is the name of a optional sub-bucket within + // the payment hash bucket, that is used to hold duplicate payments to a + // payment hash. This is needed to support information from earlier + // versions of lnd, where it was possible to pay to a payment hash more + // than once. + duplicatePaymentsBucket = []byte("payment-duplicate-bucket") + + // duplicatePaymentSettleInfoKey is a key used in the payment's + // sub-bucket to store the settle info of the payment. + duplicatePaymentSettleInfoKey = []byte("payment-settle-info") + + // duplicatePaymentAttemptInfoKey is a key used in the payment's + // sub-bucket to store the info about the latest attempt that was done + // for the payment in question. + duplicatePaymentAttemptInfoKey = []byte("payment-attempt-info") + + // duplicatePaymentCreationInfoKey is a key used in the payment's + // sub-bucket to store the creation info of the payment. + duplicatePaymentCreationInfoKey = []byte("payment-creation-info") + + // duplicatePaymentFailInfoKey is a key used in the payment's sub-bucket + // to store information about the reason a payment failed. 
+ duplicatePaymentFailInfoKey = []byte("payment-fail-info") + + // duplicatePaymentSequenceKey is a key used in the payment's sub-bucket + // to store the sequence number of the payment. + duplicatePaymentSequenceKey = []byte("payment-sequence-key") +) + +// duplicateHTLCAttemptInfo contains static information about a specific HTLC +// attempt for a payment. This information is used by the router to handle any +// errors coming back after an attempt is made, and to query the switch about +// the status of the attempt. +type duplicateHTLCAttemptInfo struct { + // attemptID is the unique ID used for this attempt. + attemptID uint64 + + // sessionKey is the ephemeral key used for this attempt. + sessionKey *btcec.PrivateKey + + // route is the route attempted to send the HTLC. + route route.Route +} + +// fetchDuplicatePaymentStatus fetches the payment status of the payment. If the +// payment isn't found, it will default to "StatusUnknown". +func fetchDuplicatePaymentStatus(bucket kvdb.ReadBucket) PaymentStatus { + if bucket.Get(duplicatePaymentSettleInfoKey) != nil { + return StatusSucceeded + } + + if bucket.Get(duplicatePaymentFailInfoKey) != nil { + return StatusFailed + } + + if bucket.Get(duplicatePaymentCreationInfoKey) != nil { + return StatusInFlight + } + + return StatusUnknown +} + +func deserializeDuplicateHTLCAttemptInfo(r io.Reader) ( + *duplicateHTLCAttemptInfo, error) { + + a := &duplicateHTLCAttemptInfo{} + err := ReadElements(r, &a.attemptID, &a.sessionKey) + if err != nil { + return nil, err + } + a.route, err = DeserializeRoute(r) + if err != nil { + return nil, err + } + return a, nil +} + +func deserializeDuplicatePaymentCreationInfo(r io.Reader) ( + *PaymentCreationInfo, error) { + + var scratch [8]byte + + c := &PaymentCreationInfo{} + + if _, err := io.ReadFull(r, c.PaymentHash[:]); err != nil { + return nil, err + } + + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return nil, err + } + c.Value = 
lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) + + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return nil, err + } + c.CreationTime = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0) + + if _, err := io.ReadFull(r, scratch[:4]); err != nil { + return nil, err + } + + reqLen := byteOrder.Uint32(scratch[:4]) + payReq := make([]byte, reqLen) + if reqLen > 0 { + if _, err := io.ReadFull(r, payReq); err != nil { + return nil, err + } + } + c.PaymentRequest = payReq + + return c, nil +} + +func fetchDuplicatePayment(bucket kvdb.ReadBucket) (*MPPayment, error) { + seqBytes := bucket.Get(duplicatePaymentSequenceKey) + if seqBytes == nil { + return nil, fmt.Errorf("sequence number not found") + } + + sequenceNum := binary.BigEndian.Uint64(seqBytes) + + // Get the payment status. + paymentStatus := fetchDuplicatePaymentStatus(bucket) + + // Get the PaymentCreationInfo. + b := bucket.Get(duplicatePaymentCreationInfoKey) + if b == nil { + return nil, fmt.Errorf("creation info not found") + } + + r := bytes.NewReader(b) + creationInfo, err := deserializeDuplicatePaymentCreationInfo(r) + if err != nil { + return nil, err + + } + + // Get failure reason if available. + var failureReason *FailureReason + b = bucket.Get(duplicatePaymentFailInfoKey) + if b != nil { + reason := FailureReason(b[0]) + failureReason = &reason + } + + payment := &MPPayment{ + SequenceNum: sequenceNum, + Info: creationInfo, + FailureReason: failureReason, + Status: paymentStatus, + } + + // Get the HTLCAttemptInfo. It can be absent. + b = bucket.Get(duplicatePaymentAttemptInfoKey) + if b != nil { + r = bytes.NewReader(b) + attempt, err := deserializeDuplicateHTLCAttemptInfo(r) + if err != nil { + return nil, err + } + + htlc := HTLCAttempt{ + HTLCAttemptInfo: HTLCAttemptInfo{ + AttemptID: attempt.attemptID, + Route: attempt.route, + SessionKey: attempt.sessionKey, + }, + } + + // Get the payment preimage. This is only found for + // successful payments. 
+ b = bucket.Get(duplicatePaymentSettleInfoKey) + if b != nil { + var preimg lntypes.Preimage + copy(preimg[:], b) + + htlc.Settle = &HTLCSettleInfo{ + Preimage: preimg, + SettleTime: time.Time{}, + } + } else { + // Otherwise the payment must have failed. + htlc.Failure = &HTLCFailInfo{ + FailTime: time.Time{}, + } + } + + payment.HTLCs = []HTLCAttempt{htlc} + } + + return payment, nil +} + +func fetchDuplicatePayments(paymentHashBucket kvdb.ReadBucket) ([]*MPPayment, + error) { + + var payments []*MPPayment + + // For older versions of lnd, duplicate payments to a payment hash was + // possible. These will be found in a sub-bucket indexed by their + // sequence number if available. + dup := paymentHashBucket.NestedReadBucket(duplicatePaymentsBucket) + if dup == nil { + return nil, nil + } + + err := dup.ForEach(func(k, v []byte) error { + subBucket := dup.NestedReadBucket(k) + if subBucket == nil { + // We expect one bucket for each duplicate to be found. + return fmt.Errorf("non bucket element " + + "in duplicate bucket") + } + + p, err := fetchDuplicatePayment(subBucket) + if err != nil { + return err + } + + payments = append(payments, p) + return nil + }) + if err != nil { + return nil, err + } + + return payments, nil +} diff --git a/channeldb/error.go b/channeldb/error.go index e0e7545220..b1364fb4ba 100644 --- a/channeldb/error.go +++ b/channeldb/error.go @@ -10,6 +10,11 @@ var ( // created. ErrNoChanDBExists = fmt.Errorf("channel db has not yet been created") + // ErrNoHistoricalBucket is returned when the historical channel bucket + // has not been created yet. + ErrNoHistoricalBucket = fmt.Errorf("historical channel bucket has " + + "not yet been created") + // ErrDBReversion is returned when detecting an attempt to revert to a // prior database version. 
ErrDBReversion = fmt.Errorf("channel db cannot revert to prior version") diff --git a/channeldb/forwarding_log.go b/channeldb/forwarding_log.go index 04e8719a2f..52596956fc 100644 --- a/channeldb/forwarding_log.go +++ b/channeldb/forwarding_log.go @@ -6,7 +6,7 @@ import ( "sort" "time" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" ) @@ -111,10 +111,10 @@ func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error { var timestamp [8]byte - return f.db.Batch(func(tx *bbolt.Tx) error { + return kvdb.Batch(f.db.Backend, func(tx kvdb.RwTx) error { // First, we'll fetch the bucket that stores our time series // log. - logBucket, err := tx.CreateBucketIfNotExists( + logBucket, err := tx.CreateTopLevelBucket( forwardingLogBucket, ) if err != nil { @@ -204,10 +204,10 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e recordsToSkip := q.IndexOffset recordOffset := q.IndexOffset - err := f.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(f.db, func(tx kvdb.ReadTx) error { // If the bucket wasn't found, then there aren't any events to // be returned. - logBucket := tx.Bucket(forwardingLogBucket) + logBucket := tx.ReadBucket(forwardingLogBucket) if logBucket == nil { return ErrNoForwardingEvents } @@ -223,7 +223,7 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e // our seek through the log in order to satisfy the query. // We'll continue until either we reach the end of the range, // or reach our max number of events. 
- logCursor := logBucket.Cursor() + logCursor := logBucket.ReadCursor() timestamp, events := logCursor.Seek(startTime[:]) for ; timestamp != nil && bytes.Compare(timestamp, endTime[:]) <= 0; timestamp, events = logCursor.Next() { // If our current return payload exceeds the max number diff --git a/channeldb/forwarding_package.go b/channeldb/forwarding_package.go index 4808c55ca0..4ddcf55b3b 100644 --- a/channeldb/forwarding_package.go +++ b/channeldb/forwarding_package.go @@ -7,7 +7,7 @@ import ( "fmt" "io" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" ) @@ -318,7 +318,7 @@ type SettleFailRef struct { type SettleFailAcker interface { // AckSettleFails atomically updates the settle-fail filters in *other* // channels' forwarding packages. - AckSettleFails(tx *bbolt.Tx, settleFailRefs ...SettleFailRef) error + AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error } // GlobalFwdPkgReader is an interface used to retrieve the forwarding packages @@ -326,7 +326,7 @@ type SettleFailAcker interface { type GlobalFwdPkgReader interface { // LoadChannelFwdPkgs loads all known forwarding packages for the given // channel. - LoadChannelFwdPkgs(tx *bbolt.Tx, + LoadChannelFwdPkgs(tx kvdb.RwTx, source lnwire.ShortChannelID) ([]*FwdPkg, error) } @@ -357,14 +357,14 @@ func NewSwitchPackager() *SwitchPackager { // AckSettleFails atomically updates the settle-fail filters in *other* // channels' forwarding packages, to mark that the switch has received a settle // or fail residing in the forwarding package of a link. -func (*SwitchPackager) AckSettleFails(tx *bbolt.Tx, +func (*SwitchPackager) AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error { return ackSettleFails(tx, settleFailRefs) } // LoadChannelFwdPkgs loads all forwarding packages for a particular channel. 
-func (*SwitchPackager) LoadChannelFwdPkgs(tx *bbolt.Tx, +func (*SwitchPackager) LoadChannelFwdPkgs(tx kvdb.RwTx, source lnwire.ShortChannelID) ([]*FwdPkg, error) { return loadChannelFwdPkgs(tx, source) @@ -376,19 +376,19 @@ func (*SwitchPackager) LoadChannelFwdPkgs(tx *bbolt.Tx, type FwdPackager interface { // AddFwdPkg serializes and writes a FwdPkg for this channel at the // remote commitment height included in the forwarding package. - AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error + AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error // SetFwdFilter looks up the forwarding package at the remote `height` // and sets the `fwdFilter`, marking the Adds for which: // 1) We are not the exit node // 2) Passed all validation // 3) Should be forwarded to the switch immediately after a failure - SetFwdFilter(tx *bbolt.Tx, height uint64, fwdFilter *PkgFilter) error + SetFwdFilter(tx kvdb.RwTx, height uint64, fwdFilter *PkgFilter) error // AckAddHtlcs atomically updates the add filters in this channel's // forwarding packages to mark the resolution of an Add that was // received from the remote party. - AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error + AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error // SettleFailAcker allows a link to acknowledge settle/fail HTLCs // belonging to other channels. @@ -396,11 +396,11 @@ type FwdPackager interface { // LoadFwdPkgs loads all known forwarding packages owned by this // channel. - LoadFwdPkgs(tx *bbolt.Tx) ([]*FwdPkg, error) + LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error) // RemovePkg deletes a forwarding package owned by this channel at // the provided remote `height`. - RemovePkg(tx *bbolt.Tx, height uint64) error + RemovePkg(tx kvdb.RwTx, height uint64) error } // ChannelPackager is used by a channel to manage the lifecycle of its forwarding @@ -420,8 +420,8 @@ func NewChannelPackager(source lnwire.ShortChannelID) *ChannelPackager { } // AddFwdPkg writes a newly locked in forwarding package to disk. 
-func (*ChannelPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error { - fwdPkgBkt, err := tx.CreateBucketIfNotExists(fwdPackagesKey) +func (*ChannelPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error { + fwdPkgBkt, err := tx.CreateTopLevelBucket(fwdPackagesKey) if err != nil { return err } @@ -485,7 +485,7 @@ func (*ChannelPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error { } // putLogUpdate writes an htlc to the provided `bkt`, using `index` as the key. -func putLogUpdate(bkt *bbolt.Bucket, idx uint16, htlc *LogUpdate) error { +func putLogUpdate(bkt kvdb.RwBucket, idx uint16, htlc *LogUpdate) error { var b bytes.Buffer if err := htlc.Encode(&b); err != nil { return err @@ -497,19 +497,19 @@ func putLogUpdate(bkt *bbolt.Bucket, idx uint16, htlc *LogUpdate) error { // LoadFwdPkgs scans the forwarding log for any packages that haven't been // processed, and returns their deserialized log updates in a map indexed by the // remote commitment height at which the updates were locked in. -func (p *ChannelPackager) LoadFwdPkgs(tx *bbolt.Tx) ([]*FwdPkg, error) { +func (p *ChannelPackager) LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error) { return loadChannelFwdPkgs(tx, p.source) } // loadChannelFwdPkgs loads all forwarding packages owned by `source`. -func loadChannelFwdPkgs(tx *bbolt.Tx, source lnwire.ShortChannelID) ([]*FwdPkg, error) { - fwdPkgBkt := tx.Bucket(fwdPackagesKey) +func loadChannelFwdPkgs(tx kvdb.ReadTx, source lnwire.ShortChannelID) ([]*FwdPkg, error) { + fwdPkgBkt := tx.ReadBucket(fwdPackagesKey) if fwdPkgBkt == nil { return nil, nil } sourceKey := makeLogKey(source.ToUint64()) - sourceBkt := fwdPkgBkt.Bucket(sourceKey[:]) + sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:]) if sourceBkt == nil { return nil, nil } @@ -543,23 +543,23 @@ func loadChannelFwdPkgs(tx *bbolt.Tx, source lnwire.ShortChannelID) ([]*FwdPkg, // loadFwPkg reads the packager's fwd pkg at a given height, and determines the // appropriate FwdState. 
-func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID, +func loadFwdPkg(fwdPkgBkt kvdb.ReadBucket, source lnwire.ShortChannelID, height uint64) (*FwdPkg, error) { sourceKey := makeLogKey(source.ToUint64()) - sourceBkt := fwdPkgBkt.Bucket(sourceKey[:]) + sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:]) if sourceBkt == nil { return nil, ErrCorruptedFwdPkg } heightKey := makeLogKey(height) - heightBkt := sourceBkt.Bucket(heightKey[:]) + heightBkt := sourceBkt.NestedReadBucket(heightKey[:]) if heightBkt == nil { return nil, ErrCorruptedFwdPkg } // Load ADDs from disk. - addBkt := heightBkt.Bucket(addBucketKey) + addBkt := heightBkt.NestedReadBucket(addBucketKey) if addBkt == nil { return nil, ErrCorruptedFwdPkg } @@ -582,7 +582,7 @@ func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID, } // Load SETTLE/FAILs from disk. - failSettleBkt := heightBkt.Bucket(failSettleBucketKey) + failSettleBkt := heightBkt.NestedReadBucket(failSettleBucketKey) if failSettleBkt == nil { return nil, ErrCorruptedFwdPkg } @@ -649,7 +649,7 @@ func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID, // loadHtlcs retrieves all serialized htlcs in a bucket, returning // them in order of the indexes they were written under. -func loadHtlcs(bkt *bbolt.Bucket) ([]LogUpdate, error) { +func loadHtlcs(bkt kvdb.ReadBucket) ([]LogUpdate, error) { var htlcs []LogUpdate if err := bkt.ForEach(func(_, v []byte) error { var htlc LogUpdate @@ -674,22 +674,22 @@ func loadHtlcs(bkt *bbolt.Bucket) ([]LogUpdate, error) { // leaving this channel. After a restart, we skip validation of these Adds, // since they are assumed to have already been validated, and make the switch or // outgoing link responsible for handling replays. 
-func (p *ChannelPackager) SetFwdFilter(tx *bbolt.Tx, height uint64, +func (p *ChannelPackager) SetFwdFilter(tx kvdb.RwTx, height uint64, fwdFilter *PkgFilter) error { - fwdPkgBkt := tx.Bucket(fwdPackagesKey) + fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey) if fwdPkgBkt == nil { return ErrCorruptedFwdPkg } source := makeLogKey(p.source.ToUint64()) - sourceBkt := fwdPkgBkt.Bucket(source[:]) + sourceBkt := fwdPkgBkt.NestedReadWriteBucket(source[:]) if sourceBkt == nil { return ErrCorruptedFwdPkg } heightKey := makeLogKey(height) - heightBkt := sourceBkt.Bucket(heightKey[:]) + heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:]) if heightBkt == nil { return ErrCorruptedFwdPkg } @@ -713,18 +713,18 @@ func (p *ChannelPackager) SetFwdFilter(tx *bbolt.Tx, height uint64, // AckAddHtlcs accepts a list of references to add htlcs, and updates the // AckAddFilter of those forwarding packages to indicate that a settle or fail // has been received in response to the add. -func (p *ChannelPackager) AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error { +func (p *ChannelPackager) AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error { if len(addRefs) == 0 { return nil } - fwdPkgBkt := tx.Bucket(fwdPackagesKey) + fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey) if fwdPkgBkt == nil { return ErrCorruptedFwdPkg } sourceKey := makeLogKey(p.source.ToUint64()) - sourceBkt := fwdPkgBkt.Bucket(sourceKey[:]) + sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceKey[:]) if sourceBkt == nil { return ErrCorruptedFwdPkg } @@ -753,11 +753,11 @@ func (p *ChannelPackager) AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error { // ackAddHtlcsAtHeight updates the AddAckFilter of a single forwarding package // with a list of indexes, writing the resulting filter back in its place. 
-func ackAddHtlcsAtHeight(sourceBkt *bbolt.Bucket, height uint64, +func ackAddHtlcsAtHeight(sourceBkt kvdb.RwBucket, height uint64, indexes []uint16) error { heightKey := makeLogKey(height) - heightBkt := sourceBkt.Bucket(heightKey[:]) + heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:]) if heightBkt == nil { // If the height bucket isn't found, this could be because the // forwarding package was already removed. We'll return nil to @@ -796,17 +796,17 @@ func ackAddHtlcsAtHeight(sourceBkt *bbolt.Bucket, height uint64, // package. This should only be called after the source of the Add has locked in // the settle/fail, or it becomes otherwise safe to forgo retransmitting the // settle/fail after a restart. -func (p *ChannelPackager) AckSettleFails(tx *bbolt.Tx, settleFailRefs ...SettleFailRef) error { +func (p *ChannelPackager) AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error { return ackSettleFails(tx, settleFailRefs) } // ackSettleFails persistently acknowledges a batch of settle fail references. -func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error { +func ackSettleFails(tx kvdb.RwTx, settleFailRefs []SettleFailRef) error { if len(settleFailRefs) == 0 { return nil } - fwdPkgBkt := tx.Bucket(fwdPackagesKey) + fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey) if fwdPkgBkt == nil { return ErrCorruptedFwdPkg } @@ -832,7 +832,7 @@ func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error { // settle/fail htlcs. 
for dest, destHeights := range destHeightDiffs { destKey := makeLogKey(dest.ToUint64()) - destBkt := fwdPkgBkt.Bucket(destKey[:]) + destBkt := fwdPkgBkt.NestedReadWriteBucket(destKey[:]) if destBkt == nil { // If the destination bucket is not found, this is // likely the result of the destination channel being @@ -855,11 +855,11 @@ func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error { // ackSettleFailsAtHeight given a destination bucket, acks the provided indexes // at particular a height by updating the settle fail filter. -func ackSettleFailsAtHeight(destBkt *bbolt.Bucket, height uint64, +func ackSettleFailsAtHeight(destBkt kvdb.RwBucket, height uint64, indexes []uint16) error { heightKey := makeLogKey(height) - heightBkt := destBkt.Bucket(heightKey[:]) + heightBkt := destBkt.NestedReadWriteBucket(heightKey[:]) if heightBkt == nil { // If the height bucket isn't found, this could be because the // forwarding package was already removed. We'll return nil to @@ -895,21 +895,21 @@ func ackSettleFailsAtHeight(destBkt *bbolt.Bucket, height uint64, // RemovePkg deletes the forwarding package at the given height from the // packager's source bucket. -func (p *ChannelPackager) RemovePkg(tx *bbolt.Tx, height uint64) error { - fwdPkgBkt := tx.Bucket(fwdPackagesKey) +func (p *ChannelPackager) RemovePkg(tx kvdb.RwTx, height uint64) error { + fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey) if fwdPkgBkt == nil { return nil } sourceBytes := makeLogKey(p.source.ToUint64()) - sourceBkt := fwdPkgBkt.Bucket(sourceBytes[:]) + sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceBytes[:]) if sourceBkt == nil { return ErrCorruptedFwdPkg } heightKey := makeLogKey(height) - return sourceBkt.DeleteBucket(heightKey[:]) + return sourceBkt.DeleteNestedBucket(heightKey[:]) } // uint16Key writes the provided 16-bit unsigned integer to a 2-byte slice. 
diff --git a/channeldb/forwarding_package_test.go b/channeldb/forwarding_package_test.go index 85fda950bd..ed18b6bc39 100644 --- a/channeldb/forwarding_package_test.go +++ b/channeldb/forwarding_package_test.go @@ -8,8 +8,8 @@ import ( "testing" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" ) @@ -207,7 +207,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) { // Next, create and write a new forwarding package with no htlcs. fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, nil, nil) - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AddFwdPkg(tx, fwdPkg) }); err != nil { t.Fatalf("unable to add fwd pkg: %v", err) @@ -226,7 +226,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) { // Now, write the forwarding decision. In this case, its just an empty // fwd filter. - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) }); err != nil { t.Fatalf("unable to set fwdfiter: %v", err) @@ -244,7 +244,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) { assertAckFilterIsFull(t, fwdPkgs[0], true) // Lastly, remove the completed forwarding package from disk. 
- if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.RemovePkg(tx, fwdPkg.Height) }); err != nil { t.Fatalf("unable to remove fwdpkg: %v", err) @@ -279,7 +279,7 @@ func TestPackagerOnlyAdds(t *testing.T) { nAdds := len(adds) - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AddFwdPkg(tx, fwdPkg) }); err != nil { t.Fatalf("unable to add fwd pkg: %v", err) @@ -300,7 +300,7 @@ func TestPackagerOnlyAdds(t *testing.T) { // added any adds to the fwdfilter, this would indicate that all of the // adds were 1) settled locally by this link (exit hop), or 2) the htlc // was failed locally. - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) }); err != nil { t.Fatalf("unable to set fwdfiter: %v", err) @@ -324,7 +324,7 @@ func TestPackagerOnlyAdds(t *testing.T) { Index: uint16(i), } - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AckAddHtlcs(tx, addRef) }); err != nil { t.Fatalf("unable to ack add htlc: %v", err) @@ -343,7 +343,7 @@ func TestPackagerOnlyAdds(t *testing.T) { assertAckFilterIsFull(t, fwdPkgs[0], true) // Lastly, remove the completed forwarding package from disk. 
- if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.RemovePkg(tx, fwdPkg.Height) }); err != nil { t.Fatalf("unable to remove fwdpkg: %v", err) @@ -381,7 +381,7 @@ func TestPackagerOnlySettleFails(t *testing.T) { nSettleFails := len(settleFails) - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AddFwdPkg(tx, fwdPkg) }); err != nil { t.Fatalf("unable to add fwd pkg: %v", err) @@ -402,7 +402,7 @@ func TestPackagerOnlySettleFails(t *testing.T) { // added any adds to the fwdfilter, this would indicate that all of the // adds were 1) settled locally by this link (exit hop), or 2) the htlc // was failed locally. - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) }); err != nil { t.Fatalf("unable to set fwdfiter: %v", err) @@ -428,7 +428,7 @@ func TestPackagerOnlySettleFails(t *testing.T) { Index: uint16(i), } - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AckSettleFails(tx, failSettleRef) }); err != nil { t.Fatalf("unable to ack add htlc: %v", err) @@ -448,7 +448,7 @@ func TestPackagerOnlySettleFails(t *testing.T) { assertAckFilterIsFull(t, fwdPkgs[0], true) // Lastly, remove the completed forwarding package from disk. 
- if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.RemovePkg(tx, fwdPkg.Height) }); err != nil { t.Fatalf("unable to remove fwdpkg: %v", err) @@ -486,7 +486,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) { nAdds := len(adds) nSettleFails := len(settleFails) - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AddFwdPkg(tx, fwdPkg) }); err != nil { t.Fatalf("unable to add fwd pkg: %v", err) @@ -507,7 +507,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) { // added any adds to the fwdfilter, this would indicate that all of the // adds were 1) settled locally by this link (exit hop), or 2) the htlc // was failed locally. - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) }); err != nil { t.Fatalf("unable to set fwdfiter: %v", err) @@ -532,7 +532,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) { Index: uint16(i), } - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AckAddHtlcs(tx, addRef) }); err != nil { t.Fatalf("unable to ack add htlc: %v", err) @@ -559,7 +559,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) { Index: uint16(i), } - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AckSettleFails(tx, failSettleRef) }); err != nil { t.Fatalf("unable to remove settle/fail htlc: %v", err) @@ -579,7 +579,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) { assertAckFilterIsFull(t, fwdPkgs[0], true) // Lastly, remove the completed forwarding package from disk. 
- if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.RemovePkg(tx, fwdPkg.Height) }); err != nil { t.Fatalf("unable to remove fwdpkg: %v", err) @@ -619,7 +619,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) { nAdds := len(adds) nSettleFails := len(settleFails) - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AddFwdPkg(tx, fwdPkg) }); err != nil { t.Fatalf("unable to add fwd pkg: %v", err) @@ -640,7 +640,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) { // added any adds to the fwdfilter, this would indicate that all of the // adds were 1) settled locally by this link (exit hop), or 2) the htlc // was failed locally. - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) }); err != nil { t.Fatalf("unable to set fwdfiter: %v", err) @@ -669,7 +669,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) { Index: uint16(i), } - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AckSettleFails(tx, failSettleRef) }); err != nil { t.Fatalf("unable to remove settle/fail htlc: %v", err) @@ -696,7 +696,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) { Index: uint16(i), } - if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.AckAddHtlcs(tx, addRef) }); err != nil { t.Fatalf("unable to ack add htlc: %v", err) @@ -716,7 +716,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) { assertAckFilterIsFull(t, fwdPkgs[0], true) // Lastly, remove the completed forwarding package from disk. 
- if err := db.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(db, func(tx kvdb.RwTx) error { return packager.RemovePkg(tx, fwdPkg.Height) }); err != nil { t.Fatalf("unable to remove fwdpkg: %v", err) @@ -778,11 +778,11 @@ func assertSettleFailFilterIsFull(t *testing.T, fwdPkg *channeldb.FwdPkg, expect // loadFwdPkgs is a helper method that reads all forwarding packages for a // particular packager. -func loadFwdPkgs(t *testing.T, db *bbolt.DB, +func loadFwdPkgs(t *testing.T, db kvdb.Backend, packager channeldb.FwdPackager) []*channeldb.FwdPkg { var fwdPkgs []*channeldb.FwdPkg - if err := db.View(func(tx *bbolt.Tx) error { + if err := kvdb.View(db, func(tx kvdb.ReadTx) error { var err error fwdPkgs, err = packager.LoadFwdPkgs(tx) return err @@ -795,7 +795,7 @@ func loadFwdPkgs(t *testing.T, db *bbolt.DB, // makeFwdPkgDB initializes a test database for forwarding packages. If the // provided path is an empty, it will create a temp dir/file to use. -func makeFwdPkgDB(t *testing.T, path string) *bbolt.DB { +func makeFwdPkgDB(t *testing.T, path string) kvdb.Backend { // nolint:unparam if path == "" { var err error path, err = ioutil.TempDir("", "fwdpkgdb") @@ -806,10 +806,10 @@ func makeFwdPkgDB(t *testing.T, path string) *bbolt.DB { path = filepath.Join(path, "fwdpkg.db") } - db, err := bbolt.Open(path, 0600, nil) + bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true) if err != nil { t.Fatalf("unable to open boltdb: %v", err) } - return db + return bdb } diff --git a/channeldb/graph.go b/channeldb/graph.go index 46ba131119..e0105043a5 100644 --- a/channeldb/graph.go +++ b/channeldb/graph.go @@ -18,8 +18,9 @@ import ( "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" ) var ( @@ -205,10 +206,10 @@ func (c *ChannelGraph) Database() *DB { func 
(c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error { // TODO(roasbeef): ptr map to reduce # of allocs? no duplicates - return c.db.View(func(tx *bbolt.Tx) error { + return kvdb.View(c.db, func(tx kvdb.ReadTx) error { // First, grab the node bucket. This will be used to populate // the Node pointers in each edge read from disk. - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -216,11 +217,11 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli // Next, grab the edge bucket which stores the edges, and also // the index itself so we can group the directed edges together // logically. - edges := tx.Bucket(edgeBucket) + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } @@ -264,8 +265,8 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli // should be passed as the first argument. Otherwise the first argument should // be nil and a fresh transaction will be created to execute the graph // traversal. 
-func (c *ChannelGraph) ForEachNodeChannel(tx *bbolt.Tx, nodePub []byte, - cb func(*bbolt.Tx, *ChannelEdgeInfo, *ChannelEdgePolicy, +func (c *ChannelGraph) ForEachNodeChannel(tx kvdb.ReadTx, nodePub []byte, + cb func(kvdb.ReadTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error { db := c.db @@ -280,13 +281,15 @@ func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, error) { var disabledChanIDs []uint64 chanEdgeFound := make(map[uint64]struct{}) - err := c.db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - disabledEdgePolicyIndex := edges.Bucket(disabledEdgePolicyBucket) + disabledEdgePolicyIndex := edges.NestedReadBucket( + disabledEdgePolicyBucket, + ) if disabledEdgePolicyIndex == nil { return nil } @@ -325,11 +328,11 @@ func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, error) { // // TODO(roasbeef): add iterator interface to allow for memory efficient graph // traversal when graph gets mega -func (c *ChannelGraph) ForEachNode(tx *bbolt.Tx, cb func(*bbolt.Tx, *LightningNode) error) error { - traversal := func(tx *bbolt.Tx) error { +func (c *ChannelGraph) ForEachNode(tx kvdb.RwTx, cb func(kvdb.ReadTx, *LightningNode) error) error { // nolint:interfacer + traversal := func(tx kvdb.ReadTx) error { // First grab the nodes bucket which stores the mapping from // pubKey to node information. - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -358,7 +361,7 @@ func (c *ChannelGraph) ForEachNode(tx *bbolt.Tx, cb func(*bbolt.Tx, *LightningNo // If no transaction was provided, then we'll create a new transaction // to execute the transaction within. 
if tx == nil { - return c.db.View(traversal) + return kvdb.View(c.db, traversal) } // Otherwise, we re-use the existing transaction to execute the graph @@ -372,10 +375,10 @@ func (c *ChannelGraph) ForEachNode(tx *bbolt.Tx, cb func(*bbolt.Tx, *LightningNo // node based off the source node. func (c *ChannelGraph) SourceNode() (*LightningNode, error) { var source *LightningNode - err := c.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { // First grab the nodes bucket which stores the mapping from // pubKey to node information. - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -399,7 +402,7 @@ func (c *ChannelGraph) SourceNode() (*LightningNode, error) { // of the graph. The source node is treated as the center node within a // star-graph. This method may be used to kick off a path finding algorithm in // order to explore the reachability of another node based off the source node. -func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) { +func (c *ChannelGraph) sourceNode(nodes kvdb.ReadBucket) (*LightningNode, error) { selfPub := nodes.Get(sourceKey) if selfPub == nil { return nil, ErrSourceNodeNotSet @@ -422,10 +425,10 @@ func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) { func (c *ChannelGraph) SetSourceNode(node *LightningNode) error { nodePubBytes := node.PubKeyBytes[:] - return c.db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { // First grab the nodes bucket which stores the mapping from // pubKey to node information. 
- nodes, err := tx.CreateBucketIfNotExists(nodeBucket) + nodes, err := tx.CreateTopLevelBucket(nodeBucket) if err != nil { return err } @@ -451,13 +454,13 @@ func (c *ChannelGraph) SetSourceNode(node *LightningNode) error { // // TODO(roasbeef): also need sig of announcement func (c *ChannelGraph) AddLightningNode(node *LightningNode) error { - return c.db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { return addLightningNode(tx, node) }) } -func addLightningNode(tx *bbolt.Tx, node *LightningNode) error { - nodes, err := tx.CreateBucketIfNotExists(nodeBucket) +func addLightningNode(tx kvdb.RwTx, node *LightningNode) error { + nodes, err := tx.CreateTopLevelBucket(nodeBucket) if err != nil { return err } @@ -482,13 +485,13 @@ func addLightningNode(tx *bbolt.Tx, node *LightningNode) error { func (c *ChannelGraph) LookupAlias(pub *btcec.PublicKey) (string, error) { var alias string - err := c.db.View(func(tx *bbolt.Tx) error { - nodes := tx.Bucket(nodeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNodesNotFound } - aliases := nodes.Bucket(aliasIndexBucket) + aliases := nodes.NestedReadBucket(aliasIndexBucket) if aliases == nil { return ErrGraphNodesNotFound } @@ -513,26 +516,24 @@ func (c *ChannelGraph) LookupAlias(pub *btcec.PublicKey) (string, error) { // DeleteLightningNode starts a new database transaction to remove a vertex/node // from the database according to the node's public key. -func (c *ChannelGraph) DeleteLightningNode(nodePub *btcec.PublicKey) error { +func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error { // TODO(roasbeef): ensure dangling edges are removed... 
- return c.db.Update(func(tx *bbolt.Tx) error { - nodes := tx.Bucket(nodeBucket) + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + nodes := tx.ReadWriteBucket(nodeBucket) if nodes == nil { return ErrGraphNodeNotFound } - return c.deleteLightningNode( - nodes, nodePub.SerializeCompressed(), - ) + return c.deleteLightningNode(nodes, nodePub[:]) }) } // deleteLightningNode uses an existing database transaction to remove a // vertex/node from the database according to the node's public key. -func (c *ChannelGraph) deleteLightningNode(nodes *bbolt.Bucket, +func (c *ChannelGraph) deleteLightningNode(nodes kvdb.RwBucket, compressedPubKey []byte) error { - aliases := nodes.Bucket(aliasIndexBucket) + aliases := nodes.NestedReadWriteBucket(aliasIndexBucket) if aliases == nil { return ErrGraphNodesNotFound } @@ -557,7 +558,7 @@ func (c *ChannelGraph) deleteLightningNode(nodes *bbolt.Bucket, // Finally, we'll delete the index entry for the node within the // nodeUpdateIndexBucket as this node is no longer active, so we don't // need to track its last update. - nodeUpdateIndex := nodes.Bucket(nodeUpdateIndexBucket) + nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket) if nodeUpdateIndex == nil { return ErrGraphNodesNotFound } @@ -582,7 +583,7 @@ func (c *ChannelGraph) AddChannelEdge(edge *ChannelEdgeInfo) error { c.cacheMu.Lock() defer c.cacheMu.Unlock() - err := c.db.Update(func(tx *bbolt.Tx) error { + err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { return c.addChannelEdge(tx, edge) }) if err != nil { @@ -597,16 +598,16 @@ func (c *ChannelGraph) AddChannelEdge(edge *ChannelEdgeInfo) error { // addChannelEdge is the private form of AddChannelEdge that allows callers to // utilize an existing db transaction. -func (c *ChannelGraph) addChannelEdge(tx *bbolt.Tx, edge *ChannelEdgeInfo) error { +func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, edge *ChannelEdgeInfo) error { // Construct the channel's primary key which is the 8-byte channel ID. 
var chanKey [8]byte binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID) - nodes, err := tx.CreateBucketIfNotExists(nodeBucket) + nodes, err := tx.CreateTopLevelBucket(nodeBucket) if err != nil { return err } - edges, err := tx.CreateBucketIfNotExists(edgeBucket) + edges, err := tx.CreateTopLevelBucket(edgeBucket) if err != nil { return err } @@ -732,12 +733,12 @@ func (c *ChannelGraph) HasChannelEdge( return upd1Time, upd2Time, exists, isZombie, nil } - if err := c.db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + if err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } @@ -749,7 +750,7 @@ func (c *ChannelGraph) HasChannelEdge( // index. if edgeIndex.Get(channelID[:]) == nil { exists = false - zombieIndex := edges.Bucket(zombieBucket) + zombieIndex := edges.NestedReadBucket(zombieBucket) if zombieIndex != nil { isZombie, _, _ = isZombieEdge( zombieIndex, chanID, @@ -765,7 +766,7 @@ func (c *ChannelGraph) HasChannelEdge( // If the channel has been found in the graph, then retrieve // the edges itself so we can return the last updated // timestamps. 
- nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNodeNotFound } @@ -809,13 +810,13 @@ func (c *ChannelGraph) UpdateChannelEdge(edge *ChannelEdgeInfo) error { var chanKey [8]byte binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID) - return c.db.Update(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + edges := tx.ReadWriteBucket(edgeBucket) if edge == nil { return ErrEdgeNotFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) if edgeIndex == nil { return ErrEdgeNotFound } @@ -852,10 +853,10 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint, var chansClosed []*ChannelEdgeInfo - err := c.db.Update(func(tx *bbolt.Tx) error { + err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { // First grab the edges bucket which houses the information // we'd like to delete - edges, err := tx.CreateBucketIfNotExists(edgeBucket) + edges, err := tx.CreateTopLevelBucket(edgeBucket) if err != nil { return err } @@ -869,7 +870,7 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint, if err != nil { return err } - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadWriteBucket(nodeBucket) if nodes == nil { return ErrSourceNodeNotSet } @@ -920,7 +921,7 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint, chansClosed = append(chansClosed, &edgeInfo) } - metaBucket, err := tx.CreateBucketIfNotExists(graphMetaBucket) + metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket) if err != nil { return err } @@ -966,16 +967,16 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint, // that we only maintain a graph of reachable nodes. In the event that a pruned // node gains more channels, it will be re-added back to the graph. 
func (c *ChannelGraph) PruneGraphNodes() error { - return c.db.Update(func(tx *bbolt.Tx) error { - nodes := tx.Bucket(nodeBucket) + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + nodes := tx.ReadWriteBucket(nodeBucket) if nodes == nil { return ErrGraphNodesNotFound } - edges := tx.Bucket(edgeBucket) + edges := tx.ReadWriteBucket(edgeBucket) if edges == nil { return ErrGraphNotFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } @@ -987,8 +988,8 @@ func (c *ChannelGraph) PruneGraphNodes() error { // pruneGraphNodes attempts to remove any nodes from the graph who have had a // channel closed within the current block. If the node still has existing // channels in the graph, this will act as a no-op. -func (c *ChannelGraph) pruneGraphNodes(nodes *bbolt.Bucket, - edgeIndex *bbolt.Bucket) error { +func (c *ChannelGraph) pruneGraphNodes(nodes kvdb.RwBucket, + edgeIndex kvdb.RwBucket) error { log.Trace("Pruning nodes from graph with no open channels") @@ -1114,8 +1115,8 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf // Keep track of the channels that are removed from the graph. 
var removedChans []*ChannelEdgeInfo - if err := c.db.Update(func(tx *bbolt.Tx) error { - edges, err := tx.CreateBucketIfNotExists(edgeBucket) + if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { + edges, err := tx.CreateTopLevelBucket(edgeBucket) if err != nil { return err } @@ -1131,7 +1132,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf if err != nil { return err } - nodes, err := tx.CreateBucketIfNotExists(nodeBucket) + nodes, err := tx.CreateTopLevelBucket(nodeBucket) if err != nil { return err } @@ -1141,7 +1142,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf // NOTE: we must delete the edges after the cursor loop, since // modifying the bucket while traversing is not safe. var keys [][]byte - cursor := edgeIndex.Cursor() + cursor := edgeIndex.ReadWriteCursor() for k, v := cursor.Seek(chanIDStart[:]); k != nil && bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() { @@ -1167,7 +1168,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf // Delete all the entries in the prune log having a height // greater or equal to the block disconnected. - metaBucket, err := tx.CreateBucketIfNotExists(graphMetaBucket) + metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket) if err != nil { return err } @@ -1186,7 +1187,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf // To avoid modifying the bucket while traversing, we delete // the keys in a second loop. 
var pruneKeys [][]byte - pruneCursor := pruneBucket.Cursor() + pruneCursor := pruneBucket.ReadWriteCursor() for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil && bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() { @@ -1222,17 +1223,17 @@ func (c *ChannelGraph) PruneTip() (*chainhash.Hash, uint32, error) { tipHeight uint32 ) - err := c.db.View(func(tx *bbolt.Tx) error { - graphMeta := tx.Bucket(graphMetaBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + graphMeta := tx.ReadBucket(graphMetaBucket) if graphMeta == nil { return ErrGraphNotFound } - pruneBucket := graphMeta.Bucket(pruneLogBucket) + pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket) if pruneBucket == nil { return ErrGraphNeverPruned } - pruneCursor := pruneBucket.Cursor() + pruneCursor := pruneBucket.ReadCursor() // The prune key with the largest block height will be our // prune tip. @@ -1267,20 +1268,20 @@ func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) error { c.cacheMu.Lock() defer c.cacheMu.Unlock() - err := c.db.Update(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { + edges := tx.ReadWriteBucket(edgeBucket) if edges == nil { return ErrEdgeNotFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) if edgeIndex == nil { return ErrEdgeNotFound } - chanIndex := edges.Bucket(channelPointBucket) + chanIndex := edges.NestedReadWriteBucket(channelPointBucket) if chanIndex == nil { return ErrEdgeNotFound } - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadWriteBucket(nodeBucket) if nodes == nil { return ErrGraphNodeNotFound } @@ -1320,7 +1321,7 @@ func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) error { // the database, then ErrEdgeNotFound is returned. 
func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, error) { var chanID uint64 - if err := c.db.View(func(tx *bbolt.Tx) error { + if err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { var err error chanID, err = getChanID(tx, chanPoint) return err @@ -1332,17 +1333,17 @@ func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, error) { } // getChanID returns the assigned channel ID for a given channel point. -func getChanID(tx *bbolt.Tx, chanPoint *wire.OutPoint) (uint64, error) { +func getChanID(tx kvdb.ReadTx, chanPoint *wire.OutPoint) (uint64, error) { var b bytes.Buffer if err := writeOutpoint(&b, chanPoint); err != nil { return 0, err } - edges := tx.Bucket(edgeBucket) + edges := tx.ReadBucket(edgeBucket) if edges == nil { return 0, ErrGraphNoEdgesFound } - chanIndex := edges.Bucket(channelPointBucket) + chanIndex := edges.NestedReadBucket(channelPointBucket) if chanIndex == nil { return 0, ErrGraphNoEdgesFound } @@ -1365,19 +1366,19 @@ func getChanID(tx *bbolt.Tx, chanPoint *wire.OutPoint) (uint64, error) { func (c *ChannelGraph) HighestChanID() (uint64, error) { var cid uint64 - err := c.db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } // In order to find the highest chan ID, we'll fetch a cursor // and use that to seek to the "end" of our known rage. 
- cidCursor := edgeIndex.Cursor() + cidCursor := edgeIndex.ReadCursor() lastChanID, _ := cidCursor.Last() @@ -1429,28 +1430,28 @@ func (c *ChannelGraph) ChanUpdatesInHorizon(startTime, endTime time.Time) ([]Cha defer c.cacheMu.Unlock() var hits int - err := c.db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } - edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket) + edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket) if edgeUpdateIndex == nil { return ErrGraphNoEdgesFound } - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNodesNotFound } // We'll now obtain a cursor to perform a range query within // the index to find all channels within the horizon. - updateCursor := edgeUpdateIndex.Cursor() + updateCursor := edgeUpdateIndex.ReadCursor() var startTimeBytes, endTimeBytes [8 + 8]byte byteOrder.PutUint64( @@ -1549,20 +1550,20 @@ func (c *ChannelGraph) ChanUpdatesInHorizon(startTime, endTime time.Time) ([]Cha func (c *ChannelGraph) NodeUpdatesInHorizon(startTime, endTime time.Time) ([]LightningNode, error) { var nodesInHorizon []LightningNode - err := c.db.View(func(tx *bbolt.Tx) error { - nodes := tx.Bucket(nodeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNodesNotFound } - nodeUpdateIndex := nodes.Bucket(nodeUpdateIndexBucket) + nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket) if nodeUpdateIndex == nil { return ErrGraphNodesNotFound } // We'll now obtain a cursor to perform a range query within // the index to find all node announcements within the horizon. 
- updateCursor := nodeUpdateIndex.Cursor() + updateCursor := nodeUpdateIndex.ReadCursor() var startTimeBytes, endTimeBytes [8 + 33]byte byteOrder.PutUint64( @@ -1611,12 +1612,12 @@ func (c *ChannelGraph) NodeUpdatesInHorizon(startTime, endTime time.Time) ([]Lig func (c *ChannelGraph) FilterKnownChanIDs(chanIDs []uint64) ([]uint64, error) { var newChanIDs []uint64 - err := c.db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } @@ -1624,7 +1625,7 @@ func (c *ChannelGraph) FilterKnownChanIDs(chanIDs []uint64) ([]uint64, error) { // Fetch the zombie index, it may not exist if no edges have // ever been marked as zombies. If the index has been // initialized, we will use it later to skip known zombie edges. - zombieIndex := edges.Bucket(zombieBucket) + zombieIndex := edges.NestedReadBucket(zombieBucket) // We'll run through the set of chanIDs and collate only the // set of channel that are unable to be found within our db. 
@@ -1687,17 +1688,17 @@ func (c *ChannelGraph) FilterChannelRange(startHeight, endHeight uint32) ([]uint byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64()) byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64()) - err := c.db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } - cursor := edgeIndex.Cursor() + cursor := edgeIndex.ReadCursor() // We'll now iterate through the database, and find each // channel ID that resides within the specified range. @@ -1740,16 +1741,16 @@ func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) { cidBytes [8]byte ) - err := c.db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -1795,12 +1796,12 @@ func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) { return chanEdges, nil } -func delEdgeUpdateIndexEntry(edgesBucket *bbolt.Bucket, chanID uint64, +func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64, edge1, edge2 *ChannelEdgePolicy) error { // First, we'll fetch the edge update index bucket which currently // stores an entry for the channel we're about to delete. 
- updateIndex := edgesBucket.Bucket(edgeUpdateIndexBucket) + updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket) if updateIndex == nil { // No edges in bucket, return early. return nil @@ -1834,7 +1835,7 @@ func delEdgeUpdateIndexEntry(edgesBucket *bbolt.Bucket, chanID uint64, } func delChannelEdge(edges, edgeIndex, chanIndex, zombieIndex, - nodes *bbolt.Bucket, chanID []byte, isZombie bool) error { + nodes kvdb.RwBucket, chanID []byte, isZombie bool) error { edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID) if err != nil { @@ -1920,7 +1921,7 @@ func (c *ChannelGraph) UpdateEdgePolicy(edge *ChannelEdgePolicy) error { defer c.cacheMu.Unlock() var isUpdate1 bool - err := c.db.Update(func(tx *bbolt.Tx) error { + err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { var err error isUpdate1, err = updateEdgePolicy(tx, edge) return err @@ -1962,17 +1963,17 @@ func (c *ChannelGraph) UpdateEdgePolicy(edge *ChannelEdgePolicy) error { // buckets using an existing database transaction. The returned boolean will be // true if the updated policy belongs to node1, and false if the policy belonged // to node2. -func updateEdgePolicy(tx *bbolt.Tx, edge *ChannelEdgePolicy) (bool, error) { - edges := tx.Bucket(edgeBucket) +func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, error) { + edges := tx.ReadWriteBucket(edgeBucket) if edges == nil { return false, ErrEdgeNotFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) if edgeIndex == nil { return false, ErrEdgeNotFound } - nodes, err := tx.CreateBucketIfNotExists(nodeBucket) + nodes, err := tx.CreateTopLevelBucket(nodeBucket) if err != nil { return false, err } @@ -2139,14 +2140,14 @@ func (l *LightningNode) NodeAnnouncement(signed bool) (*lnwire.NodeAnnouncement, // isPublic determines whether the node is seen as public within the graph from // the source node's point of view. An existing database transaction can also be // specified. 
-func (l *LightningNode) isPublic(tx *bbolt.Tx, sourcePubKey []byte) (bool, error) { +func (l *LightningNode) isPublic(tx kvdb.ReadTx, sourcePubKey []byte) (bool, error) { // In order to determine whether this node is publicly advertised within // the graph, we'll need to look at all of its edges and check whether // they extend to any other node than the source node. errDone will be // used to terminate the check early. nodeIsPublic := false errDone := errors.New("done") - err := l.ForEachChannel(tx, func(_ *bbolt.Tx, info *ChannelEdgeInfo, + err := l.ForEachChannel(tx, func(_ kvdb.ReadTx, info *ChannelEdgeInfo, _, _ *ChannelEdgePolicy) error { // If this edge doesn't extend to the source node, we'll @@ -2180,20 +2181,27 @@ func (l *LightningNode) isPublic(tx *bbolt.Tx, sourcePubKey []byte) (bool, error // FetchLightningNode attempts to look up a target node by its identity public // key. If the node isn't found in the database, then ErrGraphNodeNotFound is // returned. -func (c *ChannelGraph) FetchLightningNode(pub *btcec.PublicKey) (*LightningNode, error) { +// +// If the caller wishes to re-use an existing boltdb transaction, then it +// should be passed as the first argument. Otherwise the first argument should +// be nil and a fresh transaction will be created to execute the graph +// traversal. +func (c *ChannelGraph) FetchLightningNode(tx kvdb.ReadTx, nodePub route.Vertex) ( + *LightningNode, error) { + var node *LightningNode - nodePub := pub.SerializeCompressed() - err := c.db.View(func(tx *bbolt.Tx) error { + + fetchNode := func(tx kvdb.ReadTx) error { // First grab the nodes bucket which stores the mapping from // pubKey to node information. - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } // If a key for this serialized public key isn't found, then // the target node doesn't exist within the database. 
- nodeBytes := nodes.Get(nodePub) + nodeBytes := nodes.Get(nodePub[:]) if nodeBytes == nil { return ErrGraphNodeNotFound } @@ -2210,7 +2218,14 @@ func (c *ChannelGraph) FetchLightningNode(pub *btcec.PublicKey) (*LightningNode, node = &n return nil - }) + } + + var err error + if tx == nil { + err = kvdb.View(c.db, fetchNode) + } else { + err = fetchNode(tx) + } if err != nil { return nil, err } @@ -2229,10 +2244,10 @@ func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, erro exists bool ) - err := c.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { // First grab the nodes bucket which stores the mapping from // pubKey to node information. - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -2267,19 +2282,19 @@ func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, erro // nodeTraversal is used to traverse all channels of a node given by its // public key and passes channel information into the specified callback. 
-func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB, - cb func(*bbolt.Tx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error { +func nodeTraversal(tx kvdb.ReadTx, nodePub []byte, db *DB, + cb func(kvdb.ReadTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error { - traversal := func(tx *bbolt.Tx) error { - nodes := tx.Bucket(nodeBucket) + traversal := func(tx kvdb.ReadTx) error { + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } - edges := tx.Bucket(edgeBucket) + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNotFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } @@ -2299,7 +2314,7 @@ func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB, // bucket until the retrieved key no longer has the public key // as its prefix. This indicates that we've stepped over into // another node's edges, so we can terminate our scan. - edgeCursor := edges.Cursor() + edgeCursor := edges.ReadCursor() for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() { // If the prefix still matches, the channel id is // returned in nodeEdge. Channel id is used to lookup @@ -2344,7 +2359,7 @@ func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB, // If no transaction was provided, then we'll create a new transaction // to execute the transaction within. if tx == nil { - return db.View(traversal) + return kvdb.View(db, traversal) } // Otherwise, we re-use the existing transaction to execute the graph @@ -2365,8 +2380,8 @@ func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB, // should be passed as the first argument. Otherwise the first argument should // be nil and a fresh transaction will be created to execute the graph // traversal. 
-func (l *LightningNode) ForEachChannel(tx *bbolt.Tx, - cb func(*bbolt.Tx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error { +func (l *LightningNode) ForEachChannel(tx kvdb.ReadTx, + cb func(kvdb.ReadTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error { nodePub := l.PubKeyBytes[:] db := l.db @@ -2557,7 +2572,7 @@ func (c *ChannelEdgeInfo) OtherNodeKeyBytes(thisNodeKey []byte) ( // the target node in the channel. This is useful when one knows the pubkey of // one of the nodes, and wishes to obtain the full LightningNode for the other // end of the channel. -func (c *ChannelEdgeInfo) FetchOtherNode(tx *bbolt.Tx, thisNodeKey []byte) (*LightningNode, error) { +func (c *ChannelEdgeInfo) FetchOtherNode(tx kvdb.ReadTx, thisNodeKey []byte) (*LightningNode, error) { // Ensure that the node passed in is actually a member of the channel. var targetNodeBytes [33]byte @@ -2571,10 +2586,10 @@ func (c *ChannelEdgeInfo) FetchOtherNode(tx *bbolt.Tx, thisNodeKey []byte) (*Lig } var targetNode *LightningNode - fetchNodeFunc := func(tx *bbolt.Tx) error { + fetchNodeFunc := func(tx kvdb.ReadTx) error { // First grab the nodes bucket which stores the mapping from // pubKey to node information. - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -2594,7 +2609,7 @@ func (c *ChannelEdgeInfo) FetchOtherNode(tx *bbolt.Tx, thisNodeKey []byte) (*Lig // otherwise we can use the existing db transaction. var err error if tx == nil { - err = c.db.View(fetchNodeFunc) + err = kvdb.View(c.db, fetchNodeFunc) } else { err = fetchNodeFunc(tx) } @@ -2767,11 +2782,11 @@ type ChannelEdgePolicy struct { // the node would like to HTLC exchanges. TimeLockDelta uint16 - // MinHTLC is the smallest value HTLC this node will accept, expressed + // MinHTLC is the smallest value HTLC this node will forward, expressed // in millisatoshi. 
MinHTLC lnwire.MilliSatoshi - // MaxHTLC is the largest value HTLC this node will accept, expressed + // MaxHTLC is the largest value HTLC this node will forward, expressed // in millisatoshi. MaxHTLC lnwire.MilliSatoshi @@ -2870,10 +2885,10 @@ func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint, policy2 *ChannelEdgePolicy ) - err := c.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { // First, grab the node bucket. This will be used to populate // the Node pointers in each edge read from disk. - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -2881,18 +2896,18 @@ func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint, // Next, grab the edge bucket which stores the edges, and also // the index itself so we can group the directed edges together // logically. - edges := tx.Bucket(edgeBucket) + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } // If the channel's outpoint doesn't exist within the outpoint // index, then the edge does not exist. - chanIndex := edges.Bucket(channelPointBucket) + chanIndex := edges.NestedReadBucket(channelPointBucket) if chanIndex == nil { return ErrGraphNoEdgesFound } @@ -2954,10 +2969,10 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64, channelID [8]byte ) - err := c.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { // First, grab the node bucket. This will be used to populate // the Node pointers in each edge read from disk. 
- nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -2965,11 +2980,11 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64, // Next, grab the edge bucket which stores the edges, and also // the index itself so we can group the directed edges together // logically. - edges := tx.Bucket(edgeBucket) + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } @@ -2985,7 +3000,7 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64, // If the zombie index doesn't exist, or the edge is not // marked as a zombie within it, then we'll return the // original ErrEdgeNotFound error. - zombieIndex := edges.Bucket(zombieBucket) + zombieIndex := edges.NestedReadBucket(zombieBucket) if zombieIndex == nil { return ErrEdgeNotFound } @@ -3044,8 +3059,8 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64, // source node's point of view. func (c *ChannelGraph) IsPublicNode(pubKey [33]byte) (bool, error) { var nodeIsPublic bool - err := c.db.View(func(tx *bbolt.Tx) error { - nodes := tx.Bucket(nodeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNodesNotFound } @@ -3130,19 +3145,19 @@ func (e *EdgePoint) String() string { // closes on the resident blockchain. func (c *ChannelGraph) ChannelView() ([]EdgePoint, error) { var edgePoints []EdgePoint - if err := c.db.View(func(tx *bbolt.Tx) error { + if err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { // We're going to iterate over the entire channel index, so // we'll need to fetch the edgeBucket to get to the index as // it's a sub-bucket. 
- edges := tx.Bucket(edgeBucket) + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - chanIndex := edges.Bucket(channelPointBucket) + chanIndex := edges.NestedReadBucket(channelPointBucket) if chanIndex == nil { return ErrGraphNoEdgesFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadBucket(edgeIndexBucket) if edgeIndex == nil { return ErrGraphNoEdgesFound } @@ -3196,7 +3211,7 @@ func (c *ChannelGraph) NewChannelEdgePolicy() *ChannelEdgePolicy { // markEdgeZombie marks an edge as a zombie within our zombie index. The public // keys should represent the node public keys of the two parties involved in the // edge. -func markEdgeZombie(zombieIndex *bbolt.Bucket, chanID uint64, pubKey1, +func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1, pubKey2 [33]byte) error { var k [8]byte @@ -3214,12 +3229,12 @@ func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error { c.cacheMu.Lock() defer c.cacheMu.Unlock() - err := c.db.Update(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { + edges := tx.ReadWriteBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - zombieIndex := edges.Bucket(zombieBucket) + zombieIndex := edges.NestedReadWriteBucket(zombieBucket) if zombieIndex == nil { return nil } @@ -3247,12 +3262,12 @@ func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) { pubKey1, pubKey2 [33]byte ) - err := c.db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - zombieIndex := edges.Bucket(zombieBucket) + zombieIndex := edges.NestedReadBucket(zombieBucket) if zombieIndex == nil { return nil } @@ -3270,7 +3285,7 @@ func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) { // isZombieEdge returns whether an entry exists for 
the given channel in the // zombie index. If an entry exists, then the two node public keys corresponding // to this edge are also returned. -func isZombieEdge(zombieIndex *bbolt.Bucket, +func isZombieEdge(zombieIndex kvdb.ReadBucket, chanID uint64) (bool, [33]byte, [33]byte) { var k [8]byte @@ -3291,12 +3306,12 @@ func isZombieEdge(zombieIndex *bbolt.Bucket, // NumZombies returns the current number of zombie channels in the graph. func (c *ChannelGraph) NumZombies() (uint64, error) { var numZombies uint64 - err := c.db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return nil } - zombieIndex := edges.Bucket(zombieBucket) + zombieIndex := edges.NestedReadBucket(zombieBucket) if zombieIndex == nil { return nil } @@ -3313,8 +3328,8 @@ func (c *ChannelGraph) NumZombies() (uint64, error) { return numZombies, nil } -func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket, - updateIndex *bbolt.Bucket, node *LightningNode) error { +func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket, // nolint:dupl + updateIndex kvdb.RwBucket, node *LightningNode) error { var ( scratch [16]byte @@ -3442,7 +3457,7 @@ func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket, return nodeBucket.Put(nodePub, b.Bytes()) } -func fetchLightningNode(nodeBucket *bbolt.Bucket, +func fetchLightningNode(nodeBucket kvdb.ReadBucket, nodePub []byte) (LightningNode, error) { nodeBytes := nodeBucket.Get(nodePub) @@ -3461,6 +3476,10 @@ func deserializeLightningNode(r io.Reader) (LightningNode, error) { err error ) + // Always populate a feature vector, even if we don't have a node + // announcement and short circuit below. 
+ node.Features = lnwire.EmptyFeatureVector() + if _, err := r.Read(scratch[:]); err != nil { return LightningNode{}, err } @@ -3506,12 +3525,10 @@ func deserializeLightningNode(r io.Reader) (LightningNode, error) { return LightningNode{}, err } - fv := lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures) - err = fv.Decode(r) + err = node.Features.Decode(r) if err != nil { return LightningNode{}, err } - node.Features = fv if _, err := r.Read(scratch[:2]); err != nil { return LightningNode{}, err @@ -3548,7 +3565,7 @@ func deserializeLightningNode(r io.Reader) (LightningNode, error) { return node, nil } -func putChanEdgeInfo(edgeIndex *bbolt.Bucket, edgeInfo *ChannelEdgeInfo, chanID [8]byte) error { +func putChanEdgeInfo(edgeIndex kvdb.RwBucket, edgeInfo *ChannelEdgeInfo, chanID [8]byte) error { var b bytes.Buffer if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil { @@ -3614,7 +3631,7 @@ func putChanEdgeInfo(edgeIndex *bbolt.Bucket, edgeInfo *ChannelEdgeInfo, chanID return edgeIndex.Put(chanID[:], b.Bytes()) } -func fetchChanEdgeInfo(edgeIndex *bbolt.Bucket, +func fetchChanEdgeInfo(edgeIndex kvdb.ReadBucket, chanID []byte) (ChannelEdgeInfo, error) { edgeInfoBytes := edgeIndex.Get(chanID) @@ -3703,7 +3720,7 @@ func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) { return edgeInfo, nil } -func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy, +func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy, from, to []byte) error { var edgeKey [33 + 8]byte @@ -3783,7 +3800,7 @@ func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy, // in this bucket. // Maintaining the bucket this way allows a fast retrieval of disabled // channels, for example when prune is needed. 
-func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64, +func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64, direction bool, disabled bool) error { var disabledEdgeKey [8 + 1]byte @@ -3808,7 +3825,7 @@ func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64, // putChanEdgePolicyUnknown marks the edge policy as unknown // in the edges bucket. -func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64, +func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64, from []byte) error { var edgeKey [33 + 8]byte @@ -3823,8 +3840,8 @@ func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64, return edges.Put(edgeKey[:], unknownPolicy) } -func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte, - nodePub []byte, nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) { +func fetchChanEdgePolicy(edges kvdb.ReadBucket, chanID []byte, + nodePub []byte, nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) { var edgeKey [33 + 8]byte copy(edgeKey[:], nodePub) @@ -3856,8 +3873,8 @@ func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte, return ep, nil } -func fetchChanEdgePolicies(edgeIndex *bbolt.Bucket, edges *bbolt.Bucket, - nodes *bbolt.Bucket, chanID []byte, +func fetchChanEdgePolicies(edgeIndex kvdb.ReadBucket, edges kvdb.ReadBucket, + nodes kvdb.ReadBucket, chanID []byte, db *DB) (*ChannelEdgePolicy, *ChannelEdgePolicy, error) { edgeInfo := edgeIndex.Get(chanID) @@ -3965,7 +3982,7 @@ func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy, } func deserializeChanEdgePolicy(r io.Reader, - nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) { + nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) { edge := &ChannelEdgePolicy{} diff --git a/channeldb/graph_test.go b/channeldb/graph_test.go index de8774a98f..b1355bafc0 100644 --- a/channeldb/graph_test.go +++ b/channeldb/graph_test.go @@ -17,9 +17,10 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" 
"github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" ) var ( @@ -36,7 +37,9 @@ var ( _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10) _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10) - testFeatures = lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures) + testFeatures = lnwire.NewFeatureVector(nil, lnwire.Features) + + testPub = route.Vertex{2, 202, 4} ) func createLightningNode(db *DB, priv *btcec.PrivateKey) (*LightningNode, error) { @@ -80,7 +83,6 @@ func TestNodeInsertionAndDeletion(t *testing.T) { // We'd like to test basic insertion/deletion for vertexes from the // graph, so we'll create a test vertex to start with. - _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) node := &LightningNode{ HaveNodeAnnouncement: true, AuthSigBytes: testSig.Serialize(), @@ -90,9 +92,9 @@ func TestNodeInsertionAndDeletion(t *testing.T) { Features: testFeatures, Addresses: testAddrs, ExtraOpaqueData: []byte("extra new data"), + PubKeyBytes: testPub, db: db, } - copy(node.PubKeyBytes[:], testPub.SerializeCompressed()) // First, insert the node into the graph DB. This should succeed // without any errors. @@ -102,7 +104,7 @@ func TestNodeInsertionAndDeletion(t *testing.T) { // Next, fetch the node from the database to ensure everything was // serialized properly. - dbNode, err := graph.FetchLightningNode(testPub) + dbNode, err := graph.FetchLightningNode(nil, testPub) if err != nil { t.Fatalf("unable to locate node: %v", err) } @@ -126,7 +128,7 @@ func TestNodeInsertionAndDeletion(t *testing.T) { // Finally, attempt to fetch the node again. This should fail as the // node should have been deleted from the database. 
- _, err = graph.FetchLightningNode(testPub) + _, err = graph.FetchLightningNode(nil, testPub) if err != ErrGraphNodeNotFound { t.Fatalf("fetch after delete should fail!") } @@ -147,11 +149,10 @@ func TestPartialNode(t *testing.T) { // We want to be able to insert nodes into the graph that only has the // PubKey set. - _, testPub := btcec.PrivKeyFromBytes(btcec.S256(), key[:]) node := &LightningNode{ HaveNodeAnnouncement: false, + PubKeyBytes: testPub, } - copy(node.PubKeyBytes[:], testPub.SerializeCompressed()) if err := graph.AddLightningNode(node); err != nil { t.Fatalf("unable to add node: %v", err) @@ -159,7 +160,7 @@ func TestPartialNode(t *testing.T) { // Next, fetch the node from the database to ensure everything was // serialized properly. - dbNode, err := graph.FetchLightningNode(testPub) + dbNode, err := graph.FetchLightningNode(nil, testPub) if err != nil { t.Fatalf("unable to locate node: %v", err) } @@ -175,9 +176,9 @@ func TestPartialNode(t *testing.T) { node = &LightningNode{ HaveNodeAnnouncement: false, LastUpdate: time.Unix(0, 0), + PubKeyBytes: testPub, db: db, } - copy(node.PubKeyBytes[:], testPub.SerializeCompressed()) if err := compareNodes(node, dbNode); err != nil { t.Fatalf("nodes don't match: %v", err) @@ -191,7 +192,7 @@ func TestPartialNode(t *testing.T) { // Finally, attempt to fetch the node again. This should fail as the // node should have been deleted from the database. - _, err = graph.FetchLightningNode(testPub) + _, err = graph.FetchLightningNode(nil, testPub) if err != ErrGraphNodeNotFound { t.Fatalf("fetch after delete should fail!") } @@ -881,7 +882,7 @@ func TestGraphTraversal(t *testing.T) { // Iterate over each node as returned by the graph, if all nodes are // reached, then the map created above should be empty. 
- err = graph.ForEachNode(nil, func(_ *bbolt.Tx, node *LightningNode) error { + err = graph.ForEachNode(nil, func(_ kvdb.ReadTx, node *LightningNode) error { delete(nodeIndex, node.Alias) return nil }) @@ -977,7 +978,7 @@ func TestGraphTraversal(t *testing.T) { // Finally, we want to test the ability to iterate over all the // outgoing channels for a particular node. numNodeChans := 0 - err = firstNode.ForEachChannel(nil, func(_ *bbolt.Tx, _ *ChannelEdgeInfo, + err = firstNode.ForEachChannel(nil, func(_ kvdb.ReadTx, _ *ChannelEdgeInfo, outEdge, inEdge *ChannelEdgePolicy) error { // All channels between first and second node should have fully @@ -1050,7 +1051,7 @@ func assertNumChans(t *testing.T, graph *ChannelGraph, n int) { func assertNumNodes(t *testing.T, graph *ChannelGraph, n int) { numNodes := 0 - err := graph.ForEachNode(nil, func(_ *bbolt.Tx, _ *LightningNode) error { + err := graph.ForEachNode(nil, func(_ kvdb.ReadTx, _ *LightningNode) error { numNodes++ return nil }) @@ -2096,10 +2097,9 @@ func TestIncompleteChannelPolicies(t *testing.T) { } // Ensure that channel is reported with unknown policies. 
- checkPolicies := func(node *LightningNode, expectedIn, expectedOut bool) { calls := 0 - node.ForEachChannel(nil, func(_ *bbolt.Tx, _ *ChannelEdgeInfo, + err := node.ForEachChannel(nil, func(_ kvdb.ReadTx, _ *ChannelEdgeInfo, outEdge, inEdge *ChannelEdgePolicy) error { if !expectedOut && outEdge != nil { @@ -2122,6 +2122,9 @@ func TestIncompleteChannelPolicies(t *testing.T) { return nil }) + if err != nil { + t.Fatalf("unable to scan channels: %v", err) + } if calls != 1 { t.Fatalf("Expected only one callback call") @@ -2232,17 +2235,27 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) { timestampSet[t] = struct{}{} } - err := db.View(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err := kvdb.View(db, func(tx kvdb.ReadTx) error { + edges := tx.ReadBucket(edgeBucket) if edges == nil { return ErrGraphNoEdgesFound } - edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket) + edgeUpdateIndex := edges.NestedReadBucket( + edgeUpdateIndexBucket, + ) if edgeUpdateIndex == nil { return ErrGraphNoEdgesFound } - numEntries := edgeUpdateIndex.Stats().KeyN + var numEntries int + err := edgeUpdateIndex.ForEach(func(k, v []byte) error { + numEntries++ + return nil + }) + if err != nil { + return err + } + expectedEntries := len(timestampSet) if numEntries != expectedEntries { return fmt.Errorf("expected %v entries in the "+ @@ -2386,11 +2399,8 @@ func TestPruneGraphNodes(t *testing.T) { // Finally, we'll ensure that node3, the only fully unconnected node as // properly deleted from the graph and not another node in its place. 
- node3Pub, err := node3.PubKey() - if err != nil { - t.Fatalf("unable to fetch the pubkey of node3: %v", err) - } - if _, err := graph.FetchLightningNode(node3Pub); err == nil { + _, err = graph.FetchLightningNode(nil, node3.PubKeyBytes) + if err == nil { t.Fatalf("node 3 should have been deleted!") } } @@ -2430,18 +2440,9 @@ func TestAddChannelEdgeShellNodes(t *testing.T) { t.Fatalf("unable to add edge: %v", err) } - node1Pub, err := node1.PubKey() - if err != nil { - t.Fatalf("unable to parse node 1 pub: %v", err) - } - node2Pub, err := node2.PubKey() - if err != nil { - t.Fatalf("unable to parse node 2 pub: %v", err) - } - // Ensure that node1 was inserted as a full node, while node2 only has // a shell node present. - node1, err = graph.FetchLightningNode(node1Pub) + node1, err = graph.FetchLightningNode(nil, node1.PubKeyBytes) if err != nil { t.Fatalf("unable to fetch node1: %v", err) } @@ -2449,7 +2450,7 @@ func TestAddChannelEdgeShellNodes(t *testing.T) { t.Fatalf("have shell announcement for node1, shouldn't") } - node2, err = graph.FetchLightningNode(node2Pub) + node2, err = graph.FetchLightningNode(nil, node2.PubKeyBytes) if err != nil { t.Fatalf("unable to fetch node2: %v", err) } @@ -2504,8 +2505,7 @@ func TestNodePruningUpdateIndexDeletion(t *testing.T) { // We'll now delete the node from the graph, this should result in it // being removed from the update index as well. - nodePub, _ := node1.PubKey() - if err := graph.DeleteLightningNode(nodePub); err != nil { + if err := graph.DeleteLightningNode(node1.PubKeyBytes); err != nil { t.Fatalf("unable to delete node: %v", err) } @@ -2844,8 +2844,8 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) { // Attempting to deserialize these bytes should return an error. 
r := bytes.NewReader(stripped) - err = db.View(func(tx *bbolt.Tx) error { - nodes := tx.Bucket(nodeBucket) + err = kvdb.View(db, func(tx kvdb.ReadTx) error { + nodes := tx.ReadBucket(nodeBucket) if nodes == nil { return ErrGraphNotFound } @@ -2864,13 +2864,13 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) { } // Put the stripped bytes in the DB. - err = db.Update(func(tx *bbolt.Tx) error { - edges := tx.Bucket(edgeBucket) + err = kvdb.Update(db, func(tx kvdb.RwTx) error { + edges := tx.ReadWriteBucket(edgeBucket) if edges == nil { return ErrEdgeNotFound } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) if edgeIndex == nil { return ErrEdgeNotFound } diff --git a/channeldb/invoice_test.go b/channeldb/invoice_test.go index 4b5dda87eb..2680789445 100644 --- a/channeldb/invoice_test.go +++ b/channeldb/invoice_test.go @@ -2,12 +2,20 @@ package channeldb import ( "crypto/rand" + "math" "reflect" "testing" "time" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" +) + +var ( + emptyFeatures = lnwire.NewFeatureVector(nil, lnwire.Features) + testNow = time.Unix(1, 0) ) func randInvoice(value lnwire.MilliSatoshi) (*Invoice, error) { @@ -17,18 +25,16 @@ func randInvoice(value lnwire.MilliSatoshi) (*Invoice, error) { } i := &Invoice{ - // Use single second precision to avoid false positive test - // failures due to the monotonic time component. 
- CreationDate: time.Unix(time.Now().Unix(), 0), + CreationDate: testNow, Terms: ContractTerm{ + Expiry: 4000, PaymentPreimage: pre, Value: value, + Features: emptyFeatures, }, - Htlcs: map[CircuitKey]*InvoiceHTLC{}, - Expiry: 4000, + Htlcs: map[CircuitKey]*InvoiceHTLC{}, } i.Memo = []byte("memo") - i.Receipt = []byte("receipt") // Create a random byte slice of MaxPaymentRequestSize bytes to be used // as a dummy paymentrequest, and determine if it should be set based @@ -46,6 +52,44 @@ func randInvoice(value lnwire.MilliSatoshi) (*Invoice, error) { return i, nil } +// settleTestInvoice settles a test invoice. +func settleTestInvoice(invoice *Invoice, settleIndex uint64) { + invoice.SettleDate = testNow + invoice.AmtPaid = invoice.Terms.Value + invoice.State = ContractSettled + invoice.Htlcs[CircuitKey{}] = &InvoiceHTLC{ + Amt: invoice.Terms.Value, + AcceptTime: testNow, + ResolveTime: testNow, + State: HtlcStateSettled, + CustomRecords: make(record.CustomSet), + } + invoice.SettleIndex = settleIndex +} + +// Tests that pending invoices are those which are either in ContractOpen or +// in ContractAccepted state. +func TestInvoiceIsPending(t *testing.T) { + contractStates := []ContractState{ + ContractOpen, ContractSettled, ContractCanceled, ContractAccepted, + } + + for _, state := range contractStates { + invoice := Invoice{ + State: state, + } + + // We expect that an invoice is pending if it's either in ContractOpen + // or ContractAccepted state. + pending := (state == ContractOpen || state == ContractAccepted) + + if invoice.IsPending() != pending { + t.Fatalf("expected pending: %v, got: %v, invoice: %v", + pending, invoice.IsPending(), invoice) + } + } +} + func TestInvoiceWorkflow(t *testing.T) { t.Parallel() @@ -58,16 +102,14 @@ func TestInvoiceWorkflow(t *testing.T) { // Create a fake invoice which we'll use several times in the tests // below. 
fakeInvoice := &Invoice{ - // Use single second precision to avoid false positive test - // failures due to the monotonic time component. - CreationDate: time.Unix(time.Now().Unix(), 0), + CreationDate: testNow, Htlcs: map[CircuitKey]*InvoiceHTLC{}, } fakeInvoice.Memo = []byte("memo") - fakeInvoice.Receipt = []byte("receipt") fakeInvoice.PaymentRequest = []byte("") copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:]) fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000) + fakeInvoice.Terms.Features = emptyFeatures paymentHash := fakeInvoice.Terms.PaymentPreimage.Hash() @@ -110,7 +152,7 @@ func TestInvoiceWorkflow(t *testing.T) { if err != nil { t.Fatalf("unable to fetch invoice: %v", err) } - if dbInvoice2.Terms.State != ContractSettled { + if dbInvoice2.State != ContractSettled { t.Fatalf("invoice should now be settled but isn't") } if dbInvoice2.SettleDate.IsZero() { @@ -147,7 +189,7 @@ func TestInvoiceWorkflow(t *testing.T) { amt := lnwire.NewMSatFromSatoshis(1000) invoices := make([]*Invoice, numInvoices+1) invoices[0] = &dbInvoice2 - for i := 1; i < len(invoices)-1; i++ { + for i := 1; i < len(invoices); i++ { invoice, err := randInvoice(amt) if err != nil { t.Fatalf("unable to create invoice: %v", err) @@ -162,24 +204,95 @@ func TestInvoiceWorkflow(t *testing.T) { } // Perform a scan to collect all the active invoices. - dbInvoices, err := db.FetchAllInvoices(false) + query := InvoiceQuery{ + IndexOffset: 0, + NumMaxInvoices: math.MaxUint64, + PendingOnly: false, + } + + response, err := db.QueryInvoices(query) if err != nil { - t.Fatalf("unable to fetch all invoices: %v", err) + t.Fatalf("invoice query failed: %v", err) } // The retrieve list of invoices should be identical as since we're // using big endian, the invoices should be retrieved in ascending // order (and the primary key should be incremented with each // insertion). 
- for i := 0; i < len(invoices)-1; i++ { - if !reflect.DeepEqual(*invoices[i], dbInvoices[i]) { + for i := 0; i < len(invoices); i++ { + if !reflect.DeepEqual(*invoices[i], response.Invoices[i]) { t.Fatalf("retrieved invoices don't match %v vs %v", spew.Sdump(invoices[i]), - spew.Sdump(dbInvoices[i])) + spew.Sdump(response.Invoices[i])) } } } +// TestInvoiceCancelSingleHtlc tests that a single htlc can be canceled on the +// invoice. +func TestInvoiceCancelSingleHtlc(t *testing.T) { + t.Parallel() + + db, cleanUp, err := makeTestDB() + defer cleanUp() + if err != nil { + t.Fatalf("unable to make test db: %v", err) + } + + testInvoice := &Invoice{ + Htlcs: map[CircuitKey]*InvoiceHTLC{}, + } + testInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000) + testInvoice.Terms.Features = emptyFeatures + + var paymentHash lntypes.Hash + if _, err := db.AddInvoice(testInvoice, paymentHash); err != nil { + t.Fatalf("unable to find invoice: %v", err) + } + + // Accept an htlc on this invoice. + key := CircuitKey{ChanID: lnwire.NewShortChanIDFromInt(1), HtlcID: 4} + htlc := HtlcAcceptDesc{ + Amt: 500, + CustomRecords: make(record.CustomSet), + } + invoice, err := db.UpdateInvoice(paymentHash, + func(invoice *Invoice) (*InvoiceUpdateDesc, error) { + return &InvoiceUpdateDesc{ + AddHtlcs: map[CircuitKey]*HtlcAcceptDesc{ + key: &htlc, + }, + }, nil + }) + if err != nil { + t.Fatalf("unable to add invoice htlc: %v", err) + } + if len(invoice.Htlcs) != 1 { + t.Fatalf("expected the htlc to be added") + } + if invoice.Htlcs[key].State != HtlcStateAccepted { + t.Fatalf("expected htlc in state accepted") + } + + // Cancel the htlc again. 
+ invoice, err = db.UpdateInvoice(paymentHash, func(invoice *Invoice) (*InvoiceUpdateDesc, error) { + return &InvoiceUpdateDesc{ + CancelHtlcs: map[CircuitKey]struct{}{ + key: {}, + }, + }, nil + }) + if err != nil { + t.Fatalf("unable to cancel htlc: %v", err) + } + if len(invoice.Htlcs) != 1 { + t.Fatalf("expected the htlc to be present") + } + if invoice.Htlcs[key].State != HtlcStateCanceled { + t.Fatalf("expected htlc in state canceled") + } +} + // TestInvoiceTimeSeries tests that newly added invoices invoices, as well as // settled invoices are added to the database are properly placed in the add // add or settle index which serves as an event time series. @@ -259,6 +372,8 @@ func TestInvoiceAddTimeSeries(t *testing.T) { } } + var settledInvoices []Invoice + var settleIndex uint64 = 1 // We'll now only settle the latter half of each of those invoices. for i := 10; i < len(invoices); i++ { invoice := &invoices[i] @@ -266,21 +381,18 @@ func TestInvoiceAddTimeSeries(t *testing.T) { paymentHash := invoice.Terms.PaymentPreimage.Hash() _, err := db.UpdateInvoice( - paymentHash, getUpdateInvoice(0), + paymentHash, getUpdateInvoice(invoice.Terms.Value), ) if err != nil { t.Fatalf("unable to settle invoice: %v", err) } - } - invoices, err = db.FetchAllInvoices(false) - if err != nil { - t.Fatalf("unable to fetch invoices: %v", err) - } + // Create the settled invoice for the expectation set. + settleTestInvoice(invoice, settleIndex) + settleIndex++ - // We'll slice off the first 10 invoices, as we only settled the last - // 10. - invoices = invoices[10:] + settledInvoices = append(settledInvoices, *invoice) + } // We'll now prepare an additional set of queries to ensure the settle // time series has properly been maintained in the database. @@ -305,7 +417,7 @@ func TestInvoiceAddTimeSeries(t *testing.T) { // being returned, as we only settled those. 
{ sinceSettleIndex: 1, - resp: invoices[1:], + resp: settledInvoices[1:], }, } @@ -322,6 +434,116 @@ func TestInvoiceAddTimeSeries(t *testing.T) { } } +// Tests that FetchAllInvoicesWithPaymentHash returns all invoices with their +// corresponding payment hashes. +func TestFetchAllInvoicesWithPaymentHash(t *testing.T) { + t.Parallel() + + db, cleanup, err := makeTestDB() + defer cleanup() + if err != nil { + t.Fatalf("unable to make test db: %v", err) + } + + // With an empty DB we expect to return no error and an empty list. + empty, err := db.FetchAllInvoicesWithPaymentHash(false) + if err != nil { + t.Fatalf("failed to call FetchAllInvoicesWithPaymentHash on empty DB: %v", + err) + } + + if len(empty) != 0 { + t.Fatalf("expected empty list as a result, got: %v", empty) + } + + states := []ContractState{ + ContractOpen, ContractSettled, ContractCanceled, ContractAccepted, + } + + numInvoices := len(states) * 2 + testPendingInvoices := make(map[lntypes.Hash]*Invoice) + testAllInvoices := make(map[lntypes.Hash]*Invoice) + + // Now populate the DB and check if we can get all invoices with their + // payment hashes as expected. + for i := 1; i <= numInvoices; i++ { + invoice, err := randInvoice(lnwire.MilliSatoshi(i)) + if err != nil { + t.Fatalf("unable to create invoice: %v", err) + } + + // Set the contract state of the next invoice such that there's an equal + // number for all possbile states. 
+ invoice.State = states[i%len(states)] + paymentHash := invoice.Terms.PaymentPreimage.Hash() + + if invoice.IsPending() { + testPendingInvoices[paymentHash] = invoice + } + + testAllInvoices[paymentHash] = invoice + + if _, err := db.AddInvoice(invoice, paymentHash); err != nil { + t.Fatalf("unable to add invoice: %v", err) + } + } + + pendingInvoices, err := db.FetchAllInvoicesWithPaymentHash(true) + if err != nil { + t.Fatalf("can't fetch invoices with payment hash: %v", err) + } + + if len(testPendingInvoices) != len(pendingInvoices) { + t.Fatalf("expected %v pending invoices, got: %v", + len(testPendingInvoices), len(pendingInvoices)) + } + + allInvoices, err := db.FetchAllInvoicesWithPaymentHash(false) + if err != nil { + t.Fatalf("can't fetch invoices with payment hash: %v", err) + } + + if len(testAllInvoices) != len(allInvoices) { + t.Fatalf("expected %v invoices, got: %v", + len(testAllInvoices), len(allInvoices)) + } + + for i := range pendingInvoices { + expected, ok := testPendingInvoices[pendingInvoices[i].PaymentHash] + if !ok { + t.Fatalf("coulnd't find invoice with hash: %v", + pendingInvoices[i].PaymentHash) + } + + // Zero out add index to not confuse DeepEqual. + pendingInvoices[i].Invoice.AddIndex = 0 + expected.AddIndex = 0 + + if !reflect.DeepEqual(*expected, pendingInvoices[i].Invoice) { + t.Fatalf("expected: %v, got: %v", + spew.Sdump(expected), spew.Sdump(pendingInvoices[i].Invoice)) + } + } + + for i := range allInvoices { + expected, ok := testAllInvoices[allInvoices[i].PaymentHash] + if !ok { + t.Fatalf("coulnd't find invoice with hash: %v", + allInvoices[i].PaymentHash) + } + + // Zero out add index to not confuse DeepEqual. 
+ allInvoices[i].Invoice.AddIndex = 0 + expected.AddIndex = 0 + + if !reflect.DeepEqual(*expected, allInvoices[i].Invoice) { + t.Fatalf("expected: %v, got: %v", + spew.Sdump(expected), spew.Sdump(allInvoices[i].Invoice)) + } + } + +} + // TestDuplicateSettleInvoice tests that if we add a new invoice and settle it // twice, then the second time we also receive the invoice that we settled as a // return argument. @@ -333,7 +555,6 @@ func TestDuplicateSettleInvoice(t *testing.T) { if err != nil { t.Fatalf("unable to make test db: %v", err) } - db.now = func() time.Time { return time.Unix(1, 0) } // We'll start out by creating an invoice and writing it to the DB. amt := lnwire.NewMSatFromSatoshis(1000) @@ -359,15 +580,16 @@ func TestDuplicateSettleInvoice(t *testing.T) { // We'll update what we expect the settle invoice to be so that our // comparison below has the correct assumption. invoice.SettleIndex = 1 - invoice.Terms.State = ContractSettled + invoice.State = ContractSettled invoice.AmtPaid = amt invoice.SettleDate = dbInvoice.SettleDate invoice.Htlcs = map[CircuitKey]*InvoiceHTLC{ {}: { - Amt: amt, - AcceptTime: time.Unix(1, 0), - ResolveTime: time.Unix(1, 0), - State: HtlcStateSettled, + Amt: amt, + AcceptTime: time.Unix(1, 0), + ResolveTime: time.Unix(1, 0), + State: HtlcStateSettled, + CustomRecords: make(record.CustomSet), }, } @@ -412,8 +634,13 @@ func TestQueryInvoices(t *testing.T) { // assume that the index of the invoice within the database is the same // as the amount of the invoice itself. 
const numInvoices = 50 - for i := lnwire.MilliSatoshi(1); i <= numInvoices; i++ { - invoice, err := randInvoice(i) + var settleIndex uint64 = 1 + var invoices []Invoice + var pendingInvoices []Invoice + + for i := 1; i <= numInvoices; i++ { + amt := lnwire.MilliSatoshi(i) + invoice, err := randInvoice(amt) if err != nil { t.Fatalf("unable to create invoice: %v", err) } @@ -427,24 +654,20 @@ func TestQueryInvoices(t *testing.T) { // We'll only settle half of all invoices created. if i%2 == 0 { _, err := db.UpdateInvoice( - paymentHash, getUpdateInvoice(i), + paymentHash, getUpdateInvoice(amt), ) if err != nil { t.Fatalf("unable to settle invoice: %v", err) } + + // Create the settled invoice for the expectation set. + settleTestInvoice(invoice, settleIndex) + settleIndex++ + } else { + pendingInvoices = append(pendingInvoices, *invoice) } - } - // We'll then retrieve the set of all invoices and pending invoices. - // This will serve useful when comparing the expected responses of the - // query with the actual ones. - invoices, err := db.FetchAllInvoices(false) - if err != nil { - t.Fatalf("unable to retrieve invoices: %v", err) - } - pendingInvoices, err := db.FetchAllInvoices(true) - if err != nil { - t.Fatalf("unable to retrieve pending invoices: %v", err) + invoices = append(invoices, *invoice) } // The test will consist of several queries along with their respective @@ -675,16 +898,21 @@ func TestQueryInvoices(t *testing.T) { // settles the invoice with the given amount. 
func getUpdateInvoice(amt lnwire.MilliSatoshi) InvoiceUpdateCallback { return func(invoice *Invoice) (*InvoiceUpdateDesc, error) { - if invoice.Terms.State == ContractSettled { + if invoice.State == ContractSettled { return nil, ErrInvoiceAlreadySettled } + noRecords := make(record.CustomSet) + update := &InvoiceUpdateDesc{ - Preimage: invoice.Terms.PaymentPreimage, - State: ContractSettled, - Htlcs: map[CircuitKey]*HtlcAcceptDesc{ + State: &InvoiceStateUpdateDesc{ + Preimage: invoice.Terms.PaymentPreimage, + NewState: ContractSettled, + }, + AddHtlcs: map[CircuitKey]*HtlcAcceptDesc{ {}: { - Amt: amt, + Amt: amt, + CustomRecords: noRecords, }, }, } @@ -692,3 +920,64 @@ func getUpdateInvoice(amt lnwire.MilliSatoshi) InvoiceUpdateCallback { return update, nil } } + +// TestCustomRecords tests that custom records are properly recorded in the +// invoice database. +func TestCustomRecords(t *testing.T) { + t.Parallel() + + db, cleanUp, err := makeTestDB() + defer cleanUp() + if err != nil { + t.Fatalf("unable to make test db: %v", err) + } + + testInvoice := &Invoice{ + Htlcs: map[CircuitKey]*InvoiceHTLC{}, + } + testInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000) + testInvoice.Terms.Features = emptyFeatures + + var paymentHash lntypes.Hash + if _, err := db.AddInvoice(testInvoice, paymentHash); err != nil { + t.Fatalf("unable to find invoice: %v", err) + } + + // Accept an htlc with custom records on this invoice. 
+ key := CircuitKey{ChanID: lnwire.NewShortChanIDFromInt(1), HtlcID: 4} + + records := record.CustomSet{ + 100000: []byte{}, + 100001: []byte{1, 2}, + } + + _, err = db.UpdateInvoice(paymentHash, + func(invoice *Invoice) (*InvoiceUpdateDesc, error) { + return &InvoiceUpdateDesc{ + AddHtlcs: map[CircuitKey]*HtlcAcceptDesc{ + key: { + Amt: 500, + CustomRecords: records, + }, + }, + }, nil + }, + ) + if err != nil { + t.Fatalf("unable to add invoice htlc: %v", err) + } + + // Retrieve the invoice from that database and verify that the custom + // records are present. + dbInvoice, err := db.LookupInvoice(paymentHash) + if err != nil { + t.Fatalf("unable to lookup invoice: %v", err) + } + + if len(dbInvoice.Htlcs) != 1 { + t.Fatalf("expected the htlc to be added") + } + if !reflect.DeepEqual(records, dbInvoice.Htlcs[key].CustomRecords) { + t.Fatalf("invalid custom records") + } +} diff --git a/channeldb/invoices.go b/channeldb/invoices.go index 20f0c0a8b2..f1954bf3b4 100644 --- a/channeldb/invoices.go +++ b/channeldb/invoices.go @@ -8,10 +8,11 @@ import ( "io" "time" - "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/tlv" ) @@ -77,6 +78,18 @@ var ( // ErrInvoiceStillOpen is returned when the invoice is still open. ErrInvoiceStillOpen = errors.New("invoice still open") + + // ErrInvoiceCannotOpen is returned when an attempt is made to move an + // invoice to the open state. + ErrInvoiceCannotOpen = errors.New("cannot move invoice to open") + + // ErrInvoiceCannotAccept is returned when an attempt is made to accept + // an invoice while the invoice is not in the open state. 
+ ErrInvoiceCannotAccept = errors.New("cannot accept invoice") + + // ErrInvoicePreimageMismatch is returned when the preimage doesn't + // match the invoice hash. + ErrInvoicePreimageMismatch = errors.New("preimage does not match") ) const ( @@ -84,10 +97,6 @@ const ( // in the database. MaxMemoSize = 1024 - // MaxReceiptSize is the maximum size of the payment receipt stored - // within the database along side incoming/outgoing invoices. - MaxReceiptSize = 1024 - // MaxPaymentRequestSize is the max size of a payment request for // this invoice. // TODO(halseth): determine the max length payment request when field @@ -96,6 +105,11 @@ const ( // A set of tlv type definitions used to serialize invoice htlcs to the // database. + // + // NOTE: A migration should be added whenever this list changes. This + // prevents against the database being rolled back to an older + // format where the surrounding logic might assume a different set of + // fields are known. chanIDType tlv.Type = 1 htlcIDType tlv.Type = 3 amtType tlv.Type = 5 @@ -103,7 +117,29 @@ const ( acceptTimeType tlv.Type = 9 resolveTimeType tlv.Type = 11 expiryHeightType tlv.Type = 13 - stateType tlv.Type = 15 + htlcStateType tlv.Type = 15 + mppTotalAmtType tlv.Type = 17 + + // A set of tlv type definitions used to serialize invoice bodiees. + // + // NOTE: A migration should be added whenever this list changes. This + // prevents against the database being rolled back to an older + // format where the surrounding logic might assume a different set of + // fields are known. 
+ memoType tlv.Type = 0 + payReqType tlv.Type = 1 + createTimeType tlv.Type = 2 + settleTimeType tlv.Type = 3 + addIndexType tlv.Type = 4 + settleIndexType tlv.Type = 5 + preimageType tlv.Type = 6 + valueType tlv.Type = 7 + cltvDeltaType tlv.Type = 8 + expiryType tlv.Type = 9 + paymentAddrType tlv.Type = 10 + featuresType tlv.Type = 11 + invStateType tlv.Type = 12 + amtPaidType tlv.Type = 13 ) // ContractState describes the state the invoice is in. @@ -113,15 +149,13 @@ const ( // ContractOpen means the invoice has only been created. ContractOpen ContractState = 0 - // ContractSettled means the htlc is settled and the invoice has been - // paid. + // ContractSettled means the htlc is settled and the invoice has been paid. ContractSettled ContractState = 1 // ContractCanceled means the invoice has been canceled. ContractCanceled ContractState = 2 - // ContractAccepted means the HTLC has been accepted but not settled - // yet. + // ContractAccepted means the HTLC has been accepted but not settled yet. ContractAccepted ContractState = 3 ) @@ -145,6 +179,13 @@ func (c ContractState) String() string { // the necessary conditions required before the invoice can be considered fully // settled by the payee. type ContractTerm struct { + // FinalCltvDelta is the minimum required number of blocks before htlc + // expiry when the invoice is accepted. + FinalCltvDelta int32 + + // Expiry defines how long after creation this invoice should expire. + Expiry time.Duration + // PaymentPreimage is the preimage which is to be revealed in the // occasion that an HTLC paying to the hash of this preimage is // extended. @@ -154,8 +195,18 @@ type ContractTerm struct { // which can be satisfied by the above preimage. Value lnwire.MilliSatoshi - // State describes the state the invoice is in. - State ContractState + // PaymentAddr is a randomly generated value include in the MPP record + // by the sender to prevent probing of the receiver. 
+ PaymentAddr [32]byte + + // Features is the feature vectors advertised on the payment request. + Features *lnwire.FeatureVector +} + +// String returns a human-readable description of the prominent contract terms. +func (c ContractTerm) String() string { + return fmt.Sprintf("amt=%v, expiry=%v, final_cltv_delta=%v", c.Value, + c.Expiry, c.FinalCltvDelta) } // Invoice is a payment invoice generated by a payee in order to request @@ -174,23 +225,10 @@ type Invoice struct { // or any other message which fits within the size constraints. Memo []byte - // Receipt is an optional field dedicated for storing a - // cryptographically binding receipt of payment. - // - // TODO(roasbeef): document scheme. - Receipt []byte - - // PaymentRequest is an optional field where a payment request created - // for this invoice can be stored. + // PaymentRequest is the encoded payment request for this invoice. For + // spontaneous (keysend) payments, this field will be empty. PaymentRequest []byte - // FinalCltvDelta is the minimum required number of blocks before htlc - // expiry when the invoice is accepted. - FinalCltvDelta int32 - - // Expiry defines how long after creation this invoice should expire. - Expiry time.Duration - // CreationDate is the exact time the invoice was created. CreationDate time.Time @@ -223,6 +261,9 @@ type Invoice struct { // NOTE: This index starts at 1. SettleIndex uint64 + // State describes the state the invoice is in. + State ContractState + // AmtPaid is the final amount that we ultimately accepted for pay for // this invoice. We specify this value independently as it's possible // that the invoice originally didn't specify an amount, or the sender @@ -254,6 +295,10 @@ type InvoiceHTLC struct { // Amt is the amount that is carried by this htlc. Amt lnwire.MilliSatoshi + // MppTotalAmt is a field for mpp that indicates the expected total + // amount. 
+ MppTotalAmt lnwire.MilliSatoshi + // AcceptHeight is the block height at which the invoice registry // decided to accept this htlc as a payment to the invoice. At this // height, the invoice cltv delay must have been met. @@ -274,6 +319,10 @@ type InvoiceHTLC struct { // canceled htlc isn't just removed from the invoice htlcs map, because // we need AcceptHeight to properly cancel the htlc back. State HtlcState + + // CustomRecords contains the custom key/value pairs that accompanied + // the htlc. + CustomRecords record.CustomSet } // HtlcAcceptDesc describes the details of a newly accepted htlc. @@ -284,22 +333,39 @@ type HtlcAcceptDesc struct { // Amt is the amount that is carried by this htlc. Amt lnwire.MilliSatoshi + // MppTotalAmt is a field for mpp that indicates the expected total + // amount. + MppTotalAmt lnwire.MilliSatoshi + // Expiry is the expiry height of this htlc. Expiry uint32 + + // CustomRecords contains the custom key/value pairs that accompanied + // the htlc. + CustomRecords record.CustomSet } // InvoiceUpdateDesc describes the changes that should be applied to the // invoice. type InvoiceUpdateDesc struct { - // State is the new state that this invoice should progress to. - State ContractState + // State is the new state that this invoice should progress to. If nil, + // the state is left unchanged. + State *InvoiceStateUpdateDesc - // Htlcs describes the changes that need to be made to the invoice htlcs - // in the database. Htlc map entries with their value set should be - // added. If the map value is nil, the htlc should be canceled. - Htlcs map[CircuitKey]*HtlcAcceptDesc + // CancelHtlcs describes the htlcs that need to be canceled. + CancelHtlcs map[CircuitKey]struct{} + + // AddHtlcs describes the newly accepted htlcs that need to be added to + // the invoice. + AddHtlcs map[CircuitKey]*HtlcAcceptDesc +} - // Preimage must be set to the preimage when state is settled. 
+// InvoiceStateUpdateDesc describes an invoice-level state transition. +type InvoiceStateUpdateDesc struct { + // NewState is the new state that this invoice should progress to. + NewState ContractState + + // Preimage must be set to the preimage when NewState is settled. Preimage lntypes.Preimage } @@ -312,19 +378,22 @@ func validateInvoice(i *Invoice) error { return fmt.Errorf("max length a memo is %v, and invoice "+ "of length %v was provided", MaxMemoSize, len(i.Memo)) } - if len(i.Receipt) > MaxReceiptSize { - return fmt.Errorf("max length a receipt is %v, and invoice "+ - "of length %v was provided", MaxReceiptSize, - len(i.Receipt)) - } if len(i.PaymentRequest) > MaxPaymentRequestSize { return fmt.Errorf("max length of payment request is %v, length "+ "provided was %v", MaxPaymentRequestSize, len(i.PaymentRequest)) } + if i.Terms.Features == nil { + return errors.New("invoice must have a feature vector") + } return nil } +// IsPending returns ture if the invoice is in ContractOpen state. +func (i *Invoice) IsPending() bool { + return i.State == ContractOpen || i.State == ContractAccepted +} + // AddInvoice inserts the targeted invoice into the database. 
If the invoice has // *any* payment hashes which already exists within the database, then the // insertion will be aborted and rejected due to the strict policy banning any @@ -338,8 +407,8 @@ func (d *DB) AddInvoice(newInvoice *Invoice, paymentHash lntypes.Hash) ( } var invoiceAddIndex uint64 - err := d.Update(func(tx *bbolt.Tx) error { - invoices, err := tx.CreateBucketIfNotExists(invoiceBucket) + err := kvdb.Update(d, func(tx kvdb.RwTx) error { + invoices, err := tx.CreateTopLevelBucket(invoiceBucket) if err != nil { return err } @@ -416,13 +485,13 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) { var startIndex [8]byte byteOrder.PutUint64(startIndex[:], sinceAddIndex) - err := d.DB.View(func(tx *bbolt.Tx) error { - invoices := tx.Bucket(invoiceBucket) + err := kvdb.View(d, func(tx kvdb.ReadTx) error { + invoices := tx.ReadBucket(invoiceBucket) if invoices == nil { return ErrNoInvoicesCreated } - addIndex := invoices.Bucket(addIndexBucket) + addIndex := invoices.NestedReadBucket(addIndexBucket) if addIndex == nil { return ErrNoInvoicesCreated } @@ -430,7 +499,7 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) { // We'll now run through each entry in the add index starting // at our starting index. We'll continue until we reach the // very end of the current key space. - invoiceCursor := addIndex.Cursor() + invoiceCursor := addIndex.ReadCursor() // We'll seek to the starting index, then manually advance the // cursor in order to skip the entry with the since add index. @@ -471,12 +540,12 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) { // terms of the payment. 
func (d *DB) LookupInvoice(paymentHash [32]byte) (Invoice, error) { var invoice Invoice - err := d.View(func(tx *bbolt.Tx) error { - invoices := tx.Bucket(invoiceBucket) + err := kvdb.View(d, func(tx kvdb.ReadTx) error { + invoices := tx.ReadBucket(invoiceBucket) if invoices == nil { return ErrNoInvoicesCreated } - invoiceIndex := invoices.Bucket(invoiceIndexBucket) + invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket) if invoiceIndex == nil { return ErrNoInvoicesCreated } @@ -505,48 +574,78 @@ func (d *DB) LookupInvoice(paymentHash [32]byte) (Invoice, error) { return invoice, nil } -// FetchAllInvoices returns all invoices currently stored within the database. -// If the pendingOnly param is true, then only unsettled invoices will be -// returned, skipping all invoices that are fully settled. -func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, error) { - var invoices []Invoice +// InvoiceWithPaymentHash is used to store an invoice and its corresponding +// payment hash. This struct is only used to store results of +// ChannelDB.FetchAllInvoicesWithPaymentHash() call. +type InvoiceWithPaymentHash struct { + // Invoice holds the invoice as selected from the invoices bucket. + Invoice Invoice + + // PaymentHash is the payment hash for the Invoice. + PaymentHash lntypes.Hash +} - err := d.View(func(tx *bbolt.Tx) error { - invoiceB := tx.Bucket(invoiceBucket) - if invoiceB == nil { +// FetchAllInvoicesWithPaymentHash returns all invoices and their payment hashes +// currently stored within the database. If the pendingOnly param is true, then +// only open or accepted invoices and their payment hashes will be returned, +// skipping all invoices that are fully settled or canceled. Note that the +// returned array is not ordered by add index. 
+func (d *DB) FetchAllInvoicesWithPaymentHash(pendingOnly bool) ( + []InvoiceWithPaymentHash, error) { + + var result []InvoiceWithPaymentHash + + err := kvdb.View(d, func(tx kvdb.ReadTx) error { + invoices := tx.ReadBucket(invoiceBucket) + if invoices == nil { return ErrNoInvoicesCreated } - // Iterate through the entire key space of the top-level - // invoice bucket. If key with a non-nil value stores the next - // invoice ID which maps to the corresponding invoice. - return invoiceB.ForEach(func(k, v []byte) error { + invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket) + if invoiceIndex == nil { + // Mask the error if there's no invoice + // index as that simply means there are no + // invoices added yet to the DB. In this case + // we simply return an empty list. + return nil + } + + return invoiceIndex.ForEach(func(k, v []byte) error { + // Skip the special numInvoicesKey as that does not + // point to a valid invoice. + if bytes.Equal(k, numInvoicesKey) { + return nil + } + if v == nil { return nil } - invoiceReader := bytes.NewReader(v) - invoice, err := deserializeInvoice(invoiceReader) + invoice, err := fetchInvoice(v, invoices) if err != nil { return err } - if pendingOnly && - invoice.Terms.State == ContractSettled { - + if pendingOnly && !invoice.IsPending() { return nil } - invoices = append(invoices, invoice) + invoiceWithPaymentHash := InvoiceWithPaymentHash{ + Invoice: invoice, + } + + copy(invoiceWithPaymentHash.PaymentHash[:], k) + result = append(result, invoiceWithPaymentHash) return nil }) }) + if err != nil { return nil, err } - return invoices, nil + return result, nil } // InvoiceQuery represents a query to the invoice database. 
The query allows a @@ -602,21 +701,21 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) { InvoiceQuery: q, } - err := d.View(func(tx *bbolt.Tx) error { + err := kvdb.View(d, func(tx kvdb.ReadTx) error { // If the bucket wasn't found, then there aren't any invoices // within the database yet, so we can simply exit. - invoices := tx.Bucket(invoiceBucket) + invoices := tx.ReadBucket(invoiceBucket) if invoices == nil { return ErrNoInvoicesCreated } - invoiceAddIndex := invoices.Bucket(addIndexBucket) + invoiceAddIndex := invoices.NestedReadBucket(addIndexBucket) if invoiceAddIndex == nil { return ErrNoInvoicesCreated } // keyForIndex is a helper closure that retrieves the invoice // key for the given add index of an invoice. - keyForIndex := func(c *bbolt.Cursor, index uint64) []byte { + keyForIndex := func(c kvdb.ReadCursor, index uint64) []byte { var keyIndex [8]byte byteOrder.PutUint64(keyIndex[:], index) _, invoiceKey := c.Seek(keyIndex[:]) @@ -625,7 +724,7 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) { // nextKey is a helper closure to determine what the next // invoice key is when iterating over the invoice add index. - nextKey := func(c *bbolt.Cursor) ([]byte, []byte) { + nextKey := func(c kvdb.ReadCursor) ([]byte, []byte) { if q.Reversed { return c.Prev() } @@ -635,7 +734,7 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) { // We'll be using a cursor to seek into the database and return // a slice of invoices. We'll need to determine where to start // our cursor depending on the parameters set within the query. - c := invoiceAddIndex.Cursor() + c := invoiceAddIndex.ReadCursor() invoiceKey := keyForIndex(c, q.IndexOffset+1) // If the query is specifying reverse iteration, then we must @@ -679,11 +778,9 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) { return err } - // Skip any settled invoices if the caller is only - // interested in unsettled. 
- if q.PendingOnly && - invoice.Terms.State == ContractSettled { - + // Skip any settled or canceled invoices if the caller is + // only interested in pending ones. + if q.PendingOnly && !invoice.IsPending() { continue } @@ -731,8 +828,8 @@ func (d *DB) UpdateInvoice(paymentHash lntypes.Hash, callback InvoiceUpdateCallback) (*Invoice, error) { var updatedInvoice *Invoice - err := d.Update(func(tx *bbolt.Tx) error { - invoices, err := tx.CreateBucketIfNotExists(invoiceBucket) + err := kvdb.Update(d, func(tx kvdb.RwTx) error { + invoices, err := tx.CreateTopLevelBucket(invoiceBucket) if err != nil { return err } @@ -786,13 +883,13 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) { var startIndex [8]byte byteOrder.PutUint64(startIndex[:], sinceSettleIndex) - err := d.DB.View(func(tx *bbolt.Tx) error { - invoices := tx.Bucket(invoiceBucket) + err := kvdb.View(d, func(tx kvdb.ReadTx) error { + invoices := tx.ReadBucket(invoiceBucket) if invoices == nil { return ErrNoInvoicesCreated } - settleIndex := invoices.Bucket(settleIndexBucket) + settleIndex := invoices.NestedReadBucket(settleIndexBucket) if settleIndex == nil { return ErrNoInvoicesCreated } @@ -800,7 +897,7 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) { // We'll now run through each entry in the add index starting // at our starting index. We'll continue until we reach the // very end of the current key space. - invoiceCursor := settleIndex.Cursor() + invoiceCursor := settleIndex.ReadCursor() // We'll seek to the starting index, then manually advance the // cursor in order to skip the entry with the since add index. 
@@ -828,7 +925,7 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) { return settledInvoices, nil } -func putInvoice(invoices, invoiceIndex, addIndex *bbolt.Bucket, +func putInvoice(invoices, invoiceIndex, addIndex kvdb.RwBucket, i *Invoice, invoiceNum uint32, paymentHash lntypes.Hash) ( uint64, error) { @@ -892,71 +989,73 @@ func putInvoice(invoices, invoiceIndex, addIndex *bbolt.Bucket, // would modify the on disk format, make a copy of the original code and store // it with the migration. func serializeInvoice(w io.Writer, i *Invoice) error { - if err := wire.WriteVarBytes(w, 0, i.Memo[:]); err != nil { - return err - } - if err := wire.WriteVarBytes(w, 0, i.Receipt[:]); err != nil { - return err - } - if err := wire.WriteVarBytes(w, 0, i.PaymentRequest[:]); err != nil { - return err - } - - if err := binary.Write(w, byteOrder, i.FinalCltvDelta); err != nil { - return err - } - - if err := binary.Write(w, byteOrder, int64(i.Expiry)); err != nil { - return err - } - - birthBytes, err := i.CreationDate.MarshalBinary() + creationDateBytes, err := i.CreationDate.MarshalBinary() if err != nil { return err } - if err := wire.WriteVarBytes(w, 0, birthBytes); err != nil { - return err - } - - settleBytes, err := i.SettleDate.MarshalBinary() + settleDateBytes, err := i.SettleDate.MarshalBinary() if err != nil { return err } - if err := wire.WriteVarBytes(w, 0, settleBytes); err != nil { - return err - } - - if _, err := w.Write(i.Terms.PaymentPreimage[:]); err != nil { + var fb bytes.Buffer + err = i.Terms.Features.EncodeBase256(&fb) + if err != nil { return err } - - var scratch [8]byte - byteOrder.PutUint64(scratch[:], uint64(i.Terms.Value)) - if _, err := w.Write(scratch[:]); err != nil { + featureBytes := fb.Bytes() + + preimage := [32]byte(i.Terms.PaymentPreimage) + value := uint64(i.Terms.Value) + cltvDelta := uint32(i.Terms.FinalCltvDelta) + expiry := uint64(i.Terms.Expiry) + + amtPaid := uint64(i.AmtPaid) + state := uint8(i.State) + 
+ tlvStream, err := tlv.NewStream( + // Memo and payreq. + tlv.MakePrimitiveRecord(memoType, &i.Memo), + tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest), + + // Add/settle metadata. + tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes), + tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes), + tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex), + tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex), + + // Terms. + tlv.MakePrimitiveRecord(preimageType, &preimage), + tlv.MakePrimitiveRecord(valueType, &value), + tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta), + tlv.MakePrimitiveRecord(expiryType, &expiry), + tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr), + tlv.MakePrimitiveRecord(featuresType, &featureBytes), + + // Invoice state. + tlv.MakePrimitiveRecord(invStateType, &state), + tlv.MakePrimitiveRecord(amtPaidType, &amtPaid), + ) + if err != nil { return err } - if err := binary.Write(w, byteOrder, i.Terms.State); err != nil { + var b bytes.Buffer + if err = tlvStream.Encode(&b); err != nil { return err } - if err := binary.Write(w, byteOrder, i.AddIndex); err != nil { - return err - } - if err := binary.Write(w, byteOrder, i.SettleIndex); err != nil { - return err - } - if err := binary.Write(w, byteOrder, int64(i.AmtPaid)); err != nil { + err = binary.Write(w, byteOrder, uint64(b.Len())) + if err != nil { return err } - if err := serializeHtlcs(w, i.Htlcs); err != nil { + if _, err = w.Write(b.Bytes()); err != nil { return err } - return nil + return serializeHtlcs(w, i.Htlcs) } // serializeHtlcs serializes a map containing circuit keys and invoice htlcs to @@ -966,11 +1065,13 @@ func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) error { // Encode the htlc in a tlv stream. 
chanID := key.ChanID.ToUint64() amt := uint64(htlc.Amt) + mppTotalAmt := uint64(htlc.MppTotalAmt) acceptTime := uint64(htlc.AcceptTime.UnixNano()) resolveTime := uint64(htlc.ResolveTime.UnixNano()) state := uint8(htlc.State) - tlvStream, err := tlv.NewStream( + var records []tlv.Record + records = append(records, tlv.MakePrimitiveRecord(chanIDType, &chanID), tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID), tlv.MakePrimitiveRecord(amtType, &amt), @@ -980,8 +1081,19 @@ func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) error { tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime), tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime), tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry), - tlv.MakePrimitiveRecord(stateType, &state), + tlv.MakePrimitiveRecord(htlcStateType, &state), + tlv.MakePrimitiveRecord(mppTotalAmtType, &mppTotalAmt), ) + + // Convert the custom records to tlv.Record types that are ready + // for serialization. + customRecords := tlv.MapToRecords(htlc.CustomRecords) + + // Append the custom records. Their ids are in the experimental + // range and sorted, so there is no need to sort again. + records = append(records, customRecords...) + + tlvStream, err := tlv.NewStream(records...) 
if err != nil { return err } @@ -1006,7 +1118,7 @@ func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) error { return nil } -func fetchInvoice(invoiceNum []byte, invoices *bbolt.Bucket) (Invoice, error) { +func fetchInvoice(invoiceNum []byte, invoices kvdb.ReadBucket) (Invoice, error) { invoiceBytes := invoices.Get(invoiceNum) if invoiceBytes == nil { return Invoice{}, ErrInvoiceNotFound @@ -1018,79 +1130,89 @@ func fetchInvoice(invoiceNum []byte, invoices *bbolt.Bucket) (Invoice, error) { } func deserializeInvoice(r io.Reader) (Invoice, error) { - var err error - invoice := Invoice{} - - // TODO(roasbeef): use read full everywhere - invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "") - if err != nil { - return invoice, err - } - invoice.Receipt, err = wire.ReadVarBytes(r, 0, MaxReceiptSize, "") + var ( + preimage [32]byte + value uint64 + cltvDelta uint32 + expiry uint64 + amtPaid uint64 + state uint8 + + creationDateBytes []byte + settleDateBytes []byte + featureBytes []byte + ) + + var i Invoice + tlvStream, err := tlv.NewStream( + // Memo and payreq. + tlv.MakePrimitiveRecord(memoType, &i.Memo), + tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest), + + // Add/settle metadata. + tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes), + tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes), + tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex), + tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex), + + // Terms. + tlv.MakePrimitiveRecord(preimageType, &preimage), + tlv.MakePrimitiveRecord(valueType, &value), + tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta), + tlv.MakePrimitiveRecord(expiryType, &expiry), + tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr), + tlv.MakePrimitiveRecord(featuresType, &featureBytes), + + // Invoice state. 
+ tlv.MakePrimitiveRecord(invStateType, &state), + tlv.MakePrimitiveRecord(amtPaidType, &amtPaid), + ) if err != nil { - return invoice, err + return i, err } - invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "") + var bodyLen int64 + err = binary.Read(r, byteOrder, &bodyLen) if err != nil { - return invoice, err + return i, err } - if err := binary.Read(r, byteOrder, &invoice.FinalCltvDelta); err != nil { - return invoice, err + lr := io.LimitReader(r, bodyLen) + if err = tlvStream.Decode(lr); err != nil { + return i, err } - var expiry int64 - if err := binary.Read(r, byteOrder, &expiry); err != nil { - return invoice, err - } - invoice.Expiry = time.Duration(expiry) + i.Terms.PaymentPreimage = lntypes.Preimage(preimage) + i.Terms.Value = lnwire.MilliSatoshi(value) + i.Terms.FinalCltvDelta = int32(cltvDelta) + i.Terms.Expiry = time.Duration(expiry) + i.AmtPaid = lnwire.MilliSatoshi(amtPaid) + i.State = ContractState(state) - birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth") + err = i.CreationDate.UnmarshalBinary(creationDateBytes) if err != nil { - return invoice, err - } - if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil { - return invoice, err + return i, err } - settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled") + err = i.SettleDate.UnmarshalBinary(settleDateBytes) if err != nil { - return invoice, err - } - if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil { - return invoice, err + return i, err } - if _, err := io.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil { - return invoice, err - } - var scratch [8]byte - if _, err := io.ReadFull(r, scratch[:]); err != nil { - return invoice, err - } - invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) - - if err := binary.Read(r, byteOrder, &invoice.Terms.State); err != nil { - return invoice, err - } - - if err := binary.Read(r, byteOrder, &invoice.AddIndex); err != nil { - return invoice, err - } 
- if err := binary.Read(r, byteOrder, &invoice.SettleIndex); err != nil { - return invoice, err - } - if err := binary.Read(r, byteOrder, &invoice.AmtPaid); err != nil { - return invoice, err - } - - invoice.Htlcs, err = deserializeHtlcs(r) + rawFeatures := lnwire.NewRawFeatureVector() + err = rawFeatures.DecodeBase256( + bytes.NewReader(featureBytes), len(featureBytes), + ) if err != nil { - return Invoice{}, err + return i, err } - return invoice, nil + i.Terms.Features = lnwire.NewFeatureVector( + rawFeatures, lnwire.Features, + ) + + i.Htlcs, err = deserializeHtlcs(r) + return i, err } // deserializeHtlcs reads a list of invoice htlcs from a reader and returns it @@ -1100,7 +1222,7 @@ func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, error) { for { // Read the length of the tlv stream for this htlc. - var streamLen uint64 + var streamLen int64 if err := binary.Read(r, byteOrder, &streamLen); err != nil { if err == io.EOF { break @@ -1109,11 +1231,9 @@ func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, error) { return nil, err } - streamBytes := make([]byte, streamLen) - if _, err := r.Read(streamBytes); err != nil { - return nil, err - } - streamReader := bytes.NewReader(streamBytes) + // Limit the reader so that it stops at the end of this htlc's + // stream. + htlcReader := io.LimitReader(r, streamLen) // Decode the contents into the htlc fields. 
var ( @@ -1122,7 +1242,7 @@ func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, error) { chanID uint64 state uint8 acceptTime, resolveTime uint64 - amt uint64 + amt, mppTotalAmt uint64 ) tlvStream, err := tlv.NewStream( tlv.MakePrimitiveRecord(chanIDType, &chanID), @@ -1134,13 +1254,15 @@ func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, error) { tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime), tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime), tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry), - tlv.MakePrimitiveRecord(stateType, &state), + tlv.MakePrimitiveRecord(htlcStateType, &state), + tlv.MakePrimitiveRecord(mppTotalAmtType, &mppTotalAmt), ) if err != nil { return nil, err } - if err := tlvStream.Decode(streamReader); err != nil { + parsedTypes, err := tlvStream.DecodeWithParsedTypes(htlcReader) + if err != nil { return nil, err } @@ -1149,6 +1271,11 @@ func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, error) { htlc.ResolveTime = time.Unix(0, int64(resolveTime)) htlc.State = HtlcState(state) htlc.Amt = lnwire.MilliSatoshi(amt) + htlc.MppTotalAmt = lnwire.MilliSatoshi(mppTotalAmt) + + // Reconstruct the custom records fields from the parsed types + // map return from the tlv parser. + htlc.CustomRecords = hop.NewCustomRecords(parsedTypes) htlcs[key] = &htlc } @@ -1163,26 +1290,40 @@ func copySlice(src []byte) []byte { return dest } +// copyInvoiceHTLC makes a deep copy of the supplied invoice HTLC. +func copyInvoiceHTLC(src *InvoiceHTLC) *InvoiceHTLC { + result := *src + + // Make a copy of the CustomSet map. + result.CustomRecords = make(record.CustomSet) + for k, v := range src.CustomRecords { + result.CustomRecords[k] = v + } + + return &result +} + // copyInvoice makes a deep copy of the supplied invoice. 
func copyInvoice(src *Invoice) *Invoice { dest := Invoice{ Memo: copySlice(src.Memo), - Receipt: copySlice(src.Receipt), PaymentRequest: copySlice(src.PaymentRequest), - FinalCltvDelta: src.FinalCltvDelta, CreationDate: src.CreationDate, SettleDate: src.SettleDate, Terms: src.Terms, AddIndex: src.AddIndex, SettleIndex: src.SettleIndex, + State: src.State, AmtPaid: src.AmtPaid, Htlcs: make( map[CircuitKey]*InvoiceHTLC, len(src.Htlcs), ), } + dest.Terms.Features = src.Terms.Features.Clone() + for k, v := range src.Htlcs { - dest.Htlcs[k] = v + dest.Htlcs[k] = copyInvoiceHTLC(v) } return &dest @@ -1190,7 +1331,7 @@ func copyInvoice(src *Invoice) *Invoice { // updateInvoice fetches the invoice, obtains the update descriptor from the // callback and applies the updates in a single db transaction. -func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex *bbolt.Bucket, +func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex kvdb.RwBucket, invoiceNum []byte, callback InvoiceUpdateCallback) (*Invoice, error) { invoice, err := fetchInvoice(invoiceNum, invoices) @@ -1198,91 +1339,121 @@ func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex *bbolt.Bucke return nil, err } - preUpdateState := invoice.Terms.State - // Create deep copy to prevent any accidental modification in the // callback. - copy := copyInvoice(&invoice) + invoiceCopy := copyInvoice(&invoice) // Call the callback and obtain the update descriptor. - update, err := callback(copy) + update, err := callback(invoiceCopy) if err != nil { return &invoice, err } - // Update invoice state. - invoice.Terms.State = update.State + // If there is nothing to update, return early. + if update == nil { + return &invoice, nil + } - now := d.now() + now := d.clock.Now() - // Update htlc set. - for key, htlcUpdate := range update.Htlcs { - htlc, ok := invoice.Htlcs[key] + // Update invoice state if the update descriptor indicates an invoice + // state change. 
+ if update.State != nil { + err := updateInvoiceState(&invoice, hash, *update.State) + if err != nil { + return nil, err + } - // No update means the htlc needs to be canceled. - if htlcUpdate == nil { - if !ok { - return nil, fmt.Errorf("unknown htlc %v", key) - } - if htlc.State != HtlcStateAccepted { - return nil, fmt.Errorf("can only cancel " + - "accepted htlcs") + if update.State.NewState == ContractSettled { + err := setSettleMetaFields( + settleIndex, invoiceNum, &invoice, now, + ) + if err != nil { + return nil, err } - - htlc.State = HtlcStateCanceled - htlc.ResolveTime = now - invoice.AmtPaid -= htlc.Amt - - continue } + } - // Add new htlc paying to the invoice. - if ok { - return nil, fmt.Errorf("htlc %v already exists", key) + // Process add actions from update descriptor. + for key, htlcUpdate := range update.AddHtlcs { + if _, exists := invoice.Htlcs[key]; exists { + return nil, fmt.Errorf("duplicate add of htlc %v", key) } - htlc = &InvoiceHTLC{ - Amt: htlcUpdate.Amt, - Expiry: htlcUpdate.Expiry, - AcceptHeight: uint32(htlcUpdate.AcceptHeight), - AcceptTime: now, + + // Force caller to supply htlc without custom records in a + // consistent way. + if htlcUpdate.CustomRecords == nil { + return nil, errors.New("nil custom records map") } - if preUpdateState == ContractSettled { - htlc.State = HtlcStateSettled - htlc.ResolveTime = now - } else { - htlc.State = HtlcStateAccepted + + htlc := &InvoiceHTLC{ + Amt: htlcUpdate.Amt, + MppTotalAmt: htlcUpdate.MppTotalAmt, + Expiry: htlcUpdate.Expiry, + AcceptHeight: uint32(htlcUpdate.AcceptHeight), + AcceptTime: now, + State: HtlcStateAccepted, + CustomRecords: htlcUpdate.CustomRecords, } invoice.Htlcs[key] = htlc - invoice.AmtPaid += htlc.Amt } - // If invoice moved to the settled state, update settle index and settle - // time. 
- if preUpdateState != invoice.Terms.State && - invoice.Terms.State == ContractSettled { - - if update.Preimage.Hash() != hash { - return nil, fmt.Errorf("preimage does not match") - } - invoice.Terms.PaymentPreimage = update.Preimage + // Align htlc states with invoice state and recalculate amount paid. + var ( + amtPaid lnwire.MilliSatoshi + cancelHtlcs = update.CancelHtlcs + ) + for key, htlc := range invoice.Htlcs { + // Check whether this htlc needs to be canceled. If it does, + // update the htlc state to Canceled. + _, cancel := cancelHtlcs[key] + if cancel { + // Consistency check to verify that there is no overlap + // between the add and cancel sets. + if _, added := update.AddHtlcs[key]; added { + return nil, fmt.Errorf("added htlc %v canceled", + key) + } - // Settle all accepted htlcs. - for _, htlc := range invoice.Htlcs { - if htlc.State != HtlcStateAccepted { - continue + err := cancelSingleHtlc(now, htlc, invoice.State) + if err != nil { + return nil, err } - htlc.State = HtlcStateSettled - htlc.ResolveTime = now + // Delete processed cancel action, so that we can check + // later that there are no actions left. + delete(cancelHtlcs, key) + + continue } - err := setSettleFields(settleIndex, invoiceNum, &invoice, now) + // The invoice state may have changed and this could have + // implications for the states of the individual htlcs. Align + // the htlc state with the current invoice state. + err := updateHtlc(now, htlc, invoice.State) if err != nil { return nil, err } + + // Update the running amount paid to this invoice. We don't + // include accepted htlcs when the invoice is still open. + if invoice.State != ContractOpen && + (htlc.State == HtlcStateAccepted || + htlc.State == HtlcStateSettled) { + + amtPaid += htlc.Amt + } } + invoice.AmtPaid = amtPaid + // Verify that we didn't get an action for htlcs that are not present on + // the invoice. 
+ if len(cancelHtlcs) > 0 { + return nil, errors.New("cancel action on non-existent htlc(s)") + } + + // Reserialize and update invoice. var buf bytes.Buffer if err := serializeInvoice(&buf, &invoice); err != nil { return nil, err @@ -1295,7 +1466,119 @@ func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex *bbolt.Bucke return &invoice, nil } -func setSettleFields(settleIndex *bbolt.Bucket, invoiceNum []byte, +// updateInvoiceState validates and processes an invoice state update. +func updateInvoiceState(invoice *Invoice, hash lntypes.Hash, + update InvoiceStateUpdateDesc) error { + + // Returning to open is never allowed from any state. + if update.NewState == ContractOpen { + return ErrInvoiceCannotOpen + } + + switch invoice.State { + + // Once a contract is accepted, we can only transition to settled or + // canceled. Forbid transitioning back into this state. Otherwise this + // state is identical to ContractOpen, so we fallthrough to apply the + // same checks that we apply to open invoices. + case ContractAccepted: + if update.NewState == ContractAccepted { + return ErrInvoiceCannotAccept + } + + fallthrough + + // If a contract is open, permit a state transition to accepted, settled + // or canceled. The only restriction is on transitioning to settled + // where we ensure the preimage is valid. + case ContractOpen: + if update.NewState == ContractSettled { + // Validate preimage. + if update.Preimage.Hash() != hash { + return ErrInvoicePreimageMismatch + } + invoice.Terms.PaymentPreimage = update.Preimage + } + + // Once settled, we are in a terminal state. + case ContractSettled: + return ErrInvoiceAlreadySettled + + // Once canceled, we are in a terminal state. + case ContractCanceled: + return ErrInvoiceAlreadyCanceled + + default: + return errors.New("unknown state transition") + } + + invoice.State = update.NewState + + return nil +} + +// cancelSingleHtlc validates cancelation of a single htlc and update its state. 
+func cancelSingleHtlc(resolveTime time.Time, htlc *InvoiceHTLC, + invState ContractState) error { + + // It is only possible to cancel individual htlcs on an open invoice. + if invState != ContractOpen { + return fmt.Errorf("htlc canceled on invoice in "+ + "state %v", invState) + } + + // It is only possible if the htlc is still pending. + if htlc.State != HtlcStateAccepted { + return fmt.Errorf("htlc canceled in state %v", + htlc.State) + } + + htlc.State = HtlcStateCanceled + htlc.ResolveTime = resolveTime + + return nil +} + +// updateHtlc aligns the state of an htlc with the given invoice state. +func updateHtlc(resolveTime time.Time, htlc *InvoiceHTLC, + invState ContractState) error { + + switch invState { + + case ContractSettled: + if htlc.State == HtlcStateAccepted { + htlc.State = HtlcStateSettled + htlc.ResolveTime = resolveTime + } + + case ContractCanceled: + switch htlc.State { + + case HtlcStateAccepted: + htlc.State = HtlcStateCanceled + htlc.ResolveTime = resolveTime + + case HtlcStateSettled: + return fmt.Errorf("cannot have a settled htlc with " + + "invoice in state canceled") + } + + case ContractOpen, ContractAccepted: + if htlc.State == HtlcStateSettled { + return fmt.Errorf("cannot have a settled htlc with "+ + "invoice in state %v", invState) + } + + default: + return errors.New("unknown state transition") + } + + return nil +} + +// setSettleMetaFields updates the metadata associated with settlement of an +// invoice. 
+func setSettleMetaFields(settleIndex kvdb.RwBucket, invoiceNum []byte, invoice *Invoice, now time.Time) error { // Now that we know the invoice hasn't already been settled, we'll @@ -1312,7 +1595,6 @@ func setSettleFields(settleIndex *bbolt.Bucket, invoiceNum []byte, return err } - invoice.Terms.State = ContractSettled invoice.SettleDate = now invoice.SettleIndex = nextSettleSeqNo diff --git a/channeldb/kvdb/bbolt.go b/channeldb/kvdb/bbolt.go new file mode 100644 index 0000000000..b249e7dbb6 --- /dev/null +++ b/channeldb/kvdb/bbolt.go @@ -0,0 +1,10 @@ +package kvdb + +import ( + _ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend. +) + +// BoltBackendName is the name of the backend that should be passed into +// kvdb.Create to initialize a new instance of kvdb.Backend backed by a live +// instance of bbolt. +const BoltBackendName = "bdb" diff --git a/channeldb/kvdb/interface.go b/channeldb/kvdb/interface.go new file mode 100644 index 0000000000..ec426410c7 --- /dev/null +++ b/channeldb/kvdb/interface.go @@ -0,0 +1,90 @@ +package kvdb + +import ( + "github.com/btcsuite/btcwallet/walletdb" + _ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend. +) + +// Update opens a database read/write transaction and executes the function f +// with the transaction passed as a parameter. After f exits, if f did not +// error, the transaction is committed. Otherwise, if f did error, the +// transaction is rolled back. If the rollback fails, the original error +// returned by f is still returned. If the commit fails, the commit error is +// returned. +var Update = walletdb.Update + +// View opens a database read transaction and executes the function f with the +// transaction passed as a parameter. After f exits, the transaction is rolled +// back. If f errors, its error is returned, not a rollback error (if any +// occur). 
+var View = walletdb.View + +// Batch is identical to the Update call, but it attempts to combine several +// individual Update transactions into a single write database transaction on +// an optimistic basis. This only has benefits if multiple goroutines call +// Batch. +var Batch = walletdb.Batch + +// Create initializes and opens a database for the specified type. The +// arguments are specific to the database type driver. See the documentation +// for the database driver for further details. +// +// ErrDbUnknownType will be returned if the database type is not registered. +var Create = walletdb.Create + +// Backend represents an ACID database. All database access is performed +// through read or read+write transactions. +type Backend = walletdb.DB + +// Open opens an existing database for the specified type. The arguments are +// specific to the database type driver. See the documentation for the database +// driver for further details. +// +// ErrDbUnknownType will be returned if the database type is not registered. +var Open = walletdb.Open + +// Driver defines a structure for backend drivers to use when they registered +// themselves as a backend which implements the Backend interface. +type Driver = walletdb.Driver + +// ReadBucket represents a bucket (a hierarchical structure within the +// database) that is only allowed to perform read operations. +type ReadBucket = walletdb.ReadBucket + +// ReadCursor represents a bucket cursor that can be positioned at the start or +// end of the bucket's key/value pairs and iterate over pairs in the bucket. +// This type is only allowed to perform database read operations. +type ReadCursor = walletdb.ReadCursor + +// ReadTx represents a database transaction that can only be used for reads. If +// a database update must occur, use a RwTx. +type ReadTx = walletdb.ReadTx + +// RwBucket represents a bucket (a hierarchical structure within the database) +// that is allowed to perform both read and write operations. 
+type RwBucket = walletdb.ReadWriteBucket + +// RwCursor represents a bucket cursor that can be positioned at the start or +// end of the bucket's key/value pairs and iterate over pairs in the bucket. +// This abstraction is allowed to perform both database read and write +// operations. +type RwCursor = walletdb.ReadWriteCursor + +// ReadWriteTx represents a database transaction that can be used for both +// reads and writes. When only reads are necessary, consider using a ReadTx +// instead. +type RwTx = walletdb.ReadWriteTx + +var ( + // ErrBucketNotFound is returned when trying to access a bucket that + // has not been created yet. + ErrBucketNotFound = walletdb.ErrBucketNotFound + + // ErrBucketExists is returned when creating a bucket that already + // exists. + ErrBucketExists = walletdb.ErrBucketExists + + // ErrDatabaseNotOpen is returned when a database instance is accessed + // before it is opened or after it is closed. + ErrDatabaseNotOpen = walletdb.ErrDbNotOpen +) diff --git a/channeldb/log.go b/channeldb/log.go index e0158d457d..7490c6bf5f 100644 --- a/channeldb/log.go +++ b/channeldb/log.go @@ -3,6 +3,9 @@ package channeldb import ( "github.com/btcsuite/btclog" "github.com/lightningnetwork/lnd/build" + "github.com/lightningnetwork/lnd/channeldb/migration12" + "github.com/lightningnetwork/lnd/channeldb/migration13" + "github.com/lightningnetwork/lnd/channeldb/migration_01_to_11" ) // log is a logger that is initialized with no output filters. This @@ -25,4 +28,7 @@ func DisableLog() { // using btclog. 
func UseLogger(logger btclog.Logger) { log = logger + migration_01_to_11.UseLogger(logger) + migration12.UseLogger(logger) + migration13.UseLogger(logger) } diff --git a/channeldb/meta.go b/channeldb/meta.go index 541559b4a5..a2dd853e38 100644 --- a/channeldb/meta.go +++ b/channeldb/meta.go @@ -1,6 +1,8 @@ package channeldb -import "github.com/coreos/bbolt" +import ( + "github.com/lightningnetwork/lnd/channeldb/kvdb" +) var ( // metaBucket stores all the meta information concerning the state of @@ -20,10 +22,10 @@ type Meta struct { // FetchMeta fetches the meta data from boltdb and returns filled meta // structure. -func (d *DB) FetchMeta(tx *bbolt.Tx) (*Meta, error) { +func (d *DB) FetchMeta(tx kvdb.ReadTx) (*Meta, error) { meta := &Meta{} - err := d.View(func(tx *bbolt.Tx) error { + err := kvdb.View(d, func(tx kvdb.ReadTx) error { return fetchMeta(meta, tx) }) if err != nil { @@ -36,8 +38,8 @@ func (d *DB) FetchMeta(tx *bbolt.Tx) (*Meta, error) { // fetchMeta is an internal helper function used in order to allow callers to // re-use a database transaction. See the publicly exported FetchMeta method // for more information. -func fetchMeta(meta *Meta, tx *bbolt.Tx) error { - metaBucket := tx.Bucket(metaBucket) +func fetchMeta(meta *Meta, tx kvdb.ReadTx) error { + metaBucket := tx.ReadBucket(metaBucket) if metaBucket == nil { return ErrMetaNotFound } @@ -54,7 +56,7 @@ func fetchMeta(meta *Meta, tx *bbolt.Tx) error { // PutMeta writes the passed instance of the database met-data struct to disk. func (d *DB) PutMeta(meta *Meta) error { - return d.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(d, func(tx kvdb.RwTx) error { return putMeta(meta, tx) }) } @@ -62,8 +64,8 @@ func (d *DB) PutMeta(meta *Meta) error { // putMeta is an internal helper function used in order to allow callers to // re-use a database transaction. See the publicly exported PutMeta method for // more information. 
-func putMeta(meta *Meta, tx *bbolt.Tx) error { - metaBucket, err := tx.CreateBucketIfNotExists(metaBucket) +func putMeta(meta *Meta, tx kvdb.RwTx) error { + metaBucket, err := tx.CreateTopLevelBucket(metaBucket) if err != nil { return err } @@ -71,7 +73,7 @@ func putMeta(meta *Meta, tx *bbolt.Tx) error { return putDbVersion(metaBucket, meta) } -func putDbVersion(metaBucket *bbolt.Bucket, meta *Meta) error { +func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error { scratch := make([]byte, 4) byteOrder.PutUint32(scratch, meta.DbVersionNumber) return metaBucket.Put(dbVersionKey, scratch) diff --git a/channeldb/meta_test.go b/channeldb/meta_test.go index 014d5966cb..1933c0e161 100644 --- a/channeldb/meta_test.go +++ b/channeldb/meta_test.go @@ -5,8 +5,8 @@ import ( "io/ioutil" "testing" - "github.com/coreos/bbolt" "github.com/go-errors/errors" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) // applyMigration is a helper test function that encapsulates the general steps @@ -121,11 +121,11 @@ func TestOrderOfMigrations(t *testing.T) { versions := []version{ {0, nil}, {1, nil}, - {2, func(tx *bbolt.Tx) error { + {2, func(tx kvdb.RwTx) error { appliedMigration = 2 return nil }}, - {3, func(tx *bbolt.Tx) error { + {3, func(tx kvdb.RwTx) error { appliedMigration = 3 return nil }}, @@ -197,21 +197,23 @@ func TestMigrationWithPanic(t *testing.T) { beforeMigrationFunc := func(d *DB) { // Insert data in database and in order then make sure that the // key isn't changes in case of panic or fail. 
- d.Update(func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) + err := kvdb.Update(d, func(tx kvdb.RwTx) error { + bucket, err := tx.CreateTopLevelBucket(bucketPrefix) if err != nil { return err } - bucket.Put(keyPrefix, beforeMigration) - return nil + return bucket.Put(keyPrefix, beforeMigration) }) + if err != nil { + t.Fatalf("unable to insert: %v", err) + } } // Create migration function which changes the initially created data and // throw the panic, in this case we pretending that something goes. - migrationWithPanic := func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) + migrationWithPanic := func(tx kvdb.RwTx) error { + bucket, err := tx.CreateTopLevelBucket(bucketPrefix) if err != nil { return err } @@ -231,8 +233,8 @@ func TestMigrationWithPanic(t *testing.T) { t.Fatal("migration panicked but version is changed") } - err = d.Update(func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) + err = kvdb.Update(d, func(tx kvdb.RwTx) error { + bucket, err := tx.CreateTopLevelBucket(bucketPrefix) if err != nil { return err } @@ -268,22 +270,24 @@ func TestMigrationWithFatal(t *testing.T) { afterMigration := []byte("aftermigration") beforeMigrationFunc := func(d *DB) { - d.Update(func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) + err := kvdb.Update(d, func(tx kvdb.RwTx) error { + bucket, err := tx.CreateTopLevelBucket(bucketPrefix) if err != nil { return err } - bucket.Put(keyPrefix, beforeMigration) - return nil + return bucket.Put(keyPrefix, beforeMigration) }) + if err != nil { + t.Fatalf("unable to insert pre migration key: %v", err) + } } // Create migration function which changes the initially created data and // return the error, in this case we pretending that something goes // wrong. 
- migrationWithFatal := func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) + migrationWithFatal := func(tx kvdb.RwTx) error { + bucket, err := tx.CreateTopLevelBucket(bucketPrefix) if err != nil { return err } @@ -303,8 +307,8 @@ func TestMigrationWithFatal(t *testing.T) { t.Fatal("migration failed but version is changed") } - err = d.Update(func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) + err = kvdb.Update(d, func(tx kvdb.RwTx) error { + bucket, err := tx.CreateTopLevelBucket(bucketPrefix) if err != nil { return err } @@ -341,20 +345,22 @@ func TestMigrationWithoutErrors(t *testing.T) { // Populate database with initial data. beforeMigrationFunc := func(d *DB) { - d.Update(func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) + err := kvdb.Update(d, func(tx kvdb.RwTx) error { + bucket, err := tx.CreateTopLevelBucket(bucketPrefix) if err != nil { return err } - bucket.Put(keyPrefix, beforeMigration) - return nil + return bucket.Put(keyPrefix, beforeMigration) }) + if err != nil { + t.Fatalf("unable to update db pre migration: %v", err) + } } // Create migration function which changes the initially created data. - migrationWithoutErrors := func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) + migrationWithoutErrors := func(tx kvdb.RwTx) error { + bucket, err := tx.CreateTopLevelBucket(bucketPrefix) if err != nil { return err } @@ -375,8 +381,8 @@ func TestMigrationWithoutErrors(t *testing.T) { "successfully applied migration") } - err = d.Update(func(tx *bbolt.Tx) error { - bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) + err = kvdb.Update(d, func(tx kvdb.RwTx) error { + bucket, err := tx.CreateTopLevelBucket(bucketPrefix) if err != nil { return err } @@ -419,7 +425,7 @@ func TestMigrationReversion(t *testing.T) { // Update the database metadata to point to one more than the highest // known version. 
- err = cdb.Update(func(tx *bbolt.Tx) error { + err = kvdb.Update(cdb, func(tx kvdb.RwTx) error { newMeta := &Meta{ DbVersionNumber: getLatestDBVersion(dbVersions) + 1, } diff --git a/channeldb/migration12/invoices.go b/channeldb/migration12/invoices.go new file mode 100644 index 0000000000..0b83fe1fe9 --- /dev/null +++ b/channeldb/migration12/invoices.go @@ -0,0 +1,318 @@ +package migration12 + +import ( + "bytes" + "encoding/binary" + "io" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/tlv" +) + +const ( + // MaxMemoSize is maximum size of the memo field within invoices stored + // in the database. + MaxMemoSize = 1024 + + // maxReceiptSize is the maximum size of the payment receipt stored + // within the database along side incoming/outgoing invoices. + maxReceiptSize = 1024 + + // MaxPaymentRequestSize is the max size of a payment request for + // this invoice. + // TODO(halseth): determine the max length payment request when field + // lengths are final. + MaxPaymentRequestSize = 4096 + + memoType tlv.Type = 0 + payReqType tlv.Type = 1 + createTimeType tlv.Type = 2 + settleTimeType tlv.Type = 3 + addIndexType tlv.Type = 4 + settleIndexType tlv.Type = 5 + preimageType tlv.Type = 6 + valueType tlv.Type = 7 + cltvDeltaType tlv.Type = 8 + expiryType tlv.Type = 9 + paymentAddrType tlv.Type = 10 + featuresType tlv.Type = 11 + invStateType tlv.Type = 12 + amtPaidType tlv.Type = 13 +) + +var ( + // invoiceBucket is the name of the bucket within the database that + // stores all data related to invoices no matter their final state. + // Within the invoice bucket, each invoice is keyed by its invoice ID + // which is a monotonically increasing uint32. + invoiceBucket = []byte("invoices") + + // Big endian is the preferred byte order, due to cursor scans over + // integer keys iterating in order. 
+ byteOrder = binary.BigEndian +) + +// ContractState describes the state the invoice is in. +type ContractState uint8 + +// ContractTerm is a companion struct to the Invoice struct. This struct houses +// the necessary conditions required before the invoice can be considered fully +// settled by the payee. +type ContractTerm struct { + // PaymentPreimage is the preimage which is to be revealed in the + // occasion that an HTLC paying to the hash of this preimage is + // extended. + PaymentPreimage lntypes.Preimage + + // Value is the expected amount of milli-satoshis to be paid to an HTLC + // which can be satisfied by the above preimage. + Value lnwire.MilliSatoshi + + // State describes the state the invoice is in. + State ContractState + + // PaymentAddr is a randomly generated value include in the MPP record + // by the sender to prevent probing of the receiver. + PaymentAddr [32]byte + + // Features is the feature vectors advertised on the payment request. + Features *lnwire.FeatureVector +} + +// Invoice is a payment invoice generated by a payee in order to request +// payment for some good or service. The inclusion of invoices within Lightning +// creates a payment work flow for merchants very similar to that of the +// existing financial system within PayPal, etc. Invoices are added to the +// database when a payment is requested, then can be settled manually once the +// payment is received at the upper layer. For record keeping purposes, +// invoices are never deleted from the database, instead a bit is toggled +// denoting the invoice has been fully settled. Within the database, all +// invoices must have a unique payment hash which is generated by taking the +// sha256 of the payment preimage. +type Invoice struct { + // Memo is an optional memo to be stored along side an invoice. The + // memo may contain further details pertaining to the invoice itself, + // or any other message which fits within the size constraints. 
+ Memo []byte + + // PaymentRequest is an optional field where a payment request created + // for this invoice can be stored. + PaymentRequest []byte + + // FinalCltvDelta is the minimum required number of blocks before htlc + // expiry when the invoice is accepted. + FinalCltvDelta int32 + + // Expiry defines how long after creation this invoice should expire. + Expiry time.Duration + + // CreationDate is the exact time the invoice was created. + CreationDate time.Time + + // SettleDate is the exact time the invoice was settled. + SettleDate time.Time + + // Terms are the contractual payment terms of the invoice. Once all the + // terms have been satisfied by the payer, then the invoice can be + // considered fully fulfilled. + // + // TODO(roasbeef): later allow for multiple terms to fulfill the final + // invoice: payment fragmentation, etc. + Terms ContractTerm + + // AddIndex is an auto-incrementing integer that acts as a + // monotonically increasing sequence number for all invoices created. + // Clients can then use this field as a "checkpoint" of sorts when + // implementing a streaming RPC to notify consumers of instances where + // an invoice has been added before they re-connected. + // + // NOTE: This index starts at 1. + AddIndex uint64 + + // SettleIndex is an auto-incrementing integer that acts as a + // monotonically increasing sequence number for all settled invoices. + // Clients can then use this field as a "checkpoint" of sorts when + // implementing a streaming RPC to notify consumers of instances where + // an invoice has been settled before they re-connected. + // + // NOTE: This index starts at 1. + SettleIndex uint64 + + // AmtPaid is the final amount that we ultimately accepted for pay for + // this invoice. We specify this value independently as it's possible + // that the invoice originally didn't specify an amount, or the sender + // overpaid. + AmtPaid lnwire.MilliSatoshi + + // Htlcs records all htlcs that paid to this invoice. 
Some of these + // htlcs may have been marked as canceled. + Htlcs []byte +} + +// LegacyDeserializeInvoice decodes an invoice from the passed io.Reader using +// the pre-TLV serialization. +func LegacyDeserializeInvoice(r io.Reader) (Invoice, error) { + var err error + invoice := Invoice{} + + // TODO(roasbeef): use read full everywhere + invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "") + if err != nil { + return invoice, err + } + _, err = wire.ReadVarBytes(r, 0, maxReceiptSize, "") + if err != nil { + return invoice, err + } + + invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "") + if err != nil { + return invoice, err + } + + if err := binary.Read(r, byteOrder, &invoice.FinalCltvDelta); err != nil { + return invoice, err + } + + var expiry int64 + if err := binary.Read(r, byteOrder, &expiry); err != nil { + return invoice, err + } + invoice.Expiry = time.Duration(expiry) + + birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth") + if err != nil { + return invoice, err + } + if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil { + return invoice, err + } + + settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled") + if err != nil { + return invoice, err + } + if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil { + return invoice, err + } + + if _, err := io.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil { + return invoice, err + } + var scratch [8]byte + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return invoice, err + } + invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) + + if err := binary.Read(r, byteOrder, &invoice.Terms.State); err != nil { + return invoice, err + } + + if err := binary.Read(r, byteOrder, &invoice.AddIndex); err != nil { + return invoice, err + } + if err := binary.Read(r, byteOrder, &invoice.SettleIndex); err != nil { + return invoice, err + } + if err := binary.Read(r, byteOrder, &invoice.AmtPaid); err != nil { 
+ return invoice, err + } + + invoice.Htlcs, err = deserializeHtlcs(r) + if err != nil { + return Invoice{}, err + } + + return invoice, nil +} + +// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it +// as a flattened byte slice. +func deserializeHtlcs(r io.Reader) ([]byte, error) { + var b bytes.Buffer + _, err := io.Copy(&b, r) + return b.Bytes(), err +} + +// SerializeInvoice serializes an invoice to a writer. +// +// nolint: dupl +func SerializeInvoice(w io.Writer, i *Invoice) error { + creationDateBytes, err := i.CreationDate.MarshalBinary() + if err != nil { + return err + } + + settleDateBytes, err := i.SettleDate.MarshalBinary() + if err != nil { + return err + } + + var fb bytes.Buffer + err = i.Terms.Features.EncodeBase256(&fb) + if err != nil { + return err + } + featureBytes := fb.Bytes() + + preimage := [32]byte(i.Terms.PaymentPreimage) + value := uint64(i.Terms.Value) + cltvDelta := uint32(i.FinalCltvDelta) + expiry := uint64(i.Expiry) + + amtPaid := uint64(i.AmtPaid) + state := uint8(i.Terms.State) + + tlvStream, err := tlv.NewStream( + // Memo and payreq. + tlv.MakePrimitiveRecord(memoType, &i.Memo), + tlv.MakePrimitiveRecord(payReqType, &i.PaymentRequest), + + // Add/settle metadata. + tlv.MakePrimitiveRecord(createTimeType, &creationDateBytes), + tlv.MakePrimitiveRecord(settleTimeType, &settleDateBytes), + tlv.MakePrimitiveRecord(addIndexType, &i.AddIndex), + tlv.MakePrimitiveRecord(settleIndexType, &i.SettleIndex), + + // Terms. + tlv.MakePrimitiveRecord(preimageType, &preimage), + tlv.MakePrimitiveRecord(valueType, &value), + tlv.MakePrimitiveRecord(cltvDeltaType, &cltvDelta), + tlv.MakePrimitiveRecord(expiryType, &expiry), + tlv.MakePrimitiveRecord(paymentAddrType, &i.Terms.PaymentAddr), + tlv.MakePrimitiveRecord(featuresType, &featureBytes), + + // Invoice state. 
+ tlv.MakePrimitiveRecord(invStateType, &state), + tlv.MakePrimitiveRecord(amtPaidType, &amtPaid), + ) + if err != nil { + return err + } + + var b bytes.Buffer + if err = tlvStream.Encode(&b); err != nil { + return err + } + + err = binary.Write(w, byteOrder, uint64(b.Len())) + if err != nil { + return err + } + + if _, err = w.Write(b.Bytes()); err != nil { + return err + } + + return serializeHtlcs(w, i.Htlcs) +} + +// serializeHtlcs writes a serialized list of invoice htlcs into a writer. +func serializeHtlcs(w io.Writer, htlcs []byte) error { + _, err := w.Write(htlcs) + return err +} diff --git a/channeldb/migration12/log.go b/channeldb/migration12/log.go new file mode 100644 index 0000000000..1352e52aea --- /dev/null +++ b/channeldb/migration12/log.go @@ -0,0 +1,14 @@ +package migration12 + +import ( + "github.com/btcsuite/btclog" +) + +// log is a logger that is initialized as disabled. This means the package will +// not perform any logging by default until a logger is set. +var log = btclog.Disabled + +// UseLogger uses a specified Logger to output package logging info. +func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/channeldb/migration12/migration.go b/channeldb/migration12/migration.go new file mode 100644 index 0000000000..66f988deb2 --- /dev/null +++ b/channeldb/migration12/migration.go @@ -0,0 +1,74 @@ +package migration12 + +import ( + "bytes" + + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/lnwire" +) + +var emptyFeatures = lnwire.NewFeatureVector(nil, nil) + +// MigrateInvoiceTLV migrates all existing invoice bodies over to be serialized +// in a single TLV stream. In the process, we drop the Receipt field and add +// PaymentAddr and Features to the invoice Terms. 
+func MigrateInvoiceTLV(tx kvdb.RwTx) error { + log.Infof("Migrating invoice bodies to TLV, " + + "adding payment addresses and feature vectors.") + + invoiceB := tx.ReadWriteBucket(invoiceBucket) + if invoiceB == nil { + return nil + } + + type keyedInvoice struct { + key []byte + invoice Invoice + } + + // Read in all existing invoices using the old format. + var invoices []keyedInvoice + err := invoiceB.ForEach(func(k, v []byte) error { + if v == nil { + return nil + } + + invoiceReader := bytes.NewReader(v) + invoice, err := LegacyDeserializeInvoice(invoiceReader) + if err != nil { + return err + } + + // Insert an empty feature vector on all old payments. + invoice.Terms.Features = emptyFeatures + + invoices = append(invoices, keyedInvoice{ + key: k, + invoice: invoice, + }) + + return nil + }) + if err != nil { + return err + } + + // Write out each one under its original key using TLV. + for _, ki := range invoices { + var b bytes.Buffer + err = SerializeInvoice(&b, &ki.invoice) + if err != nil { + return err + } + + err = invoiceB.Put(ki.key, b.Bytes()) + if err != nil { + return err + } + } + + log.Infof("Migration to TLV invoice bodies, " + + "payment address, and features complete!") + + return nil +} diff --git a/channeldb/migration12/migration_test.go b/channeldb/migration12/migration_test.go new file mode 100644 index 0000000000..ab420fe6da --- /dev/null +++ b/channeldb/migration12/migration_test.go @@ -0,0 +1,206 @@ +package migration12_test + +import ( + "bytes" + "fmt" + "testing" + + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/channeldb/migration12" + "github.com/lightningnetwork/lnd/channeldb/migtest" + "github.com/lightningnetwork/lnd/lntypes" +) + +var ( + // invoiceBucket is the name of the bucket within the database that + // stores all data related to invoices no matter their final state. 
+ // Within the invoice bucket, each invoice is keyed by its invoice ID + // which is a monotonically increasing uint32. + invoiceBucket = []byte("invoices") + + preimage = lntypes.Preimage{ + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + } + + hash = preimage.Hash() + + beforeInvoice0Htlcs = []byte{ + 0x0b, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, + 0x6c, 0x64, 0x09, 0x62, 0x79, 0x65, 0x20, 0x77, 0x6f, 0x72, + 0x6c, 0x64, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x00, + 0x00, 0x00, 0x20, 0x00, 0x00, 0x4e, 0x94, 0x91, 0x4f, 0x00, + 0x00, 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, + 0xd5, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x0f, 0x01, 0x00, + 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00, + 0x00, 0xfe, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x03, 0xe8, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa4, + } + + afterInvoice0Htlcs = []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x0b, + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, + 0x64, 0x01, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x02, + 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, 0xd5, + 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x03, 0x0f, 0x01, 0x00, + 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00, + 0x00, 0xfe, 0x20, 0x04, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x06, 0x06, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 
0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x07, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, 0x08, 0x04, 0x00, + 0x00, 0x00, 0x20, 0x09, 0x08, 0x00, 0x00, 0x4e, 0x94, 0x91, + 0x4f, 0x00, 0x00, 0x0a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x0c, + 0x01, 0x03, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0xa4, + } + + testHtlc = []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, + 0x01, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, + 0x03, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, + 0x05, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, + 0x07, 0x04, 0x00, 0x00, 0x00, 0x58, 0x09, 0x08, 0x00, 0x13, + 0xbc, 0xbf, 0x72, 0x4e, 0x1e, 0x00, 0x0b, 0x08, 0x00, 0x17, + 0xaf, 0x4c, 0x22, 0xc4, 0x24, 0x00, 0x0d, 0x04, 0x00, 0x00, + 0x23, 0x1d, 0x0f, 0x01, 0x02, + } + + beforeInvoice1Htlc = append([]byte{ + 0x0b, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, + 0x6c, 0x64, 0x09, 0x62, 0x79, 0x65, 0x20, 0x77, 0x6f, 0x72, + 0x6c, 0x64, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x00, + 0x00, 0x00, 0x20, 0x00, 0x00, 0x4e, 0x94, 0x91, 0x4f, 0x00, + 0x00, 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, + 0xd5, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x0f, 0x01, 0x00, + 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00, + 0x00, 0xfe, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x03, 0xe8, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xa4, + }, testHtlc...) 
+ + afterInvoice1Htlc = append([]byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x0b, + 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x20, 0x77, 0x6f, 0x72, 0x6c, + 0x64, 0x01, 0x06, 0x70, 0x61, 0x79, 0x72, 0x65, 0x71, 0x02, + 0x0f, 0x01, 0x00, 0x00, 0x00, 0x0e, 0x77, 0xc4, 0xd3, 0xd5, + 0x00, 0x00, 0x00, 0x00, 0xfe, 0x20, 0x03, 0x0f, 0x01, 0x00, + 0x00, 0x00, 0x0e, 0x77, 0xd5, 0xc8, 0x1c, 0x00, 0x00, 0x00, + 0x00, 0xfe, 0x20, 0x04, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x05, 0x05, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x06, 0x06, 0x20, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x07, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, 0x08, 0x04, 0x00, + 0x00, 0x00, 0x20, 0x09, 0x08, 0x00, 0x00, 0x4e, 0x94, 0x91, + 0x4f, 0x00, 0x00, 0x0a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x00, 0x0c, + 0x01, 0x03, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0xa4, + }, testHtlc...) +) + +type migrationTest struct { + name string + beforeMigration func(kvdb.RwTx) error + afterMigration func(kvdb.RwTx) error +} + +var migrationTests = []migrationTest{ + { + name: "no invoices", + beforeMigration: func(kvdb.RwTx) error { return nil }, + afterMigration: func(kvdb.RwTx) error { return nil }, + }, + { + name: "zero htlcs", + beforeMigration: genBeforeMigration(beforeInvoice0Htlcs), + afterMigration: genAfterMigration(afterInvoice0Htlcs), + }, + { + name: "one htlc", + beforeMigration: genBeforeMigration(beforeInvoice1Htlc), + afterMigration: genAfterMigration(afterInvoice1Htlc), + }, +} + +// genBeforeMigration creates a closure that inserts an invoice serialized under +// the old format under the test payment hash. 
+func genBeforeMigration(beforeBytes []byte) func(kvdb.RwTx) error { + return func(tx kvdb.RwTx) error { + invoices, err := tx.CreateTopLevelBucket( + invoiceBucket, + ) + if err != nil { + return err + } + + return invoices.Put(hash[:], beforeBytes) + } +} + +// genAfterMigration creates a closure that verifies the tlv invoice migration +// succeeded, but comparing the resulting encoding of the invoice to the +// expected serialization. In addition, the decoded invoice is compared against +// the expected invoice for equality. +func genAfterMigration(afterBytes []byte) func(kvdb.RwTx) error { + return func(tx kvdb.RwTx) error { + invoices := tx.ReadWriteBucket(invoiceBucket) + if invoices == nil { + return fmt.Errorf("invoice bucket not found") + } + + // Fetch the new invoice bytes and check that they match our + // expected serialization. + invoiceBytes := invoices.Get(hash[:]) + if !bytes.Equal(invoiceBytes, afterBytes) { + return fmt.Errorf("invoice bytes mismatch, "+ + "want: %x, got: %x", + invoiceBytes, afterBytes) + } + + return nil + } +} + +// TestTLVInvoiceMigration executes a suite of migration tests for moving +// invoices to use TLV for their bodies. In the process, feature bits and +// payment addresses are added to the invoice while the receipt field is +// dropped. We test a few different invoices with a varying number of HTLCs, as +// well as the case where there are no invoices present. +// +// NOTE: The test vectors each include a receipt that is not present on the +// final struct, but verifies that the field is properly removed. 
+func TestTLVInvoiceMigration(t *testing.T) { + for _, test := range migrationTests { + test := test + t.Run(test.name, func(t *testing.T) { + migtest.ApplyMigration( + t, + test.beforeMigration, + test.afterMigration, + migration12.MigrateInvoiceTLV, + false, + ) + }) + } +} diff --git a/channeldb/migration13/log.go b/channeldb/migration13/log.go new file mode 100644 index 0000000000..33ec1812af --- /dev/null +++ b/channeldb/migration13/log.go @@ -0,0 +1,14 @@ +package migration13 + +import ( + "github.com/btcsuite/btclog" +) + +// log is a logger that is initialized as disabled. This means the package will +// not perform any logging by default until a logger is set. +var log = btclog.Disabled + +// UseLogger uses a specified Logger to output package logging info. +func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/channeldb/migration13/migration.go b/channeldb/migration13/migration.go new file mode 100644 index 0000000000..df737d25a6 --- /dev/null +++ b/channeldb/migration13/migration.go @@ -0,0 +1,202 @@ +package migration13 + +import ( + "encoding/binary" + "fmt" + + "github.com/lightningnetwork/lnd/channeldb/kvdb" +) + +var ( + paymentsRootBucket = []byte("payments-root-bucket") + + // paymentCreationInfoKey is a key used in the payment's sub-bucket to + // store the creation info of the payment. + paymentCreationInfoKey = []byte("payment-creation-info") + + // paymentFailInfoKey is a key used in the payment's sub-bucket to + // store information about the reason a payment failed. + paymentFailInfoKey = []byte("payment-fail-info") + + // paymentAttemptInfoKey is a key used in the payment's sub-bucket to + // store the info about the latest attempt that was done for the + // payment in question. + paymentAttemptInfoKey = []byte("payment-attempt-info") + + // paymentSettleInfoKey is a key used in the payment's sub-bucket to + // store the settle info of the payment. 
+ paymentSettleInfoKey = []byte("payment-settle-info") + + // paymentHtlcsBucket is a bucket where we'll store the information + // about the HTLCs that were attempted for a payment. + paymentHtlcsBucket = []byte("payment-htlcs-bucket") + + // htlcAttemptInfoKey is a key used in a HTLC's sub-bucket to store the + // info about the attempt that was done for the HTLC in question. + htlcAttemptInfoKey = []byte("htlc-attempt-info") + + // htlcSettleInfoKey is a key used in a HTLC's sub-bucket to store the + // settle info, if any. + htlcSettleInfoKey = []byte("htlc-settle-info") + + // htlcFailInfoKey is a key used in a HTLC's sub-bucket to store + // failure information, if any. + htlcFailInfoKey = []byte("htlc-fail-info") + + byteOrder = binary.BigEndian +) + +// MigrateMPP migrates the payments to a new structure that accommodates for mpp +// payments. +func MigrateMPP(tx kvdb.RwTx) error { + log.Infof("Migrating payments to mpp structure") + + // Iterate over all payments and store their indexing keys. This is + // needed, because no modifications are allowed inside a Bucket.ForEach + // loop. + paymentsBucket := tx.ReadWriteBucket(paymentsRootBucket) + if paymentsBucket == nil { + return nil + } + + var paymentKeys [][]byte + err := paymentsBucket.ForEach(func(k, v []byte) error { + paymentKeys = append(paymentKeys, k) + return nil + }) + if err != nil { + return err + } + + // With all keys retrieved, start the migration. + for _, k := range paymentKeys { + bucket := paymentsBucket.NestedReadWriteBucket(k) + + // We only expect sub-buckets to be found in + // this top-level bucket. + if bucket == nil { + return fmt.Errorf("non bucket element in " + + "payments bucket") + } + + // Fetch old format creation info. + creationInfo := bucket.Get(paymentCreationInfoKey) + if creationInfo == nil { + return fmt.Errorf("creation info not found") + } + + // Make a copy because bbolt doesn't allow this value to be + // changed in-place. 
+ newCreationInfo := make([]byte, len(creationInfo)) + copy(newCreationInfo, creationInfo) + + // Convert to nano seconds. + timeBytes := newCreationInfo[32+8 : 32+8+8] + time := byteOrder.Uint64(timeBytes) + timeNs := time * 1000000000 + byteOrder.PutUint64(timeBytes, timeNs) + + // Write back new format creation info. + err := bucket.Put(paymentCreationInfoKey, newCreationInfo) + if err != nil { + return err + } + + // No migration needed if there is no attempt stored. + attemptInfo := bucket.Get(paymentAttemptInfoKey) + if attemptInfo == nil { + continue + } + + // Delete attempt info on the payment level. + if err := bucket.Delete(paymentAttemptInfoKey); err != nil { + return err + } + + // Save attempt id for later use. + attemptID := attemptInfo[:8] + + // Discard attempt id. It will become a bucket key in the new + // structure. + attemptInfo = attemptInfo[8:] + + // Append unknown (zero) attempt time. + var zero [8]byte + attemptInfo = append(attemptInfo, zero[:]...) + + // Create bucket that contains all htlcs. + htlcsBucket, err := bucket.CreateBucket(paymentHtlcsBucket) + if err != nil { + return err + } + + // Create an htlc for this attempt. + htlcBucket, err := htlcsBucket.CreateBucket(attemptID) + if err != nil { + return err + } + + // Save migrated attempt info. + err = htlcBucket.Put(htlcAttemptInfoKey, attemptInfo) + if err != nil { + return err + } + + // Migrate settle info. + settleInfo := bucket.Get(paymentSettleInfoKey) + if settleInfo != nil { + // Payment-level settle info can be deleted. + err := bucket.Delete(paymentSettleInfoKey) + if err != nil { + return err + } + + // Append unknown (zero) settle time. + settleInfo = append(settleInfo, zero[:]...) + + // Save settle info. + err = htlcBucket.Put(htlcSettleInfoKey, settleInfo) + if err != nil { + return err + } + + // Migration for settled htlc completed. 
+ continue + } + + // If there is no payment-level failure reason, the payment is + // still in flight and nothing else needs to be migrated. + // Otherwise the payment-level failure reason can remain + // unchanged. + inFlight := bucket.Get(paymentFailInfoKey) == nil + if inFlight { + continue + } + + // The htlc failed. Add htlc fail info with reason unknown. We + // don't have access to the original failure reason anymore. + failInfo := []byte{ + // Fail time unknown. + 0, 0, 0, 0, 0, 0, 0, 0, + + // Zero length wire message. + 0, + + // Failure reason unknown. + 0, + + // Failure source index zero. + 0, 0, 0, 0, + } + + // Save fail info. + err = htlcBucket.Put(htlcFailInfoKey, failInfo) + if err != nil { + return err + } + } + + log.Infof("Migration of payments to mpp structure complete!") + + return nil +} diff --git a/channeldb/migration13/migration_test.go b/channeldb/migration13/migration_test.go new file mode 100644 index 0000000000..d71302fede --- /dev/null +++ b/channeldb/migration13/migration_test.go @@ -0,0 +1,123 @@ +package migration13 + +import ( + "testing" + + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/channeldb/migtest" +) + +var ( + hex = migtest.Hex + + zeroTime = hex("0000000000000000") + noFailureMessage = hex("00") + failureReasonUnknown = hex("00") + zeroFailureSourceIdx = hex("00000000") + + hash1 = hex("02acee76ebd53d00824410cf6adecad4f50334dac702bd5a2d3ba01b91709f0e") + creationInfoAmt1 = hex("00000000004c4b40") + creationInfoTime1 = hex("000000005e4fb7ab") // 1582282667 (decimal) + creationInfoTimeNano1 = hex("15f565b3cccaee00") // 1582282667000000000 (decimal) + creationInfoPayReq1 = hex("00000000") + attemptInfo1 = 
hex("2997a72e129fc9d638ef2fa4e233567d808d4f18a4f087637582427962eb3bf800005ce600000000004c4b402102ec12e83eafe27ce6d03bbe0c0de4b79fe2b9934615c8aa7693f73d2e41b089700000000121028c2dd128c7a6c1a0fceb3e3eb5ed55e0a0ae1a939eb786b097322d830d47db75005ca4000001000000005ce600000000004c4b400000000000") + attemptID1 = hex("0000000000000001") + paymentID1 = hex("0000000000000001") + + hash2 = hex("62eb3f0a48f954e495d0c14ac63df04a67cefa59dafdbcd3d5046d1f5647840c") + preimage2 = hex("479593b7d3cbb45beb22d448451a2f3619b2095adfb38f4d92e9886e96534368") + attemptID2 = hex("00000000000003e8") + paymentID2 = hex("0000000000000002") + attemptInfo2 = hex("8de663f9bb4b8d1ebdb496d22dc1cb657a346215607308549f41b01e2adf2ce900005ce600000000005b8d802102ec12e83eafe27ce6d03bbe0c0de4b79fe2b9934615c8aa7693f73d2e41b089700000000121028c2dd128c7a6c1a0fceb3e3eb5ed55e0a0ae1a939eb786b097322d830d47db75005ca4000001000000005ce600000000005b8d8000000000010000000000000008233d281e2cbe01f0b82dd6750967c9233426b98ae6549c696365f57f86f942a3795b8d80") + creationInfoAmt2 = hex("00000000005b8d80") + creationInfoTime2 = hex("000000005e4fb97f") // 1582283135 (decimal) + creationInfoTimeNano2 = hex("15F56620C3C43600") // 1582283135000000000 (decimal) + creationInfoPayReq2 = hex("000000fc6c6e62637274363075317030796c7774367070357674346e377a6a676c39327766397773633939767630307366666e7561376a656d74376d6535373471336b3337346a387373787164717163717a70677370353835357075743937713863747374776b7735796b306a667278736e746e7a6878326a77786a636d3937346c636437327a3564757339717939717371653872336b3578733379367868667366366d6a6e706d717172306661797a677a63336a6b663571787a6c376866787a6666763578667a7679647564327275767974706571787072376868796830726a747574373033333274737774686661616e303773766b6667716b7174667275") + + hash3 = hex("62eb3f0a48f954e495d0c14ac63df04a67cefa59dafdbcd3d5046d1f5647840d") + attemptInfo3 = 
hex("53ce0a4c1507cc5ea00ec88b76bd43a3978ac13605497030b821af6ce9c110f300005ce600000000006acfc02102ec12e83eafe27ce6d03bbe0c0de4b79fe2b9934615c8aa7693f73d2e41b089700000000121028c2dd128c7a6c1a0fceb3e3eb5ed55e0a0ae1a939eb786b097322d830d47db75005ca4000001000000005ce600000000006acfc000000000010000000000000008233044f235354472318b381fad3e21eb5a58f5099918868b0610e7b7bcb7a4adc96acfc0") + attemptID3 = hex("00000000000003e9") + paymentID3 = hex("0000000000000003") + creationInfoAmt3 = hex("00000000006acfc0") + creationInfoTime3 = hex("000000005e4fb98d") // 1582283149 + creationInfoTimeNano3 = hex("15F56624063B4200") // 1582283149000000000 (decimal) + creationInfoPayReq3 = hex("000000fc6c6e62637274373075317030796c7776327070357674346e377a6a676c39327766397773633939767630307366666e7561376a656d74376d6535373471336b3337346a387373787364717163717a706773703578707a307964663467336572727a656372376b6e7567307474667630327a7665727a72676b70737375376d6d6564617934687973397179397173717774656479336e666c323534787a36787a75763974746767757a647473356e617a7461616a6735667772686438396b336d70753971726d7a6c3779637a306e30666e6e763077753032726632706e64636c393761646c667636376a7a6e7063677477356434366771323571326e32") + + // pre is the data in the payments root bucket in database version 12 format. + pre = map[string]interface{}{ + // A failed payment. + hash1: map[string]interface{}{ + "payment-attempt-info": attemptID1 + attemptInfo1, + "payment-creation-info": hash1 + creationInfoAmt1 + creationInfoTime1 + creationInfoPayReq1, + "payment-fail-info": hex("03"), + "payment-sequence-key": paymentID1, + }, + + // A settled payment. + hash2: map[string]interface{}{ + "payment-attempt-info": attemptID2 + attemptInfo2, + "payment-creation-info": hash2 + creationInfoAmt2 + creationInfoTime2 + creationInfoPayReq2, + "payment-sequence-key": paymentID2, + "payment-settle-info": preimage2, + }, + + // An in-flight payment. 
+ hash3: map[string]interface{}{ + "payment-attempt-info": attemptID3 + attemptInfo3, + "payment-creation-info": hash3 + creationInfoAmt3 + creationInfoTime3 + creationInfoPayReq3, + "payment-sequence-key": paymentID3, + }, + } + + // post is the expected data after migration. + post = map[string]interface{}{ + hash1: map[string]interface{}{ + "payment-creation-info": hash1 + creationInfoAmt1 + creationInfoTimeNano1 + creationInfoPayReq1, + "payment-fail-info": hex("03"), + "payment-htlcs-bucket": map[string]interface{}{ + attemptID1: map[string]interface{}{ + "htlc-attempt-info": attemptInfo1 + zeroTime, + "htlc-fail-info": zeroTime + noFailureMessage + failureReasonUnknown + zeroFailureSourceIdx, + }, + }, + "payment-sequence-key": paymentID1, + }, + hash2: map[string]interface{}{ + "payment-creation-info": hash2 + creationInfoAmt2 + creationInfoTimeNano2 + creationInfoPayReq2, + "payment-htlcs-bucket": map[string]interface{}{ + attemptID2: map[string]interface{}{ + "htlc-attempt-info": attemptInfo2 + zeroTime, + "htlc-settle-info": preimage2 + zeroTime, + }, + }, + "payment-sequence-key": paymentID2, + }, + hash3: map[string]interface{}{ + "payment-creation-info": hash3 + creationInfoAmt3 + creationInfoTimeNano3 + creationInfoPayReq3, + "payment-htlcs-bucket": map[string]interface{}{ + attemptID3: map[string]interface{}{ + "htlc-attempt-info": attemptInfo3 + zeroTime, + }, + }, + "payment-sequence-key": paymentID3, + }, + } +) + +// TestMigrateMpp asserts that the database is properly migrated to the mpp +// payment structure. 
+func TestMigrateMpp(t *testing.T) { + var paymentsRootBucket = []byte("payments-root-bucket") + + migtest.ApplyMigration( + t, + func(tx kvdb.RwTx) error { + return migtest.RestoreDB(tx, paymentsRootBucket, pre) + }, + func(tx kvdb.RwTx) error { + return migtest.VerifyDB(tx, paymentsRootBucket, post) + }, + MigrateMPP, + false, + ) +} diff --git a/channeldb/migration_01_to_11/addr.go b/channeldb/migration_01_to_11/addr.go new file mode 100644 index 0000000000..2e7def07ca --- /dev/null +++ b/channeldb/migration_01_to_11/addr.go @@ -0,0 +1,221 @@ +package migration_01_to_11 + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "net" + + "github.com/lightningnetwork/lnd/tor" +) + +// addressType specifies the network protocol and version that should be used +// when connecting to a node at a particular address. +type addressType uint8 + +const ( + // tcp4Addr denotes an IPv4 TCP address. + tcp4Addr addressType = 0 + + // tcp6Addr denotes an IPv6 TCP address. + tcp6Addr addressType = 1 + + // v2OnionAddr denotes a version 2 Tor onion service address. + v2OnionAddr addressType = 2 + + // v3OnionAddr denotes a version 3 Tor (prop224) onion service address. + v3OnionAddr addressType = 3 +) + +// encodeTCPAddr serializes a TCP address into its compact raw bytes +// representation. 
+func encodeTCPAddr(w io.Writer, addr *net.TCPAddr) error { + var ( + addrType byte + ip []byte + ) + + if addr.IP.To4() != nil { + addrType = byte(tcp4Addr) + ip = addr.IP.To4() + } else { + addrType = byte(tcp6Addr) + ip = addr.IP.To16() + } + + if ip == nil { + return fmt.Errorf("unable to encode IP %v", addr.IP) + } + + if _, err := w.Write([]byte{addrType}); err != nil { + return err + } + + if _, err := w.Write(ip); err != nil { + return err + } + + var port [2]byte + byteOrder.PutUint16(port[:], uint16(addr.Port)) + if _, err := w.Write(port[:]); err != nil { + return err + } + + return nil +} + +// encodeOnionAddr serializes an onion address into its compact raw bytes +// representation. +func encodeOnionAddr(w io.Writer, addr *tor.OnionAddr) error { + var suffixIndex int + hostLen := len(addr.OnionService) + switch hostLen { + case tor.V2Len: + if _, err := w.Write([]byte{byte(v2OnionAddr)}); err != nil { + return err + } + suffixIndex = tor.V2Len - tor.OnionSuffixLen + case tor.V3Len: + if _, err := w.Write([]byte{byte(v3OnionAddr)}); err != nil { + return err + } + suffixIndex = tor.V3Len - tor.OnionSuffixLen + default: + return errors.New("unknown onion service length") + } + + suffix := addr.OnionService[suffixIndex:] + if suffix != tor.OnionSuffix { + return fmt.Errorf("invalid suffix \"%v\"", suffix) + } + + host, err := tor.Base32Encoding.DecodeString( + addr.OnionService[:suffixIndex], + ) + if err != nil { + return err + } + + // Sanity check the decoded length. 
+ switch { + case hostLen == tor.V2Len && len(host) != tor.V2DecodedLen: + return fmt.Errorf("onion service %v decoded to invalid host %x", + addr.OnionService, host) + + case hostLen == tor.V3Len && len(host) != tor.V3DecodedLen: + return fmt.Errorf("onion service %v decoded to invalid host %x", + addr.OnionService, host) + } + + if _, err := w.Write(host); err != nil { + return err + } + + var port [2]byte + byteOrder.PutUint16(port[:], uint16(addr.Port)) + if _, err := w.Write(port[:]); err != nil { + return err + } + + return nil +} + +// deserializeAddr reads the serialized raw representation of an address and +// deserializes it into the actual address. This allows us to avoid address +// resolution within the channeldb package. +func deserializeAddr(r io.Reader) (net.Addr, error) { + var addrType [1]byte + if _, err := r.Read(addrType[:]); err != nil { + return nil, err + } + + var address net.Addr + switch addressType(addrType[0]) { + case tcp4Addr: + var ip [4]byte + if _, err := r.Read(ip[:]); err != nil { + return nil, err + } + + var port [2]byte + if _, err := r.Read(port[:]); err != nil { + return nil, err + } + + address = &net.TCPAddr{ + IP: net.IP(ip[:]), + Port: int(binary.BigEndian.Uint16(port[:])), + } + case tcp6Addr: + var ip [16]byte + if _, err := r.Read(ip[:]); err != nil { + return nil, err + } + + var port [2]byte + if _, err := r.Read(port[:]); err != nil { + return nil, err + } + + address = &net.TCPAddr{ + IP: net.IP(ip[:]), + Port: int(binary.BigEndian.Uint16(port[:])), + } + case v2OnionAddr: + var h [tor.V2DecodedLen]byte + if _, err := r.Read(h[:]); err != nil { + return nil, err + } + + var p [2]byte + if _, err := r.Read(p[:]); err != nil { + return nil, err + } + + onionService := tor.Base32Encoding.EncodeToString(h[:]) + onionService += tor.OnionSuffix + port := int(binary.BigEndian.Uint16(p[:])) + + address = &tor.OnionAddr{ + OnionService: onionService, + Port: port, + } + case v3OnionAddr: + var h [tor.V3DecodedLen]byte + if 
_, err := r.Read(h[:]); err != nil { + return nil, err + } + + var p [2]byte + if _, err := r.Read(p[:]); err != nil { + return nil, err + } + + onionService := tor.Base32Encoding.EncodeToString(h[:]) + onionService += tor.OnionSuffix + port := int(binary.BigEndian.Uint16(p[:])) + + address = &tor.OnionAddr{ + OnionService: onionService, + Port: port, + } + default: + return nil, ErrUnknownAddressType + } + + return address, nil +} + +// serializeAddr serializes an address into its raw bytes representation so that +// it can be deserialized without requiring address resolution. +func serializeAddr(w io.Writer, address net.Addr) error { + switch addr := address.(type) { + case *net.TCPAddr: + return encodeTCPAddr(w, addr) + case *tor.OnionAddr: + return encodeOnionAddr(w, addr) + default: + return ErrUnknownAddressType + } +} diff --git a/channeldb/migration_01_to_11/channel.go b/channeldb/migration_01_to_11/channel.go new file mode 100644 index 0000000000..e67c0c69ed --- /dev/null +++ b/channeldb/migration_01_to_11/channel.go @@ -0,0 +1,751 @@ +package migration_01_to_11 + +import ( + "errors" + "fmt" + "io" + "strconv" + "strings" + "sync" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/shachain" +) + +var ( + // closedChannelBucket stores summarization information concerning + // previously open, but now closed channels. + closedChannelBucket = []byte("closed-chan-bucket") + + // openChanBucket stores all the currently open channels. This bucket + // has a second, nested bucket which is keyed by a node's ID. Within + // that node ID bucket, all attributes required to track, update, and + // close a channel are stored. 
+ // + // openChan -> nodeID -> chanPoint + // + // TODO(roasbeef): flesh out comment + openChannelBucket = []byte("open-chan-bucket") +) + +// ChannelType is an enum-like type that describes one of several possible +// channel types. Each open channel is associated with a particular type as the +// channel type may determine how higher level operations are conducted such as +// fee negotiation, channel closing, the format of HTLCs, etc. +// TODO(roasbeef): split up per-chain? +type ChannelType uint8 + +const ( + // NOTE: iota isn't used here for this enum needs to be stable + // long-term as it will be persisted to the database. + + // SingleFunder represents a channel wherein one party solely funds the + // entire capacity of the channel. + SingleFunder ChannelType = 0 +) + +// ChannelConstraints represents a set of constraints meant to allow a node to +// limit their exposure, enact flow control and ensure that all HTLCs are +// economically relevant. This struct will be mirrored for both sides of the +// channel, as each side will enforce various constraints that MUST be adhered +// to for the life time of the channel. The parameters for each of these +// constraints are static for the duration of the channel, meaning the channel +// must be torn down for them to change. +type ChannelConstraints struct { + // DustLimit is the threshold (in satoshis) below which any outputs + // should be trimmed. When an output is trimmed, it isn't materialized + // as an actual output, but is instead burned to miner's fees. + DustLimit btcutil.Amount + + // ChanReserve is an absolute reservation on the channel for the + // owner of this set of constraints. This means that the current + // settled balance for this node CANNOT dip below the reservation + // amount. This acts as a defense against costless attacks when + // either side no longer has any skin in the game. 
+ ChanReserve btcutil.Amount + + // MaxPendingAmount is the maximum pending HTLC value that the + // owner of these constraints can offer the remote node at a + // particular time. + MaxPendingAmount lnwire.MilliSatoshi + + // MinHTLC is the minimum HTLC value that the owner of these + // constraints can offer the remote node. If any HTLCs below this + // amount are offered, then the HTLC will be rejected. This, in + // tandem with the dust limit allows a node to regulate the + // smallest HTLC that it deems economically relevant. + MinHTLC lnwire.MilliSatoshi + + // MaxAcceptedHtlcs is the maximum number of HTLCs that the owner of + // this set of constraints can offer the remote node. This allows each + // node to limit their over all exposure to HTLCs that may need to be + // acted upon in the case of a unilateral channel closure or a contract + // breach. + MaxAcceptedHtlcs uint16 + + // CsvDelay is the relative time lock delay expressed in blocks. Any + // settled outputs that pay to the owner of this channel configuration + // MUST ensure that the delay branch uses this value as the relative + // time lock. Similarly, any HTLC's offered by this node should use + // this value as well. + CsvDelay uint16 +} + +// ChannelConfig is a struct that houses the various configuration opens for +// channels. Each side maintains an instance of this configuration file as it +// governs: how the funding and commitment transaction to be created, the +// nature of HTLC's allotted, the keys to be used for delivery, and relative +// time lock parameters. +type ChannelConfig struct { + // ChannelConstraints is the set of constraints that must be upheld for + // the duration of the channel for the owner of this channel + // configuration. Constraints govern a number of flow control related + // parameters, also including the smallest HTLC that will be accepted + // by a participant. 
+ ChannelConstraints + + // MultiSigKey is the key to be used within the 2-of-2 output script + // for the owner of this channel config. + MultiSigKey keychain.KeyDescriptor + + // RevocationBasePoint is the base public key to be used when deriving + // revocation keys for the remote node's commitment transaction. This + // will be combined along with a per commitment secret to derive a + // unique revocation key for each state. + RevocationBasePoint keychain.KeyDescriptor + + // PaymentBasePoint is the base public key to be used when deriving + // the key used within the non-delayed pay-to-self output on the + // commitment transaction for a node. This will be combined with a + // tweak derived from the per-commitment point to ensure unique keys + // for each commitment transaction. + PaymentBasePoint keychain.KeyDescriptor + + // DelayBasePoint is the base public key to be used when deriving the + // key used within the delayed pay-to-self output on the commitment + // transaction for a node. This will be combined with a tweak derived + // from the per-commitment point to ensure unique keys for each + // commitment transaction. + DelayBasePoint keychain.KeyDescriptor + + // HtlcBasePoint is the base public key to be used when deriving the + // local HTLC key. The derived key (combined with the tweak derived + // from the per-commitment point) is used within the "to self" clause + // within any HTLC output scripts. + HtlcBasePoint keychain.KeyDescriptor +} + +// ChannelCommitment is a snapshot of the commitment state at a particular +// point in the commitment chain. With each state transition, a snapshot of the +// current state along with all non-settled HTLCs are recorded. These snapshots +// detail the state of the _remote_ party's commitment at a particular state +// number. For ourselves (the local node) we ONLY store our most recent +// (unrevoked) state for safety purposes. 
+type ChannelCommitment struct { + // CommitHeight is the update number that this ChannelDelta represents + // the total number of commitment updates to this point. This can be + // viewed as sort of a "commitment height" as this number is + // monotonically increasing. + CommitHeight uint64 + + // LocalLogIndex is the cumulative log index index of the local node at + // this point in the commitment chain. This value will be incremented + // for each _update_ added to the local update log. + LocalLogIndex uint64 + + // LocalHtlcIndex is the current local running HTLC index. This value + // will be incremented for each outgoing HTLC the local node offers. + LocalHtlcIndex uint64 + + // RemoteLogIndex is the cumulative log index index of the remote node + // at this point in the commitment chain. This value will be + // incremented for each _update_ added to the remote update log. + RemoteLogIndex uint64 + + // RemoteHtlcIndex is the current remote running HTLC index. This value + // will be incremented for each outgoing HTLC the remote node offers. + RemoteHtlcIndex uint64 + + // LocalBalance is the current available settled balance within the + // channel directly spendable by us. + LocalBalance lnwire.MilliSatoshi + + // RemoteBalance is the current available settled balance within the + // channel directly spendable by the remote node. + RemoteBalance lnwire.MilliSatoshi + + // CommitFee is the amount calculated to be paid in fees for the + // current set of commitment transactions. The fee amount is persisted + // with the channel in order to allow the fee amount to be removed and + // recalculated with each channel state update, including updates that + // happen after a system restart. + CommitFee btcutil.Amount + + // FeePerKw is the min satoshis/kilo-weight that should be paid within + // the commitment transaction for the entire duration of the channel's + // lifetime. 
This field may be updated during normal operation of the + // channel as on-chain conditions change. + // + // TODO(halseth): make this SatPerKWeight. Cannot be done atm because + // this will cause the import cycle lnwallet<->channeldb. Fee + // estimation stuff should be in its own package. + FeePerKw btcutil.Amount + + // CommitTx is the latest version of the commitment state, broadcast + // able by us. + CommitTx *wire.MsgTx + + // CommitSig is one half of the signature required to fully complete + // the script for the commitment transaction above. This is the + // signature signed by the remote party for our version of the + // commitment transactions. + CommitSig []byte + + // Htlcs is the set of HTLC's that are pending at this particular + // commitment height. + Htlcs []HTLC + + // TODO(roasbeef): pending commit pointer? + // * lets just walk through +} + +// ChannelStatus is a bit vector used to indicate whether an OpenChannel is in +// the default usable state, or a state where it shouldn't be used. +type ChannelStatus uint8 + +var ( + // ChanStatusDefault is the normal state of an open channel. + ChanStatusDefault ChannelStatus + + // ChanStatusBorked indicates that the channel has entered an + // irreconcilable state, triggered by a state desynchronization or + // channel breach. Channels in this state should never be added to the + // htlc switch. + ChanStatusBorked ChannelStatus = 1 + + // ChanStatusCommitBroadcasted indicates that a commitment for this + // channel has been broadcasted. + ChanStatusCommitBroadcasted ChannelStatus = 1 << 1 + + // ChanStatusLocalDataLoss indicates that we have lost channel state + // for this channel, and broadcasting our latest commitment might be + // considered a breach. + // + // TODO(halseh): actually enforce that we are not force closing such a + // channel. 
+ ChanStatusLocalDataLoss ChannelStatus = 1 << 2 + + // ChanStatusRestored is a status flag that signals that the channel + // has been restored, and doesn't have all the fields a typical channel + // will have. + ChanStatusRestored ChannelStatus = 1 << 3 +) + +// chanStatusStrings maps a ChannelStatus to a human friendly string that +// describes that status. +var chanStatusStrings = map[ChannelStatus]string{ + ChanStatusDefault: "ChanStatusDefault", + ChanStatusBorked: "ChanStatusBorked", + ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted", + ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss", + ChanStatusRestored: "ChanStatusRestored", +} + +// orderedChanStatusFlags is an in-order list of all that channel status flags. +var orderedChanStatusFlags = []ChannelStatus{ + ChanStatusDefault, + ChanStatusBorked, + ChanStatusCommitBroadcasted, + ChanStatusLocalDataLoss, + ChanStatusRestored, +} + +// String returns a human-readable representation of the ChannelStatus. +func (c ChannelStatus) String() string { + // If no flags are set, then this is the default case. + if c == 0 { + return chanStatusStrings[ChanStatusDefault] + } + + // Add individual bit flags. + statusStr := "" + for _, flag := range orderedChanStatusFlags { + if c&flag == flag { + statusStr += chanStatusStrings[flag] + "|" + c -= flag + } + } + + // Remove anything to the right of the final bar, including it as well. + statusStr = strings.TrimRight(statusStr, "|") + + // Add any remaining flags which aren't accounted for as hex. + if c != 0 { + statusStr += "|0x" + strconv.FormatUint(uint64(c), 16) + } + + // If this was purely an unknown flag, then remove the extra bar at the + // start of the string. + statusStr = strings.TrimLeft(statusStr, "|") + + return statusStr +} + +// OpenChannel encapsulates the persistent and dynamic state of an open channel +// with a remote node. An open channel supports several options for on-disk +// serialization depending on the exact context. 
Full (upon channel creation) +// state commitments, and partial (due to a commitment update) writes are +// supported. Each partial write due to a state update appends the new update +// to an on-disk log, which can then subsequently be queried in order to +// "time-travel" to a prior state. +type OpenChannel struct { + // ChanType denotes which type of channel this is. + ChanType ChannelType + + // ChainHash is a hash which represents the blockchain that this + // channel will be opened within. This value is typically the genesis + // hash. In the case that the original chain went through a contentious + // hard-fork, then this value will be tweaked using the unique fork + // point on each branch. + ChainHash chainhash.Hash + + // FundingOutpoint is the outpoint of the final funding transaction. + // This value uniquely and globally identifies the channel within the + // target blockchain as specified by the chain hash parameter. + FundingOutpoint wire.OutPoint + + // ShortChannelID encodes the exact location in the chain in which the + // channel was initially confirmed. This includes: the block height, + // transaction index, and the output within the target transaction. + ShortChannelID lnwire.ShortChannelID + + // IsPending indicates whether a channel's funding transaction has been + // confirmed. + IsPending bool + + // IsInitiator is a bool which indicates if we were the original + // initiator for the channel. This value may affect how higher levels + // negotiate fees, or close the channel. + IsInitiator bool + + // FundingBroadcastHeight is the height in which the funding + // transaction was broadcast. This value can be used by higher level + // sub-systems to determine if a channel is stale and/or should have + // been confirmed before a certain height. 
+ FundingBroadcastHeight uint32 + + // NumConfsRequired is the number of confirmations a channel's funding + // transaction must have received in order to be considered available + // for normal transactional use. + NumConfsRequired uint16 + + // ChannelFlags holds the flags that were sent as part of the + // open_channel message. + ChannelFlags lnwire.FundingFlag + + // IdentityPub is the identity public key of the remote node this + // channel has been established with. + IdentityPub *btcec.PublicKey + + // Capacity is the total capacity of this channel. + Capacity btcutil.Amount + + // TotalMSatSent is the total number of milli-satoshis we've sent + // within this channel. + TotalMSatSent lnwire.MilliSatoshi + + // TotalMSatReceived is the total number of milli-satoshis we've + // received within this channel. + TotalMSatReceived lnwire.MilliSatoshi + + // LocalChanCfg is the channel configuration for the local node. + LocalChanCfg ChannelConfig + + // RemoteChanCfg is the channel configuration for the remote node. + RemoteChanCfg ChannelConfig + + // LocalCommitment is the current local commitment state for the local + // party. This is stored distinct from the state of the remote party + // as there are certain asymmetric parameters which affect the + // structure of each commitment. + LocalCommitment ChannelCommitment + + // RemoteCommitment is the current remote commitment state for the + // remote party. This is stored distinct from the state of the local + // party as there are certain asymmetric parameters which affect the + // structure of each commitment. + RemoteCommitment ChannelCommitment + + // RemoteCurrentRevocation is the current revocation for their + // commitment transaction. However, since this the derived public key, + // we don't yet have the private key so we aren't yet able to verify + // that it's actually in the hash chain. 
+ RemoteCurrentRevocation *btcec.PublicKey + + // RemoteNextRevocation is the revocation key to be used for the *next* + // commitment transaction we create for the local node. Within the + // specification, this value is referred to as the + // per-commitment-point. + RemoteNextRevocation *btcec.PublicKey + + // RevocationProducer is used to generate the revocation in such a way + // that remote side might store it efficiently and have the ability to + // restore the revocation by index if needed. Current implementation of + // secret producer is shachain producer. + RevocationProducer shachain.Producer + + // RevocationStore is used to efficiently store the revocations for + // previous channels states sent to us by remote side. Current + // implementation of secret store is shachain store. + RevocationStore shachain.Store + + // FundingTxn is the transaction containing this channel's funding + // outpoint. Upon restarts, this txn will be rebroadcast if the channel + // is found to be pending. + // + // NOTE: This value will only be populated for single-funder channels + // for which we are the initiator. + FundingTxn *wire.MsgTx + + // TODO(roasbeef): eww + Db *DB + + // TODO(roasbeef): just need to store local and remote HTLC's? + + sync.RWMutex +} + +// ShortChanID returns the current ShortChannelID of this channel. +func (c *OpenChannel) ShortChanID() lnwire.ShortChannelID { + c.RLock() + defer c.RUnlock() + + return c.ShortChannelID +} + +// HTLC is the on-disk representation of a hash time-locked contract. HTLCs are +// contained within ChannelDeltas which encode the current state of the +// commitment between state updates. +// +// TODO(roasbeef): save space by using smaller ints at tail end? +type HTLC struct { + // Signature is the signature for the second level covenant transaction + // for this HTLC. 
The second level transaction is a timeout tx in the + // case that this is an outgoing HTLC, and a success tx in the case + // that this is an incoming HTLC. + // + // TODO(roasbeef): make [64]byte instead? + Signature []byte + + // RHash is the payment hash of the HTLC. + RHash [32]byte + + // Amt is the amount of milli-satoshis this HTLC escrows. + Amt lnwire.MilliSatoshi + + // RefundTimeout is the absolute timeout on the HTLC that the sender + // must wait before reclaiming the funds in limbo. + RefundTimeout uint32 + + // OutputIndex is the output index for this particular HTLC output + // within the commitment transaction. + OutputIndex int32 + + // Incoming denotes whether we're the receiver or the sender of this + // HTLC. + Incoming bool + + // OnionBlob is an opaque blob which is used to complete multi-hop + // routing. + OnionBlob []byte + + // HtlcIndex is the HTLC counter index of this active, outstanding + // HTLC. This differs from the LogIndex, as the HtlcIndex is only + // incremented for each offered HTLC, while they LogIndex is + // incremented for each update (includes settle+fail). + HtlcIndex uint64 + + // LogIndex is the cumulative log index of this HTLC. This differs + // from the HtlcIndex as this will be incremented for each new log + // update added. + LogIndex uint64 +} + +// CircuitKey is used by a channel to uniquely identify the HTLCs it receives +// from the switch, and is used to purge our in-memory state of HTLCs that have +// already been processed by a link. Two list of CircuitKeys are included in +// each CommitDiff to allow a link to determine which in-memory htlcs directed +// the opening and closing of circuits in the switch's circuit map. +type CircuitKey struct { + // ChanID is the short chanid indicating the HTLC's origin. + // + // NOTE: It is fine for this value to be blank, as this indicates a + // locally-sourced payment. 
+ ChanID lnwire.ShortChannelID + + // HtlcID is the unique htlc index predominately assigned by links, + // though can also be assigned by switch in the case of locally-sourced + // payments. + HtlcID uint64 +} + +// String returns a string representation of the CircuitKey. +func (k CircuitKey) String() string { + return fmt.Sprintf("(Chan ID=%s, HTLC ID=%d)", k.ChanID, k.HtlcID) +} + +// ClosureType is an enum like structure that details exactly _how_ a channel +// was closed. Three closure types are currently possible: none, cooperative, +// local force close, remote force close, and (remote) breach. +type ClosureType uint8 + +const ( + // RemoteForceClose indicates that the remote peer has unilaterally + // broadcast their current commitment state on-chain. + RemoteForceClose ClosureType = 4 +) + +// ChannelCloseSummary contains the final state of a channel at the point it +// was closed. Once a channel is closed, all the information pertaining to that +// channel within the openChannelBucket is deleted, and a compact summary is +// put in place instead. +type ChannelCloseSummary struct { + // ChanPoint is the outpoint for this channel's funding transaction, + // and is used as a unique identifier for the channel. + ChanPoint wire.OutPoint + + // ShortChanID encodes the exact location in the chain in which the + // channel was initially confirmed. This includes: the block height, + // transaction index, and the output within the target transaction. + ShortChanID lnwire.ShortChannelID + + // ChainHash is the hash of the genesis block that this channel resides + // within. + ChainHash chainhash.Hash + + // ClosingTXID is the txid of the transaction which ultimately closed + // this channel. + ClosingTXID chainhash.Hash + + // RemotePub is the public key of the remote peer that we formerly had + // a channel with. + RemotePub *btcec.PublicKey + + // Capacity was the total capacity of the channel. 
+ Capacity btcutil.Amount + + // CloseHeight is the height at which the funding transaction was + // spent. + CloseHeight uint32 + + // SettledBalance is our total balance settled balance at the time of + // channel closure. This _does not_ include the sum of any outputs that + // have been time-locked as a result of the unilateral channel closure. + SettledBalance btcutil.Amount + + // TimeLockedBalance is the sum of all the time-locked outputs at the + // time of channel closure. If we triggered the force closure of this + // channel, then this value will be non-zero if our settled output is + // above the dust limit. If we were on the receiving side of a channel + // force closure, then this value will be non-zero if we had any + // outstanding outgoing HTLC's at the time of channel closure. + TimeLockedBalance btcutil.Amount + + // CloseType details exactly _how_ the channel was closed. Five closure + // types are possible: cooperative, local force, remote force, breach + // and funding canceled. + CloseType ClosureType + + // IsPending indicates whether this channel is in the 'pending close' + // state, which means the channel closing transaction has been + // confirmed, but not yet been fully resolved. In the case of a channel + // that has been cooperatively closed, it will go straight into the + // fully resolved state as soon as the closing transaction has been + // confirmed. However, for channels that have been force closed, they'll + // stay marked as "pending" until _all_ the pending funds have been + // swept. + IsPending bool + + // RemoteCurrentRevocation is the current revocation for their + // commitment transaction. However, since this is the derived public key, + // we don't yet have the private key so we aren't yet able to verify + // that it's actually in the hash chain. 
+ RemoteCurrentRevocation *btcec.PublicKey + + // RemoteNextRevocation is the revocation key to be used for the *next* + // commitment transaction we create for the local node. Within the + // specification, this value is referred to as the + // per-commitment-point. + RemoteNextRevocation *btcec.PublicKey + + // LocalChanCfg is the channel configuration for the local node. + LocalChanConfig ChannelConfig + + // LastChanSyncMsg is the ChannelReestablish message for this channel + // for the state at the point where it was closed. + LastChanSyncMsg *lnwire.ChannelReestablish +} + +func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error { + err := WriteElements(w, + cs.ChanPoint, cs.ShortChanID, cs.ChainHash, cs.ClosingTXID, + cs.CloseHeight, cs.RemotePub, cs.Capacity, cs.SettledBalance, + cs.TimeLockedBalance, cs.CloseType, cs.IsPending, + ) + if err != nil { + return err + } + + // If this is a close channel summary created before the addition of + // the new fields, then we can exit here. + if cs.RemoteCurrentRevocation == nil { + return WriteElements(w, false) + } + + // If fields are present, write boolean to indicate this, and continue. + if err := WriteElements(w, true); err != nil { + return err + } + + if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil { + return err + } + + if err := writeChanConfig(w, &cs.LocalChanConfig); err != nil { + return err + } + + // The RemoteNextRevocation field is optional, as it's possible for a + // channel to be closed before we learn of the next unrevoked + // revocation point for the remote party. Write a boolen indicating + // whether this field is present or not. + if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil { + return err + } + + // Write the field, if present. + if cs.RemoteNextRevocation != nil { + if err = WriteElements(w, cs.RemoteNextRevocation); err != nil { + return err + } + } + + // Write whether the channel sync message is present. 
+ if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil { + return err + } + + // Write the channel sync message, if present. + if cs.LastChanSyncMsg != nil { + if err := WriteElements(w, cs.LastChanSyncMsg); err != nil { + return err + } + } + + return nil +} + +func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, error) { + c := &ChannelCloseSummary{} + + err := ReadElements(r, + &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID, + &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance, + &c.TimeLockedBalance, &c.CloseType, &c.IsPending, + ) + if err != nil { + return nil, err + } + + // We'll now check to see if the channel close summary was encoded with + // any of the additional optional fields. + var hasNewFields bool + err = ReadElements(r, &hasNewFields) + if err != nil { + return nil, err + } + + // If fields are not present, we can return. + if !hasNewFields { + return c, nil + } + + // Otherwise read the new fields. + if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil { + return nil, err + } + + if err := readChanConfig(r, &c.LocalChanConfig); err != nil { + return nil, err + } + + // Finally, we'll attempt to read the next unrevoked commitment point + // for the remote party. If we closed the channel before receiving a + // funding locked message then this might not be present. A boolean + // indicating whether the field is present will come first. + var hasRemoteNextRevocation bool + err = ReadElements(r, &hasRemoteNextRevocation) + if err != nil { + return nil, err + } + + // If this field was written, read it. + if hasRemoteNextRevocation { + err = ReadElements(r, &c.RemoteNextRevocation) + if err != nil { + return nil, err + } + } + + // Check if we have a channel sync message to read. + var hasChanSyncMsg bool + err = ReadElements(r, &hasChanSyncMsg) + if err == io.EOF { + return c, nil + } else if err != nil { + return nil, err + } + + // If a chan sync message is present, read it. 
+ if hasChanSyncMsg { + // We must pass in reference to a lnwire.Message for the codec + // to support it. + var msg lnwire.Message + if err := ReadElements(r, &msg); err != nil { + return nil, err + } + + chanSync, ok := msg.(*lnwire.ChannelReestablish) + if !ok { + return nil, errors.New("unable cast db Message to " + + "ChannelReestablish") + } + c.LastChanSyncMsg = chanSync + } + + return c, nil +} + +func writeChanConfig(b io.Writer, c *ChannelConfig) error { + return WriteElements(b, + c.DustLimit, c.MaxPendingAmount, c.ChanReserve, c.MinHTLC, + c.MaxAcceptedHtlcs, c.CsvDelay, c.MultiSigKey, + c.RevocationBasePoint, c.PaymentBasePoint, c.DelayBasePoint, + c.HtlcBasePoint, + ) +} + +func readChanConfig(b io.Reader, c *ChannelConfig) error { + return ReadElements(b, + &c.DustLimit, &c.MaxPendingAmount, &c.ChanReserve, + &c.MinHTLC, &c.MaxAcceptedHtlcs, &c.CsvDelay, + &c.MultiSigKey, &c.RevocationBasePoint, + &c.PaymentBasePoint, &c.DelayBasePoint, + &c.HtlcBasePoint, + ) +} diff --git a/channeldb/migration_01_to_11/channel_test.go b/channeldb/migration_01_to_11/channel_test.go new file mode 100644 index 0000000000..1380828e98 --- /dev/null +++ b/channeldb/migration_01_to_11/channel_test.go @@ -0,0 +1,221 @@ +package migration_01_to_11 + +import ( + "bytes" + "io/ioutil" + "math/rand" + "os" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + _ "github.com/btcsuite/btcwallet/walletdb/bdb" + "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/shachain" +) + +var ( + key = [chainhash.HashSize]byte{ + 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, + 0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, + 0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, + 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, + } + rev = [chainhash.HashSize]byte{ + 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, + 0x48, 0x59, 
0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, + 0x2d, 0xe7, 0x93, 0xe4, + } + testTx = &wire.MsgTx{ + Version: 1, + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{ + Hash: chainhash.Hash{}, + Index: 0xffffffff, + }, + SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62}, + Sequence: 0xffffffff, + }, + }, + TxOut: []*wire.TxOut{ + { + Value: 5000000000, + PkScript: []byte{ + 0x41, // OP_DATA_65 + 0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5, + 0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42, + 0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1, + 0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24, + 0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97, + 0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78, + 0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20, + 0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63, + 0xa6, // 65-byte signature + 0xac, // OP_CHECKSIG + }, + }, + }, + LockTime: 5, + } + privKey, pubKey = btcec.PrivKeyFromBytes(btcec.S256(), key[:]) +) + +// makeTestDB creates a new instance of the ChannelDB for testing purposes. A +// callback which cleans up the created temporary directories is also returned +// and intended to be executed after the test completes. +func makeTestDB() (*DB, func(), error) { + // First, create a temporary directory to be used for the duration of + // this test. + tempDirName, err := ioutil.TempDir("", "channeldb") + if err != nil { + return nil, nil, err + } + + // Next, create channeldb for the first time. + cdb, err := Open(tempDirName) + if err != nil { + return nil, nil, err + } + + cleanUp := func() { + cdb.Close() + os.RemoveAll(tempDirName) + } + + return cdb, cleanUp, nil +} + +func createTestChannelState(cdb *DB) (*OpenChannel, error) { + // Simulate 1000 channel updates. 
+ producer, err := shachain.NewRevocationProducerFromBytes(key[:]) + if err != nil { + return nil, err + } + store := shachain.NewRevocationStore() + for i := 0; i < 1; i++ { + preImage, err := producer.AtIndex(uint64(i)) + if err != nil { + return nil, err + } + + if err := store.AddNextEntry(preImage); err != nil { + return nil, err + } + } + + localCfg := ChannelConfig{ + ChannelConstraints: ChannelConstraints{ + DustLimit: btcutil.Amount(rand.Int63()), + MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), + ChanReserve: btcutil.Amount(rand.Int63()), + MinHTLC: lnwire.MilliSatoshi(rand.Int63()), + MaxAcceptedHtlcs: uint16(rand.Int31()), + CsvDelay: uint16(rand.Int31()), + }, + MultiSigKey: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + }, + RevocationBasePoint: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + }, + PaymentBasePoint: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + }, + DelayBasePoint: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + }, + HtlcBasePoint: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + }, + } + remoteCfg := ChannelConfig{ + ChannelConstraints: ChannelConstraints{ + DustLimit: btcutil.Amount(rand.Int63()), + MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()), + ChanReserve: btcutil.Amount(rand.Int63()), + MinHTLC: lnwire.MilliSatoshi(rand.Int63()), + MaxAcceptedHtlcs: uint16(rand.Int31()), + CsvDelay: uint16(rand.Int31()), + }, + MultiSigKey: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + KeyLocator: keychain.KeyLocator{ + Family: keychain.KeyFamilyMultiSig, + Index: 9, + }, + }, + RevocationBasePoint: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + KeyLocator: keychain.KeyLocator{ + Family: keychain.KeyFamilyRevocationBase, + Index: 8, + }, + }, + PaymentBasePoint: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + KeyLocator: keychain.KeyLocator{ + Family: keychain.KeyFamilyPaymentBase, + Index: 7, + }, + }, + DelayBasePoint: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + 
KeyLocator: keychain.KeyLocator{ + Family: keychain.KeyFamilyDelayBase, + Index: 6, + }, + }, + HtlcBasePoint: keychain.KeyDescriptor{ + PubKey: privKey.PubKey(), + KeyLocator: keychain.KeyLocator{ + Family: keychain.KeyFamilyHtlcBase, + Index: 5, + }, + }, + } + + chanID := lnwire.NewShortChanIDFromInt(uint64(rand.Int63())) + + return &OpenChannel{ + ChanType: SingleFunder, + ChainHash: key, + FundingOutpoint: wire.OutPoint{Hash: key, Index: rand.Uint32()}, + ShortChannelID: chanID, + IsInitiator: true, + IsPending: true, + IdentityPub: pubKey, + Capacity: btcutil.Amount(10000), + LocalChanCfg: localCfg, + RemoteChanCfg: remoteCfg, + TotalMSatSent: 8, + TotalMSatReceived: 2, + LocalCommitment: ChannelCommitment{ + CommitHeight: 0, + LocalBalance: lnwire.MilliSatoshi(9000), + RemoteBalance: lnwire.MilliSatoshi(3000), + CommitFee: btcutil.Amount(rand.Int63()), + FeePerKw: btcutil.Amount(5000), + CommitTx: testTx, + CommitSig: bytes.Repeat([]byte{1}, 71), + }, + RemoteCommitment: ChannelCommitment{ + CommitHeight: 0, + LocalBalance: lnwire.MilliSatoshi(3000), + RemoteBalance: lnwire.MilliSatoshi(9000), + CommitFee: btcutil.Amount(rand.Int63()), + FeePerKw: btcutil.Amount(5000), + CommitTx: testTx, + CommitSig: bytes.Repeat([]byte{1}, 71), + }, + NumConfsRequired: 4, + RemoteCurrentRevocation: privKey.PubKey(), + RemoteNextRevocation: privKey.PubKey(), + RevocationProducer: producer, + RevocationStore: store, + Db: cdb, + FundingTxn: testTx, + }, nil +} diff --git a/channeldb/migration_01_to_11/codec.go b/channeldb/migration_01_to_11/codec.go new file mode 100644 index 0000000000..1727c8c997 --- /dev/null +++ b/channeldb/migration_01_to_11/codec.go @@ -0,0 +1,448 @@ +package migration_01_to_11 + +import ( + "encoding/binary" + "fmt" + "io" + "net" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/keychain" + 
"github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/shachain" +) + +// writeOutpoint writes an outpoint to the passed writer using the minimal +// amount of bytes possible. +func writeOutpoint(w io.Writer, o *wire.OutPoint) error { + if _, err := w.Write(o.Hash[:]); err != nil { + return err + } + if err := binary.Write(w, byteOrder, o.Index); err != nil { + return err + } + + return nil +} + +// readOutpoint reads an outpoint from the passed reader that was previously +// written using the writeOutpoint struct. +func readOutpoint(r io.Reader, o *wire.OutPoint) error { + if _, err := io.ReadFull(r, o.Hash[:]); err != nil { + return err + } + if err := binary.Read(r, byteOrder, &o.Index); err != nil { + return err + } + + return nil +} + +// UnknownElementType is an error returned when the codec is unable to encode or +// decode a particular type. +type UnknownElementType struct { + method string + element interface{} +} + +// Error returns the name of the method that encountered the error, as well as +// the type that was unsupported. +func (e UnknownElementType) Error() string { + return fmt.Sprintf("Unknown type in %s: %T", e.method, e.element) +} + +// WriteElement is a one-stop shop to write the big endian representation of +// any element which is to be serialized for storage on disk. The passed +// io.Writer should be backed by an appropriately sized byte slice, or be able +// to dynamically expand to accommodate additional data. 
+func WriteElement(w io.Writer, element interface{}) error { + switch e := element.(type) { + case keychain.KeyDescriptor: + if err := binary.Write(w, byteOrder, e.Family); err != nil { + return err + } + if err := binary.Write(w, byteOrder, e.Index); err != nil { + return err + } + + if e.PubKey != nil { + if err := binary.Write(w, byteOrder, true); err != nil { + return fmt.Errorf("error writing serialized element: %s", err) + } + + return WriteElement(w, e.PubKey) + } + + return binary.Write(w, byteOrder, false) + case ChannelType: + if err := binary.Write(w, byteOrder, e); err != nil { + return err + } + + case chainhash.Hash: + if _, err := w.Write(e[:]); err != nil { + return err + } + + case wire.OutPoint: + return writeOutpoint(w, &e) + + case lnwire.ShortChannelID: + if err := binary.Write(w, byteOrder, e.ToUint64()); err != nil { + return err + } + + case lnwire.ChannelID: + if _, err := w.Write(e[:]); err != nil { + return err + } + + case int64, uint64: + if err := binary.Write(w, byteOrder, e); err != nil { + return err + } + + case uint32: + if err := binary.Write(w, byteOrder, e); err != nil { + return err + } + + case int32: + if err := binary.Write(w, byteOrder, e); err != nil { + return err + } + + case uint16: + if err := binary.Write(w, byteOrder, e); err != nil { + return err + } + + case uint8: + if err := binary.Write(w, byteOrder, e); err != nil { + return err + } + + case bool: + if err := binary.Write(w, byteOrder, e); err != nil { + return err + } + + case btcutil.Amount: + if err := binary.Write(w, byteOrder, uint64(e)); err != nil { + return err + } + + case lnwire.MilliSatoshi: + if err := binary.Write(w, byteOrder, uint64(e)); err != nil { + return err + } + + case *btcec.PrivateKey: + b := e.Serialize() + if _, err := w.Write(b); err != nil { + return err + } + + case *btcec.PublicKey: + b := e.SerializeCompressed() + if _, err := w.Write(b); err != nil { + return err + } + + case shachain.Producer: + return e.Encode(w) + + case 
shachain.Store:
+		return e.Encode(w)
+
+	case *wire.MsgTx:
+		return e.Serialize(w)
+
+	case [32]byte:
+		if _, err := w.Write(e[:]); err != nil {
+			return err
+		}
+
+	case []byte:
+		if err := wire.WriteVarBytes(w, 0, e); err != nil {
+			return err
+		}
+
+	case lnwire.Message:
+		if _, err := lnwire.WriteMessage(w, e, 0); err != nil {
+			return err
+		}
+
+	case ChannelStatus:
+		if err := binary.Write(w, byteOrder, e); err != nil {
+			return err
+		}
+
+	case ClosureType:
+		if err := binary.Write(w, byteOrder, e); err != nil {
+			return err
+		}
+
+	case lnwire.FundingFlag:
+		if err := binary.Write(w, byteOrder, e); err != nil {
+			return err
+		}
+
+	case net.Addr:
+		if err := serializeAddr(w, e); err != nil {
+			return err
+		}
+
+	case []net.Addr:
+		if err := WriteElement(w, uint32(len(e))); err != nil {
+			return err
+		}
+
+		for _, addr := range e {
+			if err := serializeAddr(w, addr); err != nil {
+				return err
+			}
+		}
+
+	default:
+		return UnknownElementType{"WriteElement", e}
+	}
+
+	return nil
+}
+
+// WriteElements writes each element in the elements slice to the passed
+// io.Writer using WriteElement.
+func WriteElements(w io.Writer, elements ...interface{}) error {
+	for _, element := range elements {
+		err := WriteElement(w, element)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ReadElement is a one-stop utility function to deserialize any datastructure
+// encoded using the serialization format of the database.
+func ReadElement(r io.Reader, element interface{}) error { + switch e := element.(type) { + case *keychain.KeyDescriptor: + if err := binary.Read(r, byteOrder, &e.Family); err != nil { + return err + } + if err := binary.Read(r, byteOrder, &e.Index); err != nil { + return err + } + + var hasPubKey bool + if err := binary.Read(r, byteOrder, &hasPubKey); err != nil { + return err + } + + if hasPubKey { + return ReadElement(r, &e.PubKey) + } + + case *ChannelType: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *chainhash.Hash: + if _, err := io.ReadFull(r, e[:]); err != nil { + return err + } + + case *wire.OutPoint: + return readOutpoint(r, e) + + case *lnwire.ShortChannelID: + var a uint64 + if err := binary.Read(r, byteOrder, &a); err != nil { + return err + } + *e = lnwire.NewShortChanIDFromInt(a) + + case *lnwire.ChannelID: + if _, err := io.ReadFull(r, e[:]); err != nil { + return err + } + + case *int64, *uint64: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *uint32: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *int32: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *uint16: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *uint8: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *bool: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *btcutil.Amount: + var a uint64 + if err := binary.Read(r, byteOrder, &a); err != nil { + return err + } + + *e = btcutil.Amount(a) + + case *lnwire.MilliSatoshi: + var a uint64 + if err := binary.Read(r, byteOrder, &a); err != nil { + return err + } + + *e = lnwire.MilliSatoshi(a) + + case **btcec.PrivateKey: + var b [btcec.PrivKeyBytesLen]byte + if _, err := io.ReadFull(r, b[:]); err != nil { + return err + } + + priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), b[:]) + *e = priv + + case 
**btcec.PublicKey: + var b [btcec.PubKeyBytesLenCompressed]byte + if _, err := io.ReadFull(r, b[:]); err != nil { + return err + } + + pubKey, err := btcec.ParsePubKey(b[:], btcec.S256()) + if err != nil { + return err + } + *e = pubKey + + case *shachain.Producer: + var root [32]byte + if _, err := io.ReadFull(r, root[:]); err != nil { + return err + } + + // TODO(roasbeef): remove + producer, err := shachain.NewRevocationProducerFromBytes(root[:]) + if err != nil { + return err + } + + *e = producer + + case *shachain.Store: + store, err := shachain.NewRevocationStoreFromBytes(r) + if err != nil { + return err + } + + *e = store + + case **wire.MsgTx: + tx := wire.NewMsgTx(2) + if err := tx.Deserialize(r); err != nil { + return err + } + + *e = tx + + case *[32]byte: + if _, err := io.ReadFull(r, e[:]); err != nil { + return err + } + + case *[]byte: + bytes, err := wire.ReadVarBytes(r, 0, 66000, "[]byte") + if err != nil { + return err + } + + *e = bytes + + case *lnwire.Message: + msg, err := lnwire.ReadMessage(r, 0) + if err != nil { + return err + } + + *e = msg + + case *ChannelStatus: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *ClosureType: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *lnwire.FundingFlag: + if err := binary.Read(r, byteOrder, e); err != nil { + return err + } + + case *net.Addr: + addr, err := deserializeAddr(r) + if err != nil { + return err + } + *e = addr + + case *[]net.Addr: + var numAddrs uint32 + if err := ReadElement(r, &numAddrs); err != nil { + return err + } + + *e = make([]net.Addr, numAddrs) + for i := uint32(0); i < numAddrs; i++ { + addr, err := deserializeAddr(r) + if err != nil { + return err + } + (*e)[i] = addr + } + + default: + return UnknownElementType{"ReadElement", e} + } + + return nil +} + +// ReadElements deserializes a variable number of elements into the passed +// io.Reader, with each element being deserialized according to the 
ReadElement +// function. +func ReadElements(r io.Reader, elements ...interface{}) error { + for _, element := range elements { + err := ReadElement(r, element) + if err != nil { + return err + } + } + return nil +} diff --git a/channeldb/migration_01_to_11/db.go b/channeldb/migration_01_to_11/db.go new file mode 100644 index 0000000000..ed1239fd20 --- /dev/null +++ b/channeldb/migration_01_to_11/db.go @@ -0,0 +1,216 @@ +package migration_01_to_11 + +import ( + "bytes" + "encoding/binary" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/lightningnetwork/lnd/channeldb/kvdb" +) + +const ( + dbName = "channel.db" + dbFilePermission = 0600 +) + +// migration is a function which takes a prior outdated version of the database +// instances and mutates the key/bucket structure to arrive at a more +// up-to-date version of the database. +type migration func(tx kvdb.RwTx) error + +var ( + // Big endian is the preferred byte order, due to cursor scans over + // integer keys iterating in order. + byteOrder = binary.BigEndian +) + +// DB is the primary datastore for the lnd daemon. The database stores +// information related to nodes, routing data, open/closed channels, fee +// schedules, and reputation data. +type DB struct { + kvdb.Backend + dbPath string + graph *ChannelGraph + now func() time.Time +} + +// Open opens an existing channeldb. Any necessary schemas migrations due to +// updates will take place as necessary. +func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) { + path := filepath.Join(dbPath, dbName) + + if !fileExists(path) { + if err := createChannelDB(dbPath); err != nil { + return nil, err + } + } + + opts := DefaultOptions() + for _, modifier := range modifiers { + modifier(&opts) + } + + // Specify bbolt freelist options to reduce heap pressure in case the + // freelist grows to be very large. 
+ bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync) + if err != nil { + return nil, err + } + + chanDB := &DB{ + Backend: bdb, + dbPath: dbPath, + now: time.Now, + } + chanDB.graph = newChannelGraph( + chanDB, opts.RejectCacheSize, opts.ChannelCacheSize, + ) + + return chanDB, nil +} + +// createChannelDB creates and initializes a fresh version of channeldb. In +// the case that the target path has not yet been created or doesn't yet exist, +// then the path is created. Additionally, all required top-level buckets used +// within the database are created. +func createChannelDB(dbPath string) error { + if !fileExists(dbPath) { + if err := os.MkdirAll(dbPath, 0700); err != nil { + return err + } + } + + path := filepath.Join(dbPath, dbName) + bdb, err := kvdb.Create(kvdb.BoltBackendName, path, false) + if err != nil { + return err + } + + err = kvdb.Update(bdb, func(tx kvdb.RwTx) error { + if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil { + return err + } + if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil { + return err + } + + if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil { + return err + } + + if _, err := tx.CreateTopLevelBucket(paymentBucket); err != nil { + return err + } + + nodes, err := tx.CreateTopLevelBucket(nodeBucket) + if err != nil { + return err + } + _, err = nodes.CreateBucket(aliasIndexBucket) + if err != nil { + return err + } + _, err = nodes.CreateBucket(nodeUpdateIndexBucket) + if err != nil { + return err + } + + edges, err := tx.CreateTopLevelBucket(edgeBucket) + if err != nil { + return err + } + if _, err := edges.CreateBucket(edgeIndexBucket); err != nil { + return err + } + if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil { + return err + } + if _, err := edges.CreateBucket(channelPointBucket); err != nil { + return err + } + if _, err := edges.CreateBucket(zombieBucket); err != nil { + return err + } + + graphMeta, err := 
tx.CreateTopLevelBucket(graphMetaBucket) + if err != nil { + return err + } + _, err = graphMeta.CreateBucket(pruneLogBucket) + if err != nil { + return err + } + + if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil { + return err + } + + meta := &Meta{ + DbVersionNumber: 0, + } + return putMeta(meta, tx) + }) + if err != nil { + return fmt.Errorf("unable to create new channeldb") + } + + return bdb.Close() +} + +// fileExists returns true if the file exists, and false otherwise. +func fileExists(path string) bool { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return false + } + } + + return true +} + +// FetchClosedChannels attempts to fetch all closed channels from the database. +// The pendingOnly bool toggles if channels that aren't yet fully closed should +// be returned in the response or not. When a channel was cooperatively closed, +// it becomes fully closed after a single confirmation. When a channel was +// forcibly closed, it will become fully closed after _all_ the pending funds +// (if any) have been swept. +func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) { + var chanSummaries []*ChannelCloseSummary + + if err := kvdb.View(d, func(tx kvdb.ReadTx) error { + closeBucket := tx.ReadBucket(closedChannelBucket) + if closeBucket == nil { + return ErrNoClosedChannels + } + + return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) error { + summaryReader := bytes.NewReader(summaryBytes) + chanSummary, err := deserializeCloseChannelSummary(summaryReader) + if err != nil { + return err + } + + // If the query specified to only include pending + // channels, then we'll skip any channels which aren't + // currently pending. 
+ if !chanSummary.IsPending && pendingOnly { + return nil + } + + chanSummaries = append(chanSummaries, chanSummary) + return nil + }) + }); err != nil { + return nil, err + } + + return chanSummaries, nil +} + +// ChannelGraph returns a new instance of the directed channel graph. +func (d *DB) ChannelGraph() *ChannelGraph { + return d.graph +} diff --git a/channeldb/migration_01_to_11/error.go b/channeldb/migration_01_to_11/error.go new file mode 100644 index 0000000000..d096ae8bca --- /dev/null +++ b/channeldb/migration_01_to_11/error.go @@ -0,0 +1,56 @@ +package migration_01_to_11 + +import ( + "fmt" +) + +var ( + // ErrNoInvoicesCreated is returned when we don't have invoices in + // our database to return. + ErrNoInvoicesCreated = fmt.Errorf("there are no existing invoices") + + // ErrNoPaymentsCreated is returned when bucket of payments hasn't been + // created. + ErrNoPaymentsCreated = fmt.Errorf("there are no existing payments") + + // ErrGraphNotFound is returned when at least one of the components of + // graph doesn't exist. + ErrGraphNotFound = fmt.Errorf("graph bucket not initialized") + + // ErrSourceNodeNotSet is returned if the source node of the graph + // hasn't been added The source node is the center node within a + // star-graph. + ErrSourceNodeNotSet = fmt.Errorf("source node does not exist") + + // ErrGraphNodeNotFound is returned when we're unable to find the target + // node. + ErrGraphNodeNotFound = fmt.Errorf("unable to find node") + + // ErrEdgeNotFound is returned when an edge for the target chanID + // can't be found. + ErrEdgeNotFound = fmt.Errorf("edge not found") + + // ErrUnknownAddressType is returned when a node's addressType is not + // an expected value. + ErrUnknownAddressType = fmt.Errorf("address type cannot be resolved") + + // ErrNoClosedChannels is returned when a node is queries for all the + // channels it has closed, but it hasn't yet closed any channels. 
+	ErrNoClosedChannels = fmt.Errorf("no channel have been closed yet")
+
+	// ErrEdgePolicyOptionalFieldNotFound is an error returned if a channel
+	// policy field is not found in the db even though its message flags
+	// indicate it should be.
+	ErrEdgePolicyOptionalFieldNotFound = fmt.Errorf("optional field not " +
+		"present")
+)
+
+// ErrTooManyExtraOpaqueBytes creates an error which should be returned if the
+// caller attempts to write an announcement message which bears too many extra
+// opaque bytes. We limit this value in order to ensure that we don't waste
+// disk space due to nodes unnecessarily padding out their announcements with
+// garbage data.
+func ErrTooManyExtraOpaqueBytes(numBytes int) error {
+	return fmt.Errorf("max allowed number of opaque bytes is %v, received "+
+		"%v bytes", MaxAllowedExtraOpaqueBytes, numBytes)
+}
diff --git a/channeldb/migration_01_to_11/graph.go b/channeldb/migration_01_to_11/graph.go
new file mode 100644
index 0000000000..14436f586a
--- /dev/null
+++ b/channeldb/migration_01_to_11/graph.go
@@ -0,0 +1,1179 @@
+package migration_01_to_11
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"image/color"
+	"io"
+	"net"
+	"time"
+
+	"github.com/btcsuite/btcd/btcec"
+	"github.com/btcsuite/btcd/chaincfg/chainhash"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btcutil"
+	"github.com/lightningnetwork/lnd/channeldb/kvdb"
+	"github.com/lightningnetwork/lnd/lnwire"
+)
+
+var (
+	// nodeBucket is a bucket which houses all the vertices or nodes within
+	// the channel graph. This bucket has a single-sub bucket which adds an
+	// additional index from pubkey -> alias. Within the top-level of this
+	// bucket, the key space maps a node's compressed public key to the
+	// serialized information for that node. Additionally, there's a
+	// special key "source" which stores the pubkey of the source node. The
+	// source node is used as the starting point for all graph/queries and
+	// traversals. 
The graph is formed as a star-graph with the source node + // at the center. + // + // maps: pubKey -> nodeInfo + // maps: source -> selfPubKey + nodeBucket = []byte("graph-node") + + // nodeUpdateIndexBucket is a sub-bucket of the nodeBucket. This bucket + // will be used to quickly look up the "freshness" of a node's last + // update to the network. The bucket only contains keys, and no values, + // it's mapping: + // + // maps: updateTime || nodeID -> nil + nodeUpdateIndexBucket = []byte("graph-node-update-index") + + // sourceKey is a special key that resides within the nodeBucket. The + // sourceKey maps a key to the public key of the "self node". + sourceKey = []byte("source") + + // aliasIndexBucket is a sub-bucket that's nested within the main + // nodeBucket. This bucket maps the public key of a node to its + // current alias. This bucket is provided as it can be used within a + // future UI layer to add an additional degree of confirmation. + aliasIndexBucket = []byte("alias") + + // edgeBucket is a bucket which houses all of the edge or channel + // information within the channel graph. This bucket essentially acts + // as an adjacency list, which in conjunction with a range scan, can be + // used to iterate over all the incoming and outgoing edges for a + // particular node. Key in the bucket use a prefix scheme which leads + // with the node's public key and sends with the compact edge ID. + // For each chanID, there will be two entries within the bucket, as the + // graph is directed: nodes may have different policies w.r.t to fees + // for their respective directions. + // + // maps: pubKey || chanID -> channel edge policy for node + edgeBucket = []byte("graph-edge") + + // unknownPolicy is represented as an empty slice. It is + // used as the value in edgeBucket for unknown channel edge policies. + // Unknown policies are still stored in the database to enable efficient + // lookup of incoming channel edges. 
+ unknownPolicy = []byte{} + + // edgeIndexBucket is an index which can be used to iterate all edges + // in the bucket, grouping them according to their in/out nodes. + // Additionally, the items in this bucket also contain the complete + // edge information for a channel. The edge information includes the + // capacity of the channel, the nodes that made the channel, etc. This + // bucket resides within the edgeBucket above. Creation of an edge + // proceeds in two phases: first the edge is added to the edge index, + // afterwards the edgeBucket can be updated with the latest details of + // the edge as they are announced on the network. + // + // maps: chanID -> pubKey1 || pubKey2 || restofEdgeInfo + edgeIndexBucket = []byte("edge-index") + + // edgeUpdateIndexBucket is a sub-bucket of the main edgeBucket. This + // bucket contains an index which allows us to gauge the "freshness" of + // a channel's last updates. + // + // maps: updateTime || chanID -> nil + edgeUpdateIndexBucket = []byte("edge-update-index") + + // channelPointBucket maps a channel's full outpoint (txid:index) to + // its short 8-byte channel ID. This bucket resides within the + // edgeBucket above, and can be used to quickly remove an edge due to + // the outpoint being spent, or to query for existence of a channel. + // + // maps: outPoint -> chanID + channelPointBucket = []byte("chan-index") + + // zombieBucket is a sub-bucket of the main edgeBucket bucket + // responsible for maintaining an index of zombie channels. Each entry + // exists within the bucket as follows: + // + // maps: chanID -> pubKey1 || pubKey2 + // + // The chanID represents the channel ID of the edge that is marked as a + // zombie and is used as the key, which maps to the public keys of the + // edge's participants. + zombieBucket = []byte("zombie-index") + + // disabledEdgePolicyBucket is a sub-bucket of the main edgeBucket bucket + // responsible for maintaining an index of disabled edge policies. 
Each
+	// entry exists within the bucket as follows:
+	//
+	// maps: chanID || direction -> []byte{}
+	//
+	// The chanID represents the channel ID of the edge and the direction is
+	// one byte representing the direction of the edge. The main purpose of
+	// this index is to allow pruning disabled channels in a fast way without
+	// the need to iterate all over the graph.
+	disabledEdgePolicyBucket = []byte("disabled-edge-policy-index")
+
+	// graphMetaBucket is a top-level bucket which stores various metadata
+	// related to the on-disk channel graph. Data stored in this bucket
+	// includes the block to which the graph has been synced to, the total
+	// number of channels, etc.
+	graphMetaBucket = []byte("graph-meta")
+
+	// pruneLogBucket is a bucket within the graphMetaBucket that stores
+	// a mapping from the block height to the hash for the blocks used to
+	// prune the graph.
+	// Once a new block is discovered, any channels that have been closed
+	// (by spending the outpoint) can safely be removed from the graph, and
+	// the block is added to the prune log. We need to keep such a log for
+	// the case where a reorg happens, and we must "rewind" the state of the
+	// graph by removing channels that were previously confirmed. In such a
+	// case we'll remove all entries from the prune log with a block height
+	// that no longer exists.
+	pruneLogBucket = []byte("prune-log")
+)
+
+const (
+	// MaxAllowedExtraOpaqueBytes is the largest amount of opaque bytes that
+	// we'll permit to be written to disk. We limit this as otherwise, it
+	// would be possible for a node to create a ton of updates and slowly
+	// fill our disk, and also waste bandwidth due to relaying.
+	MaxAllowedExtraOpaqueBytes = 10000
+)
+
+// ChannelGraph is a persistent, on-disk graph representation of the Lightning
+// Network. This struct can be used to implement path finding algorithms on top
+// of, and also to update a node's view based on information received from the
+// p2p network.
Internally, the graph is stored using a modified adjacency list
+// representation with some added object interaction possible with each
+// serialized edge/node. The stored graph is directed, meaning that there are
+// two edges stored for each channel: an inbound/outbound edge per node pair.
+// Nodes, edges, and edge information can all be added to the graph
+// independently. Edge removal results in the deletion of all edge information
+// for that edge.
+type ChannelGraph struct {
+	db *DB
+}
+
+// newChannelGraph allocates a new ChannelGraph backed by a DB instance. The
+// rejectCacheSize and chanCacheSize parameters are accepted but unused here.
+func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph {
+	return &ChannelGraph{
+		db: db,
+	}
+}
+
+// SourceNode returns the source node of the graph. The source node is treated
+// as the center node within a star-graph. This method may be used to kick off
+// a path finding algorithm in order to explore the reachability of another
+// node based off the source node.
+func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
+	var source *LightningNode
+	err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
+		// First grab the nodes bucket which stores the mapping from
+		// pubKey to node information.
+		nodes := tx.ReadBucket(nodeBucket)
+		if nodes == nil {
+			return ErrGraphNotFound
+		}
+
+		node, err := c.sourceNode(nodes)
+		if err != nil {
+			return err
+		}
+		source = node
+
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return source, nil
+}
+
+// sourceNode uses an existing database transaction and returns the source node
+// of the graph. The source node is treated as the center node within a
+// star-graph. This method may be used to kick off a path finding algorithm in
+// order to explore the reachability of another node based off the source node.
+func (c *ChannelGraph) sourceNode(nodes kvdb.ReadBucket) (*LightningNode, error) { + selfPub := nodes.Get(sourceKey) + if selfPub == nil { + return nil, ErrSourceNodeNotSet + } + + // With the pubKey of the source node retrieved, we're able to + // fetch the full node information. + node, err := fetchLightningNode(nodes, selfPub) + if err != nil { + return nil, err + } + node.db = c.db + + return &node, nil +} + +// SetSourceNode sets the source node within the graph database. The source +// node is to be used as the center of a star-graph within path finding +// algorithms. +func (c *ChannelGraph) SetSourceNode(node *LightningNode) error { + nodePubBytes := node.PubKeyBytes[:] + + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + // First grab the nodes bucket which stores the mapping from + // pubKey to node information. + nodes, err := tx.CreateTopLevelBucket(nodeBucket) + if err != nil { + return err + } + + // Next we create the mapping from source to the targeted + // public key. + if err := nodes.Put(sourceKey, nodePubBytes); err != nil { + return err + } + + // Finally, we commit the information of the lightning node + // itself. + return addLightningNode(tx, node) + }) +} + +func addLightningNode(tx kvdb.RwTx, node *LightningNode) error { + nodes, err := tx.CreateTopLevelBucket(nodeBucket) + if err != nil { + return err + } + + aliases, err := nodes.CreateBucketIfNotExists(aliasIndexBucket) + if err != nil { + return err + } + + updateIndex, err := nodes.CreateBucketIfNotExists( + nodeUpdateIndexBucket, + ) + if err != nil { + return err + } + + return putLightningNode(nodes, aliases, updateIndex, node) +} + +// updateEdgePolicy attempts to update an edge's policy within the relevant +// buckets using an existing database transaction. The returned boolean will be +// true if the updated policy belongs to node1, and false if the policy belonged +// to node2. 
+func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, error) { + edges, err := tx.CreateTopLevelBucket(edgeBucket) + if err != nil { + return false, ErrEdgeNotFound + + } + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) + if edgeIndex == nil { + return false, ErrEdgeNotFound + } + nodes, err := tx.CreateTopLevelBucket(nodeBucket) + if err != nil { + return false, err + } + + // Create the channelID key be converting the channel ID + // integer into a byte slice. + var chanID [8]byte + byteOrder.PutUint64(chanID[:], edge.ChannelID) + + // With the channel ID, we then fetch the value storing the two + // nodes which connect this channel edge. + nodeInfo := edgeIndex.Get(chanID[:]) + if nodeInfo == nil { + return false, ErrEdgeNotFound + } + + // Depending on the flags value passed above, either the first + // or second edge policy is being updated. + var fromNode, toNode []byte + var isUpdate1 bool + if edge.ChannelFlags&lnwire.ChanUpdateDirection == 0 { + fromNode = nodeInfo[:33] + toNode = nodeInfo[33:66] + isUpdate1 = true + } else { + fromNode = nodeInfo[33:66] + toNode = nodeInfo[:33] + isUpdate1 = false + } + + // Finally, with the direction of the edge being updated + // identified, we update the on-disk edge representation. + err = putChanEdgePolicy(edges, nodes, edge, fromNode, toNode) + if err != nil { + return false, err + } + + return isUpdate1, nil +} + +// LightningNode represents an individual vertex/node within the channel graph. +// A node is connected to other nodes by one or more channel edges emanating +// from it. As the graph is directed, a node will also have an incoming edge +// attached to it for each outgoing edge. +type LightningNode struct { + // PubKeyBytes is the raw bytes of the public key of the target node. + PubKeyBytes [33]byte + pubKey *btcec.PublicKey + + // HaveNodeAnnouncement indicates whether we received a node + // announcement for this particular node. 
If true, the remaining fields
+	// will be set, if false only the PubKey is known for this node.
+	HaveNodeAnnouncement bool
+
+	// LastUpdate is the last time the vertex information for this node has
+	// been updated.
+	LastUpdate time.Time
+
+	// Addresses is the list of addresses this node is reachable over.
+	Addresses []net.Addr
+
+	// Color is the selected color for the node.
+	Color color.RGBA
+
+	// Alias is a nick-name for the node. The alias can be used to confirm
+	// a node's identity or to serve as a short ID for an address book.
+	Alias string
+
+	// AuthSigBytes is the raw signature under the advertised public key
+	// which serves to authenticate the attributes announced by this node.
+	AuthSigBytes []byte
+
+	// Features is the list of protocol features supported by this node.
+	Features *lnwire.FeatureVector
+
+	// ExtraOpaqueData is the set of data that was appended to this
+	// message, some of which we may not actually know how to iterate or
+	// parse. By holding onto this data, we ensure that we're able to
+	// properly validate the set of signatures that cover these new fields,
+	// and ensure we're able to make upgrades to the network in a forwards
+	// compatible manner.
+	ExtraOpaqueData []byte
+
+	db *DB
+
+	// TODO(roasbeef): discovery will need storage to keep its last IP
+	// address and re-announce if interface changes?
+
+	// TODO(roasbeef): add update method and fetch?
+}
+
+// PubKey is the node's long-term identity public key. This key will be used to
+// authenticate any advertisements/updates sent by the node.
+//
+// NOTE: By having this method to access an attribute, we ensure we only need
+// to fully deserialize the pubkey if absolutely necessary.
+func (l *LightningNode) PubKey() (*btcec.PublicKey, error) {
+	if l.pubKey != nil {
+		return l.pubKey, nil
+	}
+
+	key, err := btcec.ParsePubKey(l.PubKeyBytes[:], btcec.S256())
+	if err != nil {
+		return nil, err
+	}
+	l.pubKey = key
+
+	return key, nil
+}
+
+// ChannelEdgeInfo represents a fully authenticated channel along with all its
+// unique attributes. Once an authenticated channel announcement has been
+// processed on the network, then an instance of ChannelEdgeInfo encapsulating
+// the channels attributes is stored. The other portions relevant to routing
+// policy of a channel are stored within a ChannelEdgePolicy for each direction
+// of the channel.
+type ChannelEdgeInfo struct {
+	// ChannelID is the unique channel ID for the channel. The first 3
+	// bytes are the block height, the next 3 the index within the block,
+	// and the last 2 bytes are the output index for the channel.
+	ChannelID uint64
+
+	// ChainHash is the hash that uniquely identifies the chain that this
+	// channel was opened within.
+	//
+	// TODO(roasbeef): need to modify db keying for multi-chain
+	// * must add chain hash to prefix as well
+	ChainHash chainhash.Hash
+
+	// NodeKey1Bytes is the raw public key of the first node.
+	NodeKey1Bytes [33]byte
+
+	// NodeKey2Bytes is the raw public key of the second node.
+	NodeKey2Bytes [33]byte
+
+	// BitcoinKey1Bytes is the raw public key of the first node.
+	BitcoinKey1Bytes [33]byte
+
+	// BitcoinKey2Bytes is the raw public key of the second node.
+	BitcoinKey2Bytes [33]byte
+
+	// Features is an opaque byte slice that encodes the set of channel
+	// specific features that this channel edge supports.
+	Features []byte
+
+	// AuthProof is the authentication proof for this channel. This proof
+	// contains a set of signatures binding four identities, which attests
+	// to the legitimacy of the advertised channel.
+	AuthProof *ChannelAuthProof
+
+	// ChannelPoint is the funding outpoint of the channel.
This can be
+	// used to uniquely identify the channel within the channel graph.
+	ChannelPoint wire.OutPoint
+
+	// Capacity is the total capacity of the channel, this is determined by
+	// the value output in the outpoint that created this channel.
+	Capacity btcutil.Amount
+
+	// ExtraOpaqueData is the set of data that was appended to this
+	// message, some of which we may not actually know how to iterate or
+	// parse. By holding onto this data, we ensure that we're able to
+	// properly validate the set of signatures that cover these new fields,
+	// and ensure we're able to make upgrades to the network in a forwards
+	// compatible manner.
+	ExtraOpaqueData []byte
+}
+
+// ChannelAuthProof is the authentication proof (the signature portion) for a
+// channel. Using the four signatures contained in the struct, and some
+// auxiliary knowledge (the funding script, node identities, and outpoint) nodes
+// on the network are able to validate the authenticity and existence of a
+// channel. Each of these signatures signs the following digest: chanID ||
+// nodeID1 || nodeID2 || bitcoinKey1 || bitcoinKey2 || 2-byte-feature-len ||
+// features.
+type ChannelAuthProof struct {
+	// NodeSig1Bytes are the raw bytes of the first node signature encoded
+	// in DER format.
+	NodeSig1Bytes []byte
+
+	// NodeSig2Bytes are the raw bytes of the second node signature
+	// encoded in DER format.
+	NodeSig2Bytes []byte
+
+	// BitcoinSig1Bytes are the raw bytes of the first bitcoin signature
+	// encoded in DER format.
+	BitcoinSig1Bytes []byte
+
+	// BitcoinSig2Bytes are the raw bytes of the second bitcoin signature
+	// encoded in DER format.
+	BitcoinSig2Bytes []byte
+}
+
+// IsEmpty checks whether the authentication proof is empty. A proof is empty
+// if at least one of the signatures is equal to nil.
+func (c *ChannelAuthProof) IsEmpty() bool { + return len(c.NodeSig1Bytes) == 0 || + len(c.NodeSig2Bytes) == 0 || + len(c.BitcoinSig1Bytes) == 0 || + len(c.BitcoinSig2Bytes) == 0 +} + +// ChannelEdgePolicy represents a *directed* edge within the channel graph. For +// each channel in the database, there are two distinct edges: one for each +// possible direction of travel along the channel. The edges themselves hold +// information concerning fees, and minimum time-lock information which is +// utilized during path finding. +type ChannelEdgePolicy struct { + // SigBytes is the raw bytes of the signature of the channel edge + // policy. We'll only parse these if the caller needs to access the + // signature for validation purposes. Do not set SigBytes directly, but + // use SetSigBytes instead to make sure that the cache is invalidated. + SigBytes []byte + + // ChannelID is the unique channel ID for the channel. The first 3 + // bytes are the block height, the next 3 the index within the block, + // and the last 2 bytes are the output index for the channel. + ChannelID uint64 + + // LastUpdate is the last time an authenticated edge for this channel + // was received. + LastUpdate time.Time + + // MessageFlags is a bitfield which indicates the presence of optional + // fields (like max_htlc) in the policy. + MessageFlags lnwire.ChanUpdateMsgFlags + + // ChannelFlags is a bitfield which signals the capabilities of the + // channel as well as the directed edge this update applies to. + ChannelFlags lnwire.ChanUpdateChanFlags + + // TimeLockDelta is the number of blocks this node will subtract from + // the expiry of an incoming HTLC. This value expresses the time buffer + // the node would like to HTLC exchanges. + TimeLockDelta uint16 + + // MinHTLC is the smallest value HTLC this node will accept, expressed + // in millisatoshi. + MinHTLC lnwire.MilliSatoshi + + // MaxHTLC is the largest value HTLC this node will accept, expressed + // in millisatoshi. 
+ MaxHTLC lnwire.MilliSatoshi + + // FeeBaseMSat is the base HTLC fee that will be charged for forwarding + // ANY HTLC, expressed in mSAT's. + FeeBaseMSat lnwire.MilliSatoshi + + // FeeProportionalMillionths is the rate that the node will charge for + // HTLCs for each millionth of a satoshi forwarded. + FeeProportionalMillionths lnwire.MilliSatoshi + + // Node is the LightningNode that this directed edge leads to. Using + // this pointer the channel graph can further be traversed. + Node *LightningNode + + // ExtraOpaqueData is the set of data that was appended to this + // message, some of which we may not actually know how to iterate or + // parse. By holding onto this data, we ensure that we're able to + // properly validate the set of signatures that cover these new fields, + // and ensure we're able to make upgrades to the network in a forwards + // compatible manner. + ExtraOpaqueData []byte +} + +// IsDisabled determines whether the edge has the disabled bit set. +func (c *ChannelEdgePolicy) IsDisabled() bool { + return c.ChannelFlags&lnwire.ChanUpdateDisabled == + lnwire.ChanUpdateDisabled +} + +func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket, + updateIndex kvdb.RwBucket, node *LightningNode) error { + + var ( + scratch [16]byte + b bytes.Buffer + ) + + pub, err := node.PubKey() + if err != nil { + return err + } + nodePub := pub.SerializeCompressed() + + // If the node has the update time set, write it, else write 0. + updateUnix := uint64(0) + if node.LastUpdate.Unix() > 0 { + updateUnix = uint64(node.LastUpdate.Unix()) + } + + byteOrder.PutUint64(scratch[:8], updateUnix) + if _, err := b.Write(scratch[:8]); err != nil { + return err + } + + if _, err := b.Write(nodePub); err != nil { + return err + } + + // If we got a node announcement for this node, we will have the rest + // of the data available. If not we don't have more data to write. + if !node.HaveNodeAnnouncement { + // Write HaveNodeAnnouncement=0. 
+ byteOrder.PutUint16(scratch[:2], 0) + if _, err := b.Write(scratch[:2]); err != nil { + return err + } + + return nodeBucket.Put(nodePub, b.Bytes()) + } + + // Write HaveNodeAnnouncement=1. + byteOrder.PutUint16(scratch[:2], 1) + if _, err := b.Write(scratch[:2]); err != nil { + return err + } + + if err := binary.Write(&b, byteOrder, node.Color.R); err != nil { + return err + } + if err := binary.Write(&b, byteOrder, node.Color.G); err != nil { + return err + } + if err := binary.Write(&b, byteOrder, node.Color.B); err != nil { + return err + } + + if err := wire.WriteVarString(&b, 0, node.Alias); err != nil { + return err + } + + if err := node.Features.Encode(&b); err != nil { + return err + } + + numAddresses := uint16(len(node.Addresses)) + byteOrder.PutUint16(scratch[:2], numAddresses) + if _, err := b.Write(scratch[:2]); err != nil { + return err + } + + for _, address := range node.Addresses { + if err := serializeAddr(&b, address); err != nil { + return err + } + } + + sigLen := len(node.AuthSigBytes) + if sigLen > 80 { + return fmt.Errorf("max sig len allowed is 80, had %v", + sigLen) + } + + err = wire.WriteVarBytes(&b, 0, node.AuthSigBytes) + if err != nil { + return err + } + + if len(node.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { + return ErrTooManyExtraOpaqueBytes(len(node.ExtraOpaqueData)) + } + err = wire.WriteVarBytes(&b, 0, node.ExtraOpaqueData) + if err != nil { + return err + } + + if err := aliasBucket.Put(nodePub, []byte(node.Alias)); err != nil { + return err + } + + // With the alias bucket updated, we'll now update the index that + // tracks the time series of node updates. + var indexKey [8 + 33]byte + byteOrder.PutUint64(indexKey[:8], updateUnix) + copy(indexKey[8:], nodePub) + + // If there was already an old index entry for this node, then we'll + // delete the old one before we write the new entry. 
+ if nodeBytes := nodeBucket.Get(nodePub); nodeBytes != nil { + // Extract out the old update time to we can reconstruct the + // prior index key to delete it from the index. + oldUpdateTime := nodeBytes[:8] + + var oldIndexKey [8 + 33]byte + copy(oldIndexKey[:8], oldUpdateTime) + copy(oldIndexKey[8:], nodePub) + + if err := updateIndex.Delete(oldIndexKey[:]); err != nil { + return err + } + } + + if err := updateIndex.Put(indexKey[:], nil); err != nil { + return err + } + + return nodeBucket.Put(nodePub, b.Bytes()) +} + +func fetchLightningNode(nodeBucket kvdb.ReadBucket, + nodePub []byte) (LightningNode, error) { + + nodeBytes := nodeBucket.Get(nodePub) + if nodeBytes == nil { + return LightningNode{}, ErrGraphNodeNotFound + } + + nodeReader := bytes.NewReader(nodeBytes) + return deserializeLightningNode(nodeReader) +} + +func deserializeLightningNode(r io.Reader) (LightningNode, error) { + var ( + node LightningNode + scratch [8]byte + err error + ) + + if _, err := r.Read(scratch[:]); err != nil { + return LightningNode{}, err + } + + unix := int64(byteOrder.Uint64(scratch[:])) + node.LastUpdate = time.Unix(unix, 0) + + if _, err := io.ReadFull(r, node.PubKeyBytes[:]); err != nil { + return LightningNode{}, err + } + + if _, err := r.Read(scratch[:2]); err != nil { + return LightningNode{}, err + } + + hasNodeAnn := byteOrder.Uint16(scratch[:2]) + if hasNodeAnn == 1 { + node.HaveNodeAnnouncement = true + } else { + node.HaveNodeAnnouncement = false + } + + // The rest of the data is optional, and will only be there if we got a node + // announcement for this node. + if !node.HaveNodeAnnouncement { + return node, nil + } + + // We did get a node announcement for this node, so we'll have the rest + // of the data available. 
+ if err := binary.Read(r, byteOrder, &node.Color.R); err != nil { + return LightningNode{}, err + } + if err := binary.Read(r, byteOrder, &node.Color.G); err != nil { + return LightningNode{}, err + } + if err := binary.Read(r, byteOrder, &node.Color.B); err != nil { + return LightningNode{}, err + } + + node.Alias, err = wire.ReadVarString(r, 0) + if err != nil { + return LightningNode{}, err + } + + fv := lnwire.NewFeatureVector(nil, nil) + err = fv.Decode(r) + if err != nil { + return LightningNode{}, err + } + node.Features = fv + + if _, err := r.Read(scratch[:2]); err != nil { + return LightningNode{}, err + } + numAddresses := int(byteOrder.Uint16(scratch[:2])) + + var addresses []net.Addr + for i := 0; i < numAddresses; i++ { + address, err := deserializeAddr(r) + if err != nil { + return LightningNode{}, err + } + addresses = append(addresses, address) + } + node.Addresses = addresses + + node.AuthSigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") + if err != nil { + return LightningNode{}, err + } + + // We'll try and see if there are any opaque bytes left, if not, then + // we'll ignore the EOF error and return the node as is. 
+ node.ExtraOpaqueData, err = wire.ReadVarBytes( + r, 0, MaxAllowedExtraOpaqueBytes, "blob", + ) + switch { + case err == io.ErrUnexpectedEOF: + case err == io.EOF: + case err != nil: + return LightningNode{}, err + } + + return node, nil +} + +func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) { + var ( + err error + edgeInfo ChannelEdgeInfo + ) + + if _, err := io.ReadFull(r, edgeInfo.NodeKey1Bytes[:]); err != nil { + return ChannelEdgeInfo{}, err + } + if _, err := io.ReadFull(r, edgeInfo.NodeKey2Bytes[:]); err != nil { + return ChannelEdgeInfo{}, err + } + if _, err := io.ReadFull(r, edgeInfo.BitcoinKey1Bytes[:]); err != nil { + return ChannelEdgeInfo{}, err + } + if _, err := io.ReadFull(r, edgeInfo.BitcoinKey2Bytes[:]); err != nil { + return ChannelEdgeInfo{}, err + } + + edgeInfo.Features, err = wire.ReadVarBytes(r, 0, 900, "features") + if err != nil { + return ChannelEdgeInfo{}, err + } + + proof := &ChannelAuthProof{} + + proof.NodeSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") + if err != nil { + return ChannelEdgeInfo{}, err + } + proof.NodeSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") + if err != nil { + return ChannelEdgeInfo{}, err + } + proof.BitcoinSig1Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") + if err != nil { + return ChannelEdgeInfo{}, err + } + proof.BitcoinSig2Bytes, err = wire.ReadVarBytes(r, 0, 80, "sigs") + if err != nil { + return ChannelEdgeInfo{}, err + } + + if !proof.IsEmpty() { + edgeInfo.AuthProof = proof + } + + edgeInfo.ChannelPoint = wire.OutPoint{} + if err := readOutpoint(r, &edgeInfo.ChannelPoint); err != nil { + return ChannelEdgeInfo{}, err + } + if err := binary.Read(r, byteOrder, &edgeInfo.Capacity); err != nil { + return ChannelEdgeInfo{}, err + } + if err := binary.Read(r, byteOrder, &edgeInfo.ChannelID); err != nil { + return ChannelEdgeInfo{}, err + } + + if _, err := io.ReadFull(r, edgeInfo.ChainHash[:]); err != nil { + return ChannelEdgeInfo{}, err + } + + // We'll try and see if 
there are any opaque bytes left, if not, then + // we'll ignore the EOF error and return the edge as is. + edgeInfo.ExtraOpaqueData, err = wire.ReadVarBytes( + r, 0, MaxAllowedExtraOpaqueBytes, "blob", + ) + switch { + case err == io.ErrUnexpectedEOF: + case err == io.EOF: + case err != nil: + return ChannelEdgeInfo{}, err + } + + return edgeInfo, nil +} + +func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy, + from, to []byte) error { + + var edgeKey [33 + 8]byte + copy(edgeKey[:], from) + byteOrder.PutUint64(edgeKey[33:], edge.ChannelID) + + var b bytes.Buffer + if err := serializeChanEdgePolicy(&b, edge, to); err != nil { + return err + } + + // Before we write out the new edge, we'll create a new entry in the + // update index in order to keep it fresh. + updateUnix := uint64(edge.LastUpdate.Unix()) + var indexKey [8 + 8]byte + byteOrder.PutUint64(indexKey[:8], updateUnix) + byteOrder.PutUint64(indexKey[8:], edge.ChannelID) + + updateIndex, err := edges.CreateBucketIfNotExists(edgeUpdateIndexBucket) + if err != nil { + return err + } + + // If there was already an entry for this edge, then we'll need to + // delete the old one to ensure we don't leave around any after-images. + // An unknown policy value does not have a update time recorded, so + // it also does not need to be removed. + if edgeBytes := edges.Get(edgeKey[:]); edgeBytes != nil && + !bytes.Equal(edgeBytes[:], unknownPolicy) { + + // In order to delete the old entry, we'll need to obtain the + // *prior* update time in order to delete it. To do this, we'll + // need to deserialize the existing policy within the database + // (now outdated by the new one), and delete its corresponding + // entry within the update index. We'll ignore any + // ErrEdgePolicyOptionalFieldNotFound error, as we only need + // the channel ID and update time to delete the entry. + // TODO(halseth): get rid of these invalid policies in a + // migration. 
+ oldEdgePolicy, err := deserializeChanEdgePolicy( + bytes.NewReader(edgeBytes), nodes, + ) + if err != nil && err != ErrEdgePolicyOptionalFieldNotFound { + return err + } + + oldUpdateTime := uint64(oldEdgePolicy.LastUpdate.Unix()) + + var oldIndexKey [8 + 8]byte + byteOrder.PutUint64(oldIndexKey[:8], oldUpdateTime) + byteOrder.PutUint64(oldIndexKey[8:], edge.ChannelID) + + if err := updateIndex.Delete(oldIndexKey[:]); err != nil { + return err + } + } + + if err := updateIndex.Put(indexKey[:], nil); err != nil { + return err + } + + updateEdgePolicyDisabledIndex( + edges, edge.ChannelID, + edge.ChannelFlags&lnwire.ChanUpdateDirection > 0, + edge.IsDisabled(), + ) + + return edges.Put(edgeKey[:], b.Bytes()[:]) +} + +// updateEdgePolicyDisabledIndex is used to update the disabledEdgePolicyIndex +// bucket by either add a new disabled ChannelEdgePolicy or remove an existing +// one. +// The direction represents the direction of the edge and disabled is used for +// deciding whether to remove or add an entry to the bucket. +// In general a channel is disabled if two entries for the same chanID exist +// in this bucket. +// Maintaining the bucket this way allows a fast retrieval of disabled +// channels, for example when prune is needed. +func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64, + direction bool, disabled bool) error { + + var disabledEdgeKey [8 + 1]byte + byteOrder.PutUint64(disabledEdgeKey[0:], chanID) + if direction { + disabledEdgeKey[8] = 1 + } + + disabledEdgePolicyIndex, err := edges.CreateBucketIfNotExists( + disabledEdgePolicyBucket, + ) + if err != nil { + return err + } + + if disabled { + return disabledEdgePolicyIndex.Put(disabledEdgeKey[:], []byte{}) + } + + return disabledEdgePolicyIndex.Delete(disabledEdgeKey[:]) +} + +// putChanEdgePolicyUnknown marks the edge policy as unknown +// in the edges bucket. 
+func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64, + from []byte) error { + + var edgeKey [33 + 8]byte + copy(edgeKey[:], from) + byteOrder.PutUint64(edgeKey[33:], channelID) + + if edges.Get(edgeKey[:]) != nil { + return fmt.Errorf("Cannot write unknown policy for channel %v "+ + " when there is already a policy present", channelID) + } + + return edges.Put(edgeKey[:], unknownPolicy) +} + +func fetchChanEdgePolicy(edges kvdb.ReadBucket, chanID []byte, + nodePub []byte, nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) { + + var edgeKey [33 + 8]byte + copy(edgeKey[:], nodePub) + copy(edgeKey[33:], chanID[:]) + + edgeBytes := edges.Get(edgeKey[:]) + if edgeBytes == nil { + return nil, ErrEdgeNotFound + } + + // No need to deserialize unknown policy. + if bytes.Equal(edgeBytes[:], unknownPolicy) { + return nil, nil + } + + edgeReader := bytes.NewReader(edgeBytes) + + ep, err := deserializeChanEdgePolicy(edgeReader, nodes) + switch { + // If the db policy was missing an expected optional field, we return + // nil as if the policy was unknown. 
+ case err == ErrEdgePolicyOptionalFieldNotFound: + return nil, nil + + case err != nil: + return nil, err + } + + return ep, nil +} + +func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy, + to []byte) error { + + err := wire.WriteVarBytes(w, 0, edge.SigBytes) + if err != nil { + return err + } + + if err := binary.Write(w, byteOrder, edge.ChannelID); err != nil { + return err + } + + var scratch [8]byte + updateUnix := uint64(edge.LastUpdate.Unix()) + byteOrder.PutUint64(scratch[:], updateUnix) + if _, err := w.Write(scratch[:]); err != nil { + return err + } + + if err := binary.Write(w, byteOrder, edge.MessageFlags); err != nil { + return err + } + if err := binary.Write(w, byteOrder, edge.ChannelFlags); err != nil { + return err + } + if err := binary.Write(w, byteOrder, edge.TimeLockDelta); err != nil { + return err + } + if err := binary.Write(w, byteOrder, uint64(edge.MinHTLC)); err != nil { + return err + } + if err := binary.Write(w, byteOrder, uint64(edge.FeeBaseMSat)); err != nil { + return err + } + if err := binary.Write(w, byteOrder, uint64(edge.FeeProportionalMillionths)); err != nil { + return err + } + + if _, err := w.Write(to); err != nil { + return err + } + + // If the max_htlc field is present, we write it. To be compatible with + // older versions that wasn't aware of this field, we write it as part + // of the opaque data. + // TODO(halseth): clean up when moving to TLV. 
+ var opaqueBuf bytes.Buffer + if edge.MessageFlags.HasMaxHtlc() { + err := binary.Write(&opaqueBuf, byteOrder, uint64(edge.MaxHTLC)) + if err != nil { + return err + } + } + + if len(edge.ExtraOpaqueData) > MaxAllowedExtraOpaqueBytes { + return ErrTooManyExtraOpaqueBytes(len(edge.ExtraOpaqueData)) + } + if _, err := opaqueBuf.Write(edge.ExtraOpaqueData); err != nil { + return err + } + + if err := wire.WriteVarBytes(w, 0, opaqueBuf.Bytes()); err != nil { + return err + } + return nil +} + +func deserializeChanEdgePolicy(r io.Reader, + nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) { + + edge := &ChannelEdgePolicy{} + + var err error + edge.SigBytes, err = wire.ReadVarBytes(r, 0, 80, "sig") + if err != nil { + return nil, err + } + + if err := binary.Read(r, byteOrder, &edge.ChannelID); err != nil { + return nil, err + } + + var scratch [8]byte + if _, err := r.Read(scratch[:]); err != nil { + return nil, err + } + unix := int64(byteOrder.Uint64(scratch[:])) + edge.LastUpdate = time.Unix(unix, 0) + + if err := binary.Read(r, byteOrder, &edge.MessageFlags); err != nil { + return nil, err + } + if err := binary.Read(r, byteOrder, &edge.ChannelFlags); err != nil { + return nil, err + } + if err := binary.Read(r, byteOrder, &edge.TimeLockDelta); err != nil { + return nil, err + } + + var n uint64 + if err := binary.Read(r, byteOrder, &n); err != nil { + return nil, err + } + edge.MinHTLC = lnwire.MilliSatoshi(n) + + if err := binary.Read(r, byteOrder, &n); err != nil { + return nil, err + } + edge.FeeBaseMSat = lnwire.MilliSatoshi(n) + + if err := binary.Read(r, byteOrder, &n); err != nil { + return nil, err + } + edge.FeeProportionalMillionths = lnwire.MilliSatoshi(n) + + var pub [33]byte + if _, err := r.Read(pub[:]); err != nil { + return nil, err + } + + node, err := fetchLightningNode(nodes, pub[:]) + if err != nil { + return nil, fmt.Errorf("unable to fetch node: %x, %v", + pub[:], err) + } + edge.Node = &node + + // We'll try and see if there are any opaque 
bytes left, if not, then + // we'll ignore the EOF error and return the edge as is. + edge.ExtraOpaqueData, err = wire.ReadVarBytes( + r, 0, MaxAllowedExtraOpaqueBytes, "blob", + ) + switch { + case err == io.ErrUnexpectedEOF: + case err == io.EOF: + case err != nil: + return nil, err + } + + // See if optional fields are present. + if edge.MessageFlags.HasMaxHtlc() { + // The max_htlc field should be at the beginning of the opaque + // bytes. + opq := edge.ExtraOpaqueData + + // If the max_htlc field is not present, it might be old data + // stored before this field was validated. We'll return the + // edge along with an error. + if len(opq) < 8 { + return edge, ErrEdgePolicyOptionalFieldNotFound + } + + maxHtlc := byteOrder.Uint64(opq[:8]) + edge.MaxHTLC = lnwire.MilliSatoshi(maxHtlc) + + // Exclude the parsed field from the rest of the opaque data. + edge.ExtraOpaqueData = opq[8:] + } + + return edge, nil +} diff --git a/channeldb/migration_01_to_11/graph_test.go b/channeldb/migration_01_to_11/graph_test.go new file mode 100644 index 0000000000..dc21fccfdd --- /dev/null +++ b/channeldb/migration_01_to_11/graph_test.go @@ -0,0 +1,57 @@ +package migration_01_to_11 + +import ( + "image/color" + "math/big" + prand "math/rand" + "net" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/lightningnetwork/lnd/lnwire" +) + +var ( + testAddr = &net.TCPAddr{IP: (net.IP)([]byte{0xA, 0x0, 0x0, 0x1}), + Port: 9000} + anotherAddr, _ = net.ResolveTCPAddr("tcp", + "[2001:db8:85a3:0:0:8a2e:370:7334]:80") + testAddrs = []net.Addr{testAddr, anotherAddr} + + testSig = &btcec.Signature{ + R: new(big.Int), + S: new(big.Int), + } + _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10) + _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10) + + testFeatures = lnwire.NewFeatureVector(nil, nil) +) + +func createLightningNode(db *DB, priv *btcec.PrivateKey) 
(*LightningNode, error) { + updateTime := prand.Int63() + + pub := priv.PubKey().SerializeCompressed() + n := &LightningNode{ + HaveNodeAnnouncement: true, + AuthSigBytes: testSig.Serialize(), + LastUpdate: time.Unix(updateTime, 0), + Color: color.RGBA{1, 2, 3, 0}, + Alias: "kek" + string(pub[:]), + Features: testFeatures, + Addresses: testAddrs, + db: db, + } + copy(n.PubKeyBytes[:], priv.PubKey().SerializeCompressed()) + + return n, nil +} + +func createTestVertex(db *DB) (*LightningNode, error) { + priv, err := btcec.NewPrivateKey(btcec.S256()) + if err != nil { + return nil, err + } + + return createLightningNode(db, priv) +} diff --git a/channeldb/migration_01_to_11/invoices.go b/channeldb/migration_01_to_11/invoices.go new file mode 100644 index 0000000000..7e56489b9a --- /dev/null +++ b/channeldb/migration_01_to_11/invoices.go @@ -0,0 +1,550 @@ +package migration_01_to_11 + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/tlv" +) + +var ( + + // invoiceBucket is the name of the bucket within the database that + // stores all data related to invoices no matter their final state. + // Within the invoice bucket, each invoice is keyed by its invoice ID + // which is a monotonically increasing uint32. + invoiceBucket = []byte("invoices") + + // addIndexBucket is an index bucket that we'll use to create a + // monotonically increasing set of add indexes. Each time we add a new + // invoice, this sequence number will be incremented and then populated + // within the new invoice. 
+ // + // In addition to this sequence number, we map: + // + // addIndexNo => invoiceKey + addIndexBucket = []byte("invoice-add-index") + + // settleIndexBucket is an index bucket that we'll use to create a + // monotonically increasing integer for tracking a "settle index". Each + // time an invoice is settled, this sequence number will be incremented + // as populate within the newly settled invoice. + // + // In addition to this sequence number, we map: + // + // settleIndexNo => invoiceKey + settleIndexBucket = []byte("invoice-settle-index") +) + +const ( + // MaxMemoSize is maximum size of the memo field within invoices stored + // in the database. + MaxMemoSize = 1024 + + // MaxReceiptSize is the maximum size of the payment receipt stored + // within the database along side incoming/outgoing invoices. + MaxReceiptSize = 1024 + + // MaxPaymentRequestSize is the max size of a payment request for + // this invoice. + // TODO(halseth): determine the max length payment request when field + // lengths are final. + MaxPaymentRequestSize = 4096 + + // A set of tlv type definitions used to serialize invoice htlcs to the + // database. + chanIDType tlv.Type = 1 + htlcIDType tlv.Type = 3 + amtType tlv.Type = 5 + acceptHeightType tlv.Type = 7 + acceptTimeType tlv.Type = 9 + resolveTimeType tlv.Type = 11 + expiryHeightType tlv.Type = 13 + stateType tlv.Type = 15 +) + +// ContractState describes the state the invoice is in. +type ContractState uint8 + +const ( + // ContractOpen means the invoice has only been created. + ContractOpen ContractState = 0 + + // ContractSettled means the htlc is settled and the invoice has been + // paid. + ContractSettled ContractState = 1 + + // ContractCanceled means the invoice has been canceled. + ContractCanceled ContractState = 2 + + // ContractAccepted means the HTLC has been accepted but not settled + // yet. + ContractAccepted ContractState = 3 +) + +// String returns a human readable identifier for the ContractState type. 
+func (c ContractState) String() string { + switch c { + case ContractOpen: + return "Open" + case ContractSettled: + return "Settled" + case ContractCanceled: + return "Canceled" + case ContractAccepted: + return "Accepted" + } + + return "Unknown" +} + +// ContractTerm is a companion struct to the Invoice struct. This struct houses +// the necessary conditions required before the invoice can be considered fully +// settled by the payee. +type ContractTerm struct { + // PaymentPreimage is the preimage which is to be revealed in the + // occasion that an HTLC paying to the hash of this preimage is + // extended. + PaymentPreimage lntypes.Preimage + + // Value is the expected amount of milli-satoshis to be paid to an HTLC + // which can be satisfied by the above preimage. + Value lnwire.MilliSatoshi + + // State describes the state the invoice is in. + State ContractState +} + +// Invoice is a payment invoice generated by a payee in order to request +// payment for some good or service. The inclusion of invoices within Lightning +// creates a payment work flow for merchants very similar to that of the +// existing financial system within PayPal, etc. Invoices are added to the +// database when a payment is requested, then can be settled manually once the +// payment is received at the upper layer. For record keeping purposes, +// invoices are never deleted from the database, instead a bit is toggled +// denoting the invoice has been fully settled. Within the database, all +// invoices must have a unique payment hash which is generated by taking the +// sha256 of the payment preimage. +type Invoice struct { + // Memo is an optional memo to be stored along side an invoice. The + // memo may contain further details pertaining to the invoice itself, + // or any other message which fits within the size constraints. + Memo []byte + + // Receipt is an optional field dedicated for storing a + // cryptographically binding receipt of payment. 
+ // + // TODO(roasbeef): document scheme. + Receipt []byte + + // PaymentRequest is an optional field where a payment request created + // for this invoice can be stored. + PaymentRequest []byte + + // FinalCltvDelta is the minimum required number of blocks before htlc + // expiry when the invoice is accepted. + FinalCltvDelta int32 + + // Expiry defines how long after creation this invoice should expire. + Expiry time.Duration + + // CreationDate is the exact time the invoice was created. + CreationDate time.Time + + // SettleDate is the exact time the invoice was settled. + SettleDate time.Time + + // Terms are the contractual payment terms of the invoice. Once all the + // terms have been satisfied by the payer, then the invoice can be + // considered fully fulfilled. + // + // TODO(roasbeef): later allow for multiple terms to fulfill the final + // invoice: payment fragmentation, etc. + Terms ContractTerm + + // AddIndex is an auto-incrementing integer that acts as a + // monotonically increasing sequence number for all invoices created. + // Clients can then use this field as a "checkpoint" of sorts when + // implementing a streaming RPC to notify consumers of instances where + // an invoice has been added before they re-connected. + // + // NOTE: This index starts at 1. + AddIndex uint64 + + // SettleIndex is an auto-incrementing integer that acts as a + // monotonically increasing sequence number for all settled invoices. + // Clients can then use this field as a "checkpoint" of sorts when + // implementing a streaming RPC to notify consumers of instances where + // an invoice has been settled before they re-connected. + // + // NOTE: This index starts at 1. + SettleIndex uint64 + + // AmtPaid is the final amount that we ultimately accepted for pay for + // this invoice. We specify this value independently as it's possible + // that the invoice originally didn't specify an amount, or the sender + // overpaid. 
+ AmtPaid lnwire.MilliSatoshi + + // Htlcs records all htlcs that paid to this invoice. Some of these + // htlcs may have been marked as canceled. + Htlcs map[CircuitKey]*InvoiceHTLC +} + +// HtlcState defines the states an htlc paying to an invoice can be in. +type HtlcState uint8 + +// InvoiceHTLC contains details about an htlc paying to this invoice. +type InvoiceHTLC struct { + // Amt is the amount that is carried by this htlc. + Amt lnwire.MilliSatoshi + + // AcceptHeight is the block height at which the invoice registry + // decided to accept this htlc as a payment to the invoice. At this + // height, the invoice cltv delay must have been met. + AcceptHeight uint32 + + // AcceptTime is the wall clock time at which the invoice registry + // decided to accept the htlc. + AcceptTime time.Time + + // ResolveTime is the wall clock time at which the invoice registry + // decided to settle the htlc. + ResolveTime time.Time + + // Expiry is the expiry height of this htlc. + Expiry uint32 + + // State indicates the state the invoice htlc is currently in. A + // canceled htlc isn't just removed from the invoice htlcs map, because + // we need AcceptHeight to properly cancel the htlc back. + State HtlcState +} + +func validateInvoice(i *Invoice) error { + if len(i.Memo) > MaxMemoSize { + return fmt.Errorf("max length a memo is %v, and invoice "+ + "of length %v was provided", MaxMemoSize, len(i.Memo)) + } + if len(i.Receipt) > MaxReceiptSize { + return fmt.Errorf("max length a receipt is %v, and invoice "+ + "of length %v was provided", MaxReceiptSize, + len(i.Receipt)) + } + if len(i.PaymentRequest) > MaxPaymentRequestSize { + return fmt.Errorf("max length of payment request is %v, length "+ + "provided was %v", MaxPaymentRequestSize, + len(i.PaymentRequest)) + } + return nil +} + +// FetchAllInvoices returns all invoices currently stored within the database. 
+// If the pendingOnly param is true, then only unsettled invoices will be +// returned, skipping all invoices that are fully settled. +func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, error) { + var invoices []Invoice + + err := kvdb.View(d, func(tx kvdb.ReadTx) error { + invoiceB := tx.ReadBucket(invoiceBucket) + if invoiceB == nil { + return ErrNoInvoicesCreated + } + + // Iterate through the entire key space of the top-level + // invoice bucket. If key with a non-nil value stores the next + // invoice ID which maps to the corresponding invoice. + return invoiceB.ForEach(func(k, v []byte) error { + if v == nil { + return nil + } + + invoiceReader := bytes.NewReader(v) + invoice, err := deserializeInvoice(invoiceReader) + if err != nil { + return err + } + + if pendingOnly && + invoice.Terms.State == ContractSettled { + + return nil + } + + invoices = append(invoices, invoice) + + return nil + }) + }) + if err != nil { + return nil, err + } + + return invoices, nil +} + +// serializeInvoice serializes an invoice to a writer. +// +// Note: this function is in use for a migration. Before making changes that +// would modify the on disk format, make a copy of the original code and store +// it with the migration. 
+func serializeInvoice(w io.Writer, i *Invoice) error { + if err := wire.WriteVarBytes(w, 0, i.Memo[:]); err != nil { + return err + } + if err := wire.WriteVarBytes(w, 0, i.Receipt[:]); err != nil { + return err + } + if err := wire.WriteVarBytes(w, 0, i.PaymentRequest[:]); err != nil { + return err + } + + if err := binary.Write(w, byteOrder, i.FinalCltvDelta); err != nil { + return err + } + + if err := binary.Write(w, byteOrder, int64(i.Expiry)); err != nil { + return err + } + + birthBytes, err := i.CreationDate.MarshalBinary() + if err != nil { + return err + } + + if err := wire.WriteVarBytes(w, 0, birthBytes); err != nil { + return err + } + + settleBytes, err := i.SettleDate.MarshalBinary() + if err != nil { + return err + } + + if err := wire.WriteVarBytes(w, 0, settleBytes); err != nil { + return err + } + + if _, err := w.Write(i.Terms.PaymentPreimage[:]); err != nil { + return err + } + + var scratch [8]byte + byteOrder.PutUint64(scratch[:], uint64(i.Terms.Value)) + if _, err := w.Write(scratch[:]); err != nil { + return err + } + + if err := binary.Write(w, byteOrder, i.Terms.State); err != nil { + return err + } + + if err := binary.Write(w, byteOrder, i.AddIndex); err != nil { + return err + } + if err := binary.Write(w, byteOrder, i.SettleIndex); err != nil { + return err + } + if err := binary.Write(w, byteOrder, int64(i.AmtPaid)); err != nil { + return err + } + + if err := serializeHtlcs(w, i.Htlcs); err != nil { + return err + } + + return nil +} + +// serializeHtlcs serializes a map containing circuit keys and invoice htlcs to +// a writer. +func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) error { + for key, htlc := range htlcs { + // Encode the htlc in a tlv stream. 
+ chanID := key.ChanID.ToUint64() + amt := uint64(htlc.Amt) + acceptTime := uint64(htlc.AcceptTime.UnixNano()) + resolveTime := uint64(htlc.ResolveTime.UnixNano()) + state := uint8(htlc.State) + + tlvStream, err := tlv.NewStream( + tlv.MakePrimitiveRecord(chanIDType, &chanID), + tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID), + tlv.MakePrimitiveRecord(amtType, &amt), + tlv.MakePrimitiveRecord( + acceptHeightType, &htlc.AcceptHeight, + ), + tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime), + tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime), + tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry), + tlv.MakePrimitiveRecord(stateType, &state), + ) + if err != nil { + return err + } + + var b bytes.Buffer + if err := tlvStream.Encode(&b); err != nil { + return err + } + + // Write the length of the tlv stream followed by the stream + // bytes. + err = binary.Write(w, byteOrder, uint64(b.Len())) + if err != nil { + return err + } + + if _, err := w.Write(b.Bytes()); err != nil { + return err + } + } + + return nil +} + +func deserializeInvoice(r io.Reader) (Invoice, error) { + var err error + invoice := Invoice{} + + // TODO(roasbeef): use read full everywhere + invoice.Memo, err = wire.ReadVarBytes(r, 0, MaxMemoSize, "") + if err != nil { + return invoice, err + } + invoice.Receipt, err = wire.ReadVarBytes(r, 0, MaxReceiptSize, "") + if err != nil { + return invoice, err + } + + invoice.PaymentRequest, err = wire.ReadVarBytes(r, 0, MaxPaymentRequestSize, "") + if err != nil { + return invoice, err + } + + if err := binary.Read(r, byteOrder, &invoice.FinalCltvDelta); err != nil { + return invoice, err + } + + var expiry int64 + if err := binary.Read(r, byteOrder, &expiry); err != nil { + return invoice, err + } + invoice.Expiry = time.Duration(expiry) + + birthBytes, err := wire.ReadVarBytes(r, 0, 300, "birth") + if err != nil { + return invoice, err + } + if err := invoice.CreationDate.UnmarshalBinary(birthBytes); err != nil { + return invoice, err + } + 
+ settledBytes, err := wire.ReadVarBytes(r, 0, 300, "settled") + if err != nil { + return invoice, err + } + if err := invoice.SettleDate.UnmarshalBinary(settledBytes); err != nil { + return invoice, err + } + + if _, err := io.ReadFull(r, invoice.Terms.PaymentPreimage[:]); err != nil { + return invoice, err + } + var scratch [8]byte + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return invoice, err + } + invoice.Terms.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) + + if err := binary.Read(r, byteOrder, &invoice.Terms.State); err != nil { + return invoice, err + } + + if err := binary.Read(r, byteOrder, &invoice.AddIndex); err != nil { + return invoice, err + } + if err := binary.Read(r, byteOrder, &invoice.SettleIndex); err != nil { + return invoice, err + } + if err := binary.Read(r, byteOrder, &invoice.AmtPaid); err != nil { + return invoice, err + } + + invoice.Htlcs, err = deserializeHtlcs(r) + if err != nil { + return Invoice{}, err + } + + return invoice, nil +} + +// deserializeHtlcs reads a list of invoice htlcs from a reader and returns it +// as a map. +func deserializeHtlcs(r io.Reader) (map[CircuitKey]*InvoiceHTLC, error) { + htlcs := make(map[CircuitKey]*InvoiceHTLC, 0) + + for { + // Read the length of the tlv stream for this htlc. + var streamLen uint64 + if err := binary.Read(r, byteOrder, &streamLen); err != nil { + if err == io.EOF { + break + } + + return nil, err + } + + streamBytes := make([]byte, streamLen) + if _, err := r.Read(streamBytes); err != nil { + return nil, err + } + streamReader := bytes.NewReader(streamBytes) + + // Decode the contents into the htlc fields. 
+ var ( + htlc InvoiceHTLC + key CircuitKey + chanID uint64 + state uint8 + acceptTime, resolveTime uint64 + amt uint64 + ) + tlvStream, err := tlv.NewStream( + tlv.MakePrimitiveRecord(chanIDType, &chanID), + tlv.MakePrimitiveRecord(htlcIDType, &key.HtlcID), + tlv.MakePrimitiveRecord(amtType, &amt), + tlv.MakePrimitiveRecord( + acceptHeightType, &htlc.AcceptHeight, + ), + tlv.MakePrimitiveRecord(acceptTimeType, &acceptTime), + tlv.MakePrimitiveRecord(resolveTimeType, &resolveTime), + tlv.MakePrimitiveRecord(expiryHeightType, &htlc.Expiry), + tlv.MakePrimitiveRecord(stateType, &state), + ) + if err != nil { + return nil, err + } + + if err := tlvStream.Decode(streamReader); err != nil { + return nil, err + } + + key.ChanID = lnwire.NewShortChanIDFromInt(chanID) + htlc.AcceptTime = time.Unix(0, int64(acceptTime)) + htlc.ResolveTime = time.Unix(0, int64(resolveTime)) + htlc.State = HtlcState(state) + htlc.Amt = lnwire.MilliSatoshi(amt) + + htlcs[key] = &htlc + } + + return htlcs, nil +} diff --git a/channeldb/migration_01_to_11/legacy_serialization.go b/channeldb/migration_01_to_11/legacy_serialization.go new file mode 100644 index 0000000000..5d731bff6f --- /dev/null +++ b/channeldb/migration_01_to_11/legacy_serialization.go @@ -0,0 +1,55 @@ +package migration_01_to_11 + +import ( + "io" +) + +// deserializeCloseChannelSummaryV6 reads the v6 database format for +// ChannelCloseSummary. +// +// NOTE: deprecated, only for migration. +func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, error) { + c := &ChannelCloseSummary{} + + err := ReadElements(r, + &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID, + &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance, + &c.TimeLockedBalance, &c.CloseType, &c.IsPending, + ) + if err != nil { + return nil, err + } + + // We'll now check to see if the channel close summary was encoded with + // any of the additional optional fields. 
+ err = ReadElements(r, &c.RemoteCurrentRevocation) + switch { + case err == io.EOF: + return c, nil + + // If we got a non-eof error, then we know there's an actually issue. + // Otherwise, it may have been the case that this summary didn't have + // the set of optional fields. + case err != nil: + return nil, err + } + + if err := readChanConfig(r, &c.LocalChanConfig); err != nil { + return nil, err + } + + // Finally, we'll attempt to read the next unrevoked commitment point + // for the remote party. If we closed the channel before receiving a + // funding locked message, then this can be nil. As a result, we'll use + // the same technique to read the field, only if there's still data + // left in the buffer. + err = ReadElements(r, &c.RemoteNextRevocation) + if err != nil && err != io.EOF { + // If we got a non-eof error, then we know there's an actually + // issue. Otherwise, it may have been the case that this + // summary didn't have the set of optional fields. + return nil, err + } + + return c, nil +} diff --git a/channeldb/migration_01_to_11/log.go b/channeldb/migration_01_to_11/log.go new file mode 100644 index 0000000000..b169b5afb1 --- /dev/null +++ b/channeldb/migration_01_to_11/log.go @@ -0,0 +1,14 @@ +package migration_01_to_11 + +import ( + "github.com/btcsuite/btclog" +) + +// log is a logger that is initialized as disabled. This means the package will +// not perform any logging by default until a logger is set. +var log = btclog.Disabled + +// UseLogger uses a specified Logger to output package logging info. 
+func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/channeldb/migration_01_to_11/meta.go b/channeldb/migration_01_to_11/meta.go new file mode 100644 index 0000000000..95bce84bff --- /dev/null +++ b/channeldb/migration_01_to_11/meta.go @@ -0,0 +1,39 @@ +package migration_01_to_11 + +import ( + "github.com/lightningnetwork/lnd/channeldb/kvdb" +) + +var ( + // metaBucket stores all the meta information concerning the state of + // the database. + metaBucket = []byte("metadata") + + // dbVersionKey is a boltdb key and it's used for storing/retrieving + // current database version. + dbVersionKey = []byte("dbp") +) + +// Meta structure holds the database meta information. +type Meta struct { + // DbVersionNumber is the current schema version of the database. + DbVersionNumber uint32 +} + +// putMeta is an internal helper function used in order to allow callers to +// re-use a database transaction. See the publicly exported PutMeta method for +// more information. +func putMeta(meta *Meta, tx kvdb.RwTx) error { + metaBucket, err := tx.CreateTopLevelBucket(metaBucket) + if err != nil { + return err + } + + return putDbVersion(metaBucket, meta) +} + +func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error { + scratch := make([]byte, 4) + byteOrder.PutUint32(scratch, meta.DbVersionNumber) + return metaBucket.Put(dbVersionKey, scratch) +} diff --git a/channeldb/migration_01_to_11/meta_test.go b/channeldb/migration_01_to_11/meta_test.go new file mode 100644 index 0000000000..d3850c155e --- /dev/null +++ b/channeldb/migration_01_to_11/meta_test.go @@ -0,0 +1,58 @@ +package migration_01_to_11 + +import ( + "testing" + + "github.com/go-errors/errors" + "github.com/lightningnetwork/lnd/channeldb/kvdb" +) + +// applyMigration is a helper test function that encapsulates the general steps +// which are needed to properly check the result of applying migration function. 
+func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB), + migrationFunc migration, shouldFail bool) { + + cdb, cleanUp, err := makeTestDB() + defer cleanUp() + if err != nil { + t.Fatal(err) + } + + // Create a test node that will be our source node. + testNode, err := createTestVertex(cdb) + if err != nil { + t.Fatal(err) + } + graph := cdb.ChannelGraph() + if err := graph.SetSourceNode(testNode); err != nil { + t.Fatal(err) + } + + // beforeMigration usually used for populating the database + // with test data. + beforeMigration(cdb) + + defer func() { + if r := recover(); r != nil { + err = errors.New(r) + } + + if err == nil && shouldFail { + t.Fatal("error wasn't received on migration stage") + } else if err != nil && !shouldFail { + t.Fatalf("error was received on migration stage: %v", err) + } + + // afterMigration usually used for checking the database state and + // throwing the error if something went wrong. + afterMigration(cdb) + }() + + // Apply migration. 
+ err = kvdb.Update(cdb, func(tx kvdb.RwTx) error { + return migrationFunc(tx) + }) + if err != nil { + log.Error(err) + } +} diff --git a/channeldb/migration_09_legacy_serialization.go b/channeldb/migration_01_to_11/migration_09_legacy_serialization.go similarity index 91% rename from channeldb/migration_09_legacy_serialization.go rename to channeldb/migration_01_to_11/migration_09_legacy_serialization.go index 1205cf9bd4..3589662c8c 100644 --- a/channeldb/migration_09_legacy_serialization.go +++ b/channeldb/migration_01_to_11/migration_09_legacy_serialization.go @@ -1,4 +1,4 @@ -package channeldb +package migration_01_to_11 import ( "bytes" @@ -7,10 +7,9 @@ import ( "io" "sort" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" - "github.com/lightningnetwork/lnd/routing/route" ) var ( @@ -77,8 +76,8 @@ func (db *DB) addPayment(payment *outgoingPayment) error { } paymentBytes := b.Bytes() - return db.Batch(func(tx *bbolt.Tx) error { - payments, err := tx.CreateBucketIfNotExists(paymentBucket) + return kvdb.Update(db, func(tx kvdb.RwTx) error { + payments, err := tx.CreateTopLevelBucket(paymentBucket) if err != nil { return err } @@ -105,8 +104,8 @@ func (db *DB) addPayment(payment *outgoingPayment) error { func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) { var payments []*outgoingPayment - err := db.View(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(paymentBucket) + err := kvdb.View(db, func(tx kvdb.ReadTx) error { + bucket := tx.ReadBucket(paymentBucket) if bucket == nil { return ErrNoPaymentsCreated } @@ -141,7 +140,7 @@ func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) { // NOTE: Deprecated. Kept around for migration purposes. 
func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) { var paymentStatus = StatusUnknown - err := db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(db, func(tx kvdb.ReadTx) error { var err error paymentStatus, err = fetchPaymentStatusTx(tx, paymentHash) return err @@ -159,11 +158,11 @@ func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) { // can be composed into other atomic operations. // // NOTE: Deprecated. Kept around for migration purposes. -func fetchPaymentStatusTx(tx *bbolt.Tx, paymentHash [32]byte) (PaymentStatus, error) { +func fetchPaymentStatusTx(tx kvdb.ReadTx, paymentHash [32]byte) (PaymentStatus, error) { // The default status for all payments that aren't recorded in database. var paymentStatus = StatusUnknown - bucket := tx.Bucket(paymentStatusBucket) + bucket := tx.ReadBucket(paymentStatusBucket) if bucket == nil { return paymentStatus, nil } @@ -274,7 +273,7 @@ func serializePaymentAttemptInfoMigration9(w io.Writer, a *PaymentAttemptInfo) e return nil } -func serializeHopMigration9(w io.Writer, h *route.Hop) error { +func serializeHopMigration9(w io.Writer, h *Hop) error { if err := WriteElements(w, h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock, h.AmtToForward, @@ -285,7 +284,7 @@ func serializeHopMigration9(w io.Writer, h *route.Hop) error { return nil } -func serializeRouteMigration9(w io.Writer, r route.Route) error { +func serializeRouteMigration9(w io.Writer, r Route) error { if err := WriteElements(w, r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:], ); err != nil { @@ -318,8 +317,8 @@ func deserializePaymentAttemptInfoMigration9(r io.Reader) (*PaymentAttemptInfo, return a, nil } -func deserializeRouteMigration9(r io.Reader) (route.Route, error) { - rt := route.Route{} +func deserializeRouteMigration9(r io.Reader) (Route, error) { + rt := Route{} if err := ReadElements(r, &rt.TotalTimeLock, &rt.TotalAmount, ); err != nil { @@ -337,7 +336,7 @@ func deserializeRouteMigration9(r 
io.Reader) (route.Route, error) { return rt, err } - var hops []*route.Hop + var hops []*Hop for i := uint32(0); i < numHops; i++ { hop, err := deserializeHopMigration9(r) if err != nil { @@ -350,8 +349,8 @@ func deserializeRouteMigration9(r io.Reader) (route.Route, error) { return rt, nil } -func deserializeHopMigration9(r io.Reader) (*route.Hop, error) { - h := &route.Hop{} +func deserializeHopMigration9(r io.Reader) (*Hop, error) { + h := &Hop{} var pub []byte if err := ReadElements(r, &pub); err != nil { @@ -376,14 +375,14 @@ func deserializeHopMigration9(r io.Reader) (*route.Hop, error) { func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) { var payments []*Payment - err := db.View(func(tx *bbolt.Tx) error { - paymentsBucket := tx.Bucket(paymentsRootBucket) + err := kvdb.View(db, func(tx kvdb.ReadTx) error { + paymentsBucket := tx.ReadBucket(paymentsRootBucket) if paymentsBucket == nil { return nil } return paymentsBucket.ForEach(func(k, v []byte) error { - bucket := paymentsBucket.Bucket(k) + bucket := paymentsBucket.NestedReadBucket(k) if bucket == nil { // We only expect sub-buckets to be found in // this top-level bucket. @@ -402,13 +401,13 @@ func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) { // payment has was possible. These will be found in a // sub-bucket indexed by their sequence number if // available. - dup := bucket.Bucket(paymentDuplicateBucket) + dup := bucket.NestedReadBucket(paymentDuplicateBucket) if dup == nil { return nil } return dup.ForEach(func(k, v []byte) error { - subBucket := dup.Bucket(k) + subBucket := dup.NestedReadBucket(k) if subBucket == nil { // We one bucket for each duplicate to // be found. 
@@ -438,7 +437,7 @@ func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) { return payments, nil } -func fetchPaymentMigration9(bucket *bbolt.Bucket) (*Payment, error) { +func fetchPaymentMigration9(bucket kvdb.ReadBucket) (*Payment, error) { var ( err error p = &Payment{} diff --git a/channeldb/migration_10_route_tlv_records.go b/channeldb/migration_01_to_11/migration_10_route_tlv_records.go similarity index 84% rename from channeldb/migration_10_route_tlv_records.go rename to channeldb/migration_01_to_11/migration_10_route_tlv_records.go index 2659c4a734..21a684f670 100644 --- a/channeldb/migration_10_route_tlv_records.go +++ b/channeldb/migration_01_to_11/migration_10_route_tlv_records.go @@ -1,19 +1,18 @@ -package channeldb +package migration_01_to_11 import ( "bytes" "io" - "github.com/coreos/bbolt" - "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) -// migrateRouteSerialization migrates the way we serialize routes across the +// MigrateRouteSerialization migrates the way we serialize routes across the // entire database. At the time of writing of this migration, this includes our // payment attempts, as well as the payment results in mission control. -func migrateRouteSerialization(tx *bbolt.Tx) error { +func MigrateRouteSerialization(tx kvdb.RwTx) error { // First, we'll do all the payment attempts. - rootPaymentBucket := tx.Bucket(paymentsRootBucket) + rootPaymentBucket := tx.ReadWriteBucket(paymentsRootBucket) if rootPaymentBucket == nil { return nil } @@ -37,7 +36,7 @@ func migrateRouteSerialization(tx *bbolt.Tx) error { // Now that we have all the payment hashes, we can carry out the // migration itself. for _, payHash := range payHashes { - payHashBucket := rootPaymentBucket.Bucket(payHash) + payHashBucket := rootPaymentBucket.NestedReadWriteBucket(payHash) // First, we'll migrate the main (non duplicate) payment to // this hash. 
@@ -48,7 +47,7 @@ func migrateRouteSerialization(tx *bbolt.Tx) error { // Now that we've migrated the main payment, we'll also check // for any duplicate payments to the same payment hash. - dupBucket := payHashBucket.Bucket(paymentDuplicateBucket) + dupBucket := payHashBucket.NestedReadWriteBucket(paymentDuplicateBucket) // If there's no dup bucket, then we can move on to the next // payment. @@ -70,7 +69,7 @@ func migrateRouteSerialization(tx *bbolt.Tx) error { // Now in this second pass, we'll re-serialize their duplicate // payment attempts under the new encoding. for _, seqNo := range dupSeqNos { - dupPayHashBucket := dupBucket.Bucket(seqNo) + dupPayHashBucket := dupBucket.NestedReadWriteBucket(seqNo) err := migrateAttemptEncoding(tx, dupPayHashBucket) if err != nil { return err @@ -84,8 +83,8 @@ func migrateRouteSerialization(tx *bbolt.Tx) error { "existing data") resultsKey := []byte("missioncontrol-results") - err = tx.DeleteBucket(resultsKey) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(resultsKey) + if err != nil && err != kvdb.ErrBucketNotFound { return err } @@ -96,7 +95,7 @@ func migrateRouteSerialization(tx *bbolt.Tx) error { // migrateAttemptEncoding migrates payment attempts using the legacy format to // the new format. 
-func migrateAttemptEncoding(tx *bbolt.Tx, payHashBucket *bbolt.Bucket) error { +func migrateAttemptEncoding(tx kvdb.RwTx, payHashBucket kvdb.RwBucket) error { payAttemptBytes := payHashBucket.Get(paymentAttemptInfoKey) if payAttemptBytes == nil { return nil @@ -154,8 +153,8 @@ func serializePaymentAttemptInfoLegacy(w io.Writer, a *PaymentAttemptInfo) error return nil } -func deserializeHopLegacy(r io.Reader) (*route.Hop, error) { - h := &route.Hop{} +func deserializeHopLegacy(r io.Reader) (*Hop, error) { + h := &Hop{} var pub []byte if err := ReadElements(r, &pub); err != nil { @@ -172,7 +171,7 @@ func deserializeHopLegacy(r io.Reader) (*route.Hop, error) { return h, nil } -func serializeHopLegacy(w io.Writer, h *route.Hop) error { +func serializeHopLegacy(w io.Writer, h *Hop) error { if err := WriteElements(w, h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock, h.AmtToForward, @@ -183,8 +182,8 @@ func serializeHopLegacy(w io.Writer, h *route.Hop) error { return nil } -func deserializeRouteLegacy(r io.Reader) (route.Route, error) { - rt := route.Route{} +func deserializeRouteLegacy(r io.Reader) (Route, error) { + rt := Route{} if err := ReadElements(r, &rt.TotalTimeLock, &rt.TotalAmount, ); err != nil { @@ -202,7 +201,7 @@ func deserializeRouteLegacy(r io.Reader) (route.Route, error) { return rt, err } - var hops []*route.Hop + var hops []*Hop for i := uint32(0); i < numHops; i++ { hop, err := deserializeHopLegacy(r) if err != nil { @@ -215,7 +214,7 @@ func deserializeRouteLegacy(r io.Reader) (route.Route, error) { return rt, nil } -func serializeRouteLegacy(w io.Writer, r route.Route) error { +func serializeRouteLegacy(w io.Writer, r Route) error { if err := WriteElements(w, r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:], ); err != nil { diff --git a/channeldb/migration_11_invoices.go b/channeldb/migration_01_to_11/migration_11_invoices.go similarity index 96% rename from channeldb/migration_11_invoices.go rename to 
channeldb/migration_01_to_11/migration_11_invoices.go index b4e607338f..242631d86f 100644 --- a/channeldb/migration_11_invoices.go +++ b/channeldb/migration_01_to_11/migration_11_invoices.go @@ -1,4 +1,4 @@ -package channeldb +package migration_01_to_11 import ( "bytes" @@ -8,18 +8,18 @@ import ( bitcoinCfg "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/zpay32" litecoinCfg "github.com/ltcsuite/ltcd/chaincfg" ) -// migrateInvoices adds invoice htlcs and a separate cltv delta field to the +// MigrateInvoices adds invoice htlcs and a separate cltv delta field to the // invoices. -func migrateInvoices(tx *bbolt.Tx) error { +func MigrateInvoices(tx kvdb.RwTx) error { log.Infof("Migrating invoices to new invoice format") - invoiceB := tx.Bucket(invoiceBucket) + invoiceB := tx.ReadWriteBucket(invoiceBucket) if invoiceB == nil { return nil } diff --git a/channeldb/migration_11_invoices_test.go b/channeldb/migration_01_to_11/migration_11_invoices_test.go similarity index 92% rename from channeldb/migration_11_invoices_test.go rename to channeldb/migration_01_to_11/migration_11_invoices_test.go index 34cb1a92b5..32899d8e01 100644 --- a/channeldb/migration_11_invoices_test.go +++ b/channeldb/migration_01_to_11/migration_11_invoices_test.go @@ -1,4 +1,4 @@ -package channeldb +package migration_01_to_11 import ( "bytes" @@ -8,7 +8,7 @@ import ( "github.com/btcsuite/btcd/btcec" bitcoinCfg "github.com/btcsuite/btcd/chaincfg" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/zpay32" litecoinCfg "github.com/ltcsuite/ltcd/chaincfg" ) @@ -26,8 +26,8 @@ var ( // beforeMigrationFuncV11 insert the test invoices in the database. 
func beforeMigrationFuncV11(t *testing.T, d *DB, invoices []Invoice) { - err := d.Update(func(tx *bbolt.Tx) error { - invoicesBucket, err := tx.CreateBucketIfNotExists( + err := kvdb.Update(d, func(tx kvdb.RwTx) error { + invoicesBucket, err := tx.CreateTopLevelBucket( invoiceBucket, ) if err != nil { @@ -88,15 +88,6 @@ func TestMigrateInvoices(t *testing.T) { // Verify that all invoices were migrated. afterMigrationFunc := func(d *DB) { - meta, err := d.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != 1 { - t.Fatal("migration 'invoices' wasn't applied") - } - dbInvoices, err := d.FetchAllInvoices(false) if err != nil { t.Fatalf("unable to fetch invoices: %v", err) @@ -123,7 +114,7 @@ func TestMigrateInvoices(t *testing.T) { applyMigration(t, func(d *DB) { beforeMigrationFuncV11(t, d, invoices) }, afterMigrationFunc, - migrateInvoices, + MigrateInvoices, false) } @@ -149,7 +140,7 @@ func TestMigrateInvoicesHodl(t *testing.T) { applyMigration(t, func(d *DB) { beforeMigrationFuncV11(t, d, invoices) }, func(d *DB) {}, - migrateInvoices, + MigrateInvoices, true) } diff --git a/channeldb/migrations.go b/channeldb/migration_01_to_11/migrations.go similarity index 91% rename from channeldb/migrations.go rename to channeldb/migration_01_to_11/migrations.go index a78d1314a3..35be510e99 100644 --- a/channeldb/migrations.go +++ b/channeldb/migration_01_to_11/migrations.go @@ -1,4 +1,4 @@ -package channeldb +package migration_01_to_11 import ( "bytes" @@ -7,21 +7,20 @@ import ( "fmt" "github.com/btcsuite/btcd/btcec" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" - "github.com/lightningnetwork/lnd/routing/route" ) -// migrateNodeAndEdgeUpdateIndex is a migration function that will update the +// MigrateNodeAndEdgeUpdateIndex is a migration function that will update the // database from version 0 to version 1. 
In version 1, we add two new indexes // (one for nodes and one for edges) to keep track of the last time a node or // edge was updated on the network. These new indexes allow us to implement the // new graph sync protocol added. -func migrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error { +func MigrateNodeAndEdgeUpdateIndex(tx kvdb.RwTx) error { // First, we'll populating the node portion of the new index. Before we // can add new values to the index, we'll first create the new bucket // where these items will be housed. - nodes, err := tx.CreateBucketIfNotExists(nodeBucket) + nodes, err := tx.CreateTopLevelBucket(nodeBucket) if err != nil { return fmt.Errorf("unable to create node bucket: %v", err) } @@ -65,7 +64,7 @@ func migrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error { // With the set of nodes updated, we'll now update all edges to have a // corresponding entry in the edge update index. - edges, err := tx.CreateBucketIfNotExists(edgeBucket) + edges, err := tx.CreateTopLevelBucket(edgeBucket) if err != nil { return fmt.Errorf("unable to create edge bucket: %v", err) } @@ -118,12 +117,12 @@ func migrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error { return nil } -// migrateInvoiceTimeSeries is a database migration that assigns all existing +// MigrateInvoiceTimeSeries is a database migration that assigns all existing // invoices an index in the add and/or the settle index. Additionally, all // existing invoices will have their bytes padded out in order to encode the // add+settle index as well as the amount paid. 
-func migrateInvoiceTimeSeries(tx *bbolt.Tx) error { - invoices, err := tx.CreateBucketIfNotExists(invoiceBucket) +func MigrateInvoiceTimeSeries(tx kvdb.RwTx) error { + invoices, err := tx.CreateTopLevelBucket(invoiceBucket) if err != nil { return err } @@ -255,12 +254,12 @@ func migrateInvoiceTimeSeries(tx *bbolt.Tx) error { return nil } -// migrateInvoiceTimeSeriesOutgoingPayments is a follow up to the +// MigrateInvoiceTimeSeriesOutgoingPayments is a follow up to the // migrateInvoiceTimeSeries migration. As at the time of writing, the // OutgoingPayment struct embeddeds an instance of the Invoice struct. As a // result, we also need to migrate the internal invoice to the new format. -func migrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error { - payBucket := tx.Bucket(paymentBucket) +func MigrateInvoiceTimeSeriesOutgoingPayments(tx kvdb.RwTx) error { + payBucket := tx.ReadWriteBucket(paymentBucket) if payBucket == nil { return nil } @@ -336,22 +335,22 @@ func migrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error { return nil } -// migrateEdgePolicies is a migration function that will update the edges +// MigrateEdgePolicies is a migration function that will update the edges // bucket. It ensure that edges with unknown policies will also have an entry // in the bucket. After the migration, there will be two edge entries for // every channel, regardless of whether the policies are known. 
-func migrateEdgePolicies(tx *bbolt.Tx) error { - nodes := tx.Bucket(nodeBucket) +func MigrateEdgePolicies(tx kvdb.RwTx) error { + nodes := tx.ReadWriteBucket(nodeBucket) if nodes == nil { return nil } - edges := tx.Bucket(edgeBucket) + edges := tx.ReadWriteBucket(edgeBucket) if edges == nil { return nil } - edgeIndex := edges.Bucket(edgeIndexBucket) + edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket) if edgeIndex == nil { return nil } @@ -409,13 +408,13 @@ func migrateEdgePolicies(tx *bbolt.Tx) error { return nil } -// paymentStatusesMigration is a database migration intended for adding payment +// PaymentStatusesMigration is a database migration intended for adding payment // statuses for each existing payment entity in bucket to be able control // transitions of statuses and prevent cases such as double payment -func paymentStatusesMigration(tx *bbolt.Tx) error { +func PaymentStatusesMigration(tx kvdb.RwTx) error { // Get the bucket dedicated to storing statuses of payments, // where a key is payment hash, value is payment status. 
- paymentStatuses, err := tx.CreateBucketIfNotExists(paymentStatusBucket) + paymentStatuses, err := tx.CreateTopLevelBucket(paymentStatusBucket) if err != nil { return err } @@ -423,7 +422,7 @@ func paymentStatusesMigration(tx *bbolt.Tx) error { log.Infof("Migrating database to support payment statuses") circuitAddKey := []byte("circuit-adds") - circuits := tx.Bucket(circuitAddKey) + circuits := tx.ReadWriteBucket(circuitAddKey) if circuits != nil { log.Infof("Marking all known circuits with status InFlight") @@ -456,7 +455,7 @@ func paymentStatusesMigration(tx *bbolt.Tx) error { log.Infof("Marking all existing payments with status Completed") // Get the bucket dedicated to storing payments - bucket := tx.Bucket(paymentBucket) + bucket := tx.ReadWriteBucket(paymentBucket) if bucket == nil { return nil } @@ -492,21 +491,21 @@ func paymentStatusesMigration(tx *bbolt.Tx) error { return nil } -// migratePruneEdgeUpdateIndex is a database migration that attempts to resolve +// MigratePruneEdgeUpdateIndex is a database migration that attempts to resolve // some lingering bugs with regards to edge policies and their update index. // Stale entries within the edge update index were not being properly pruned due // to a miscalculation on the offset of an edge's policy last update. This // migration also fixes the case where the public keys within edge policies were // being serialized with an extra byte, causing an even greater error when // attempting to perform the offset calculation described earlier. -func migratePruneEdgeUpdateIndex(tx *bbolt.Tx) error { +func MigratePruneEdgeUpdateIndex(tx kvdb.RwTx) error { // To begin the migration, we'll retrieve the update index bucket. If it // does not exist, we have nothing left to do so we can simply exit. 
- edges := tx.Bucket(edgeBucket) + edges := tx.ReadWriteBucket(edgeBucket) if edges == nil { return nil } - edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket) + edgeUpdateIndex := edges.NestedReadWriteBucket(edgeUpdateIndexBucket) if edgeUpdateIndex == nil { return nil } @@ -522,7 +521,7 @@ func migratePruneEdgeUpdateIndex(tx *bbolt.Tx) error { return fmt.Errorf("unable to create/fetch edge index " + "bucket") } - nodes, err := tx.CreateBucketIfNotExists(nodeBucket) + nodes, err := tx.CreateTopLevelBucket(nodeBucket) if err != nil { return fmt.Errorf("unable to make node bucket") } @@ -610,11 +609,11 @@ func migratePruneEdgeUpdateIndex(tx *bbolt.Tx) error { return nil } -// migrateOptionalChannelCloseSummaryFields migrates the serialized format of +// MigrateOptionalChannelCloseSummaryFields migrates the serialized format of // ChannelCloseSummary to a format where optional fields' presence is indicated // with boolean markers. -func migrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error { - closedChanBucket := tx.Bucket(closedChannelBucket) +func MigrateOptionalChannelCloseSummaryFields(tx kvdb.RwTx) error { + closedChanBucket := tx.ReadWriteBucket(closedChannelBucket) if closedChanBucket == nil { return nil } @@ -669,14 +668,14 @@ func migrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error { var messageStoreBucket = []byte("message-store") -// migrateGossipMessageStoreKeys migrates the key format for gossip messages +// MigrateGossipMessageStoreKeys migrates the key format for gossip messages // found in the message store to a new one that takes into consideration the of // the message being stored. -func migrateGossipMessageStoreKeys(tx *bbolt.Tx) error { +func MigrateGossipMessageStoreKeys(tx kvdb.RwTx) error { // We'll start by retrieving the bucket in which these messages are // stored within. If there isn't one, there's nothing left for us to do // so we can avoid the migration. 
- messageStore := tx.Bucket(messageStoreBucket) + messageStore := tx.ReadWriteBucket(messageStoreBucket) if messageStore == nil { return nil } @@ -739,7 +738,7 @@ func migrateGossipMessageStoreKeys(tx *bbolt.Tx) error { return nil } -// migrateOutgoingPayments moves the OutgoingPayments into a new bucket format +// MigrateOutgoingPayments moves the OutgoingPayments into a new bucket format // where they all reside in a top-level bucket indexed by the payment hash. In // this sub-bucket we store information relevant to this payment, such as the // payment status. @@ -748,10 +747,10 @@ func migrateGossipMessageStoreKeys(tx *bbolt.Tx) error { // InFlight (we have no PaymentAttemptInfo available for pre-migration // payments) we delete those statuses, so only Completed payments remain in the // new bucket structure. -func migrateOutgoingPayments(tx *bbolt.Tx) error { +func MigrateOutgoingPayments(tx kvdb.RwTx) error { log.Infof("Migrating outgoing payments to new bucket structure") - oldPayments := tx.Bucket(paymentBucket) + oldPayments := tx.ReadWriteBucket(paymentBucket) // Return early if there are no payments to migrate. if oldPayments == nil { @@ -759,7 +758,7 @@ func migrateOutgoingPayments(tx *bbolt.Tx) error { return nil } - newPayments, err := tx.CreateBucket(paymentsRootBucket) + newPayments, err := tx.CreateTopLevelBucket(paymentsRootBucket) if err != nil { return err } @@ -768,7 +767,7 @@ func migrateOutgoingPayments(tx *bbolt.Tx) error { // only attempt to fetch it if needed. sourcePub := func() ([33]byte, error) { var pub [33]byte - nodes := tx.Bucket(nodeBucket) + nodes := tx.ReadWriteBucket(nodeBucket) if nodes == nil { return pub, ErrGraphNotFound } @@ -817,14 +816,14 @@ func migrateOutgoingPayments(tx *bbolt.Tx) error { // Do the same for the PaymentAttemptInfo. 
totalAmt := payment.Terms.Value + payment.Fee - rt := route.Route{ + rt := Route{ TotalTimeLock: payment.TimeLockLength, TotalAmount: totalAmt, SourcePubKey: sourcePubKey, - Hops: []*route.Hop{}, + Hops: []*Hop{}, } for _, hop := range payment.Path { - rt.Hops = append(rt.Hops, &route.Hop{ + rt.Hops = append(rt.Hops, &Hop{ PubKeyBytes: hop, AmtToForward: totalAmt, }) @@ -863,8 +862,8 @@ func migrateOutgoingPayments(tx *bbolt.Tx) error { // from a database containing duplicate payments to a payment // hash. To keep this information, we store such duplicate // payments in a sub-bucket. - if err == bbolt.ErrBucketExists { - pHashBucket := newPayments.Bucket(paymentHash[:]) + if err == kvdb.ErrBucketExists { + pHashBucket := newPayments.NestedReadWriteBucket(paymentHash[:]) // Create a bucket for duplicate payments within this // payment hash's bucket. @@ -923,14 +922,14 @@ func migrateOutgoingPayments(tx *bbolt.Tx) error { // Now we delete the old buckets. Deleting the payment status buckets // deletes all payment statuses other than Complete. - err = tx.DeleteBucket(paymentStatusBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(paymentStatusBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } // Finally delete the old payment bucket. 
- err = tx.DeleteBucket(paymentBucket) - if err != nil && err != bbolt.ErrBucketNotFound { + err = tx.DeleteTopLevelBucket(paymentBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } diff --git a/channeldb/migrations_test.go b/channeldb/migration_01_to_11/migrations_test.go similarity index 92% rename from channeldb/migrations_test.go rename to channeldb/migration_01_to_11/migrations_test.go index 93bf602f0f..c2531b8fbd 100644 --- a/channeldb/migrations_test.go +++ b/channeldb/migration_01_to_11/migrations_test.go @@ -1,4 +1,4 @@ -package channeldb +package migration_01_to_11 import ( "bytes" @@ -11,12 +11,11 @@ import ( "time" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" - "github.com/lightningnetwork/lnd/routing/route" ) // TestPaymentStatusesMigration checks that already completed payments will have @@ -60,8 +59,8 @@ func TestPaymentStatusesMigration(t *testing.T) { // locally-sourced payment should end up with an InFlight // status, while the other should remain unchanged, which // defaults to Grounded. - err = d.Update(func(tx *bbolt.Tx) error { - circuits, err := tx.CreateBucketIfNotExists( + err = kvdb.Update(d, func(tx kvdb.RwTx) error { + circuits, err := tx.CreateTopLevelBucket( []byte("circuit-adds"), ) if err != nil { @@ -135,15 +134,6 @@ func TestPaymentStatusesMigration(t *testing.T) { // Verify that the created payment status is "Completed" for our one // fake payment. afterMigrationFunc := func(d *DB) { - meta, err := d.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != 1 { - t.Fatal("migration 'paymentStatusesMigration' wasn't applied") - } - // Check that our completed payments were migrated. 
paymentStatus, err := d.fetchPaymentStatus(paymentHash) if err != nil { @@ -197,7 +187,7 @@ func TestPaymentStatusesMigration(t *testing.T) { applyMigration(t, beforeMigrationFunc, afterMigrationFunc, - paymentStatusesMigration, + PaymentStatusesMigration, false) } @@ -387,8 +377,8 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) { // Get the old serialization format for this test's // close summary, and it to the closed channel bucket. old := test.oldSerialization(test.closeSummary) - err = d.Update(func(tx *bbolt.Tx) error { - closedChanBucket, err := tx.CreateBucketIfNotExists( + err = kvdb.Update(d, func(tx kvdb.RwTx) error { + closedChanBucket, err := tx.CreateTopLevelBucket( closedChannelBucket, ) if err != nil { @@ -404,15 +394,6 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) { // After the migration it should be found in the new format. afterMigrationFunc := func(d *DB) { - meta, err := d.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != 1 { - t.Fatal("migration wasn't applied") - } - // We generate the new serialized version, to check // against what is found in the DB. 
var b bytes.Buffer @@ -423,8 +404,8 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) { newSerialization := b.Bytes() var dbSummary []byte - err = d.View(func(tx *bbolt.Tx) error { - closedChanBucket := tx.Bucket(closedChannelBucket) + err = kvdb.View(d, func(tx kvdb.ReadTx) error { + closedChanBucket := tx.ReadBucket(closedChannelBucket) if closedChanBucket == nil { return errors.New("unable to find bucket") } @@ -469,7 +450,7 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) { applyMigration(t, beforeMigrationFunc, afterMigrationFunc, - migrateOptionalChannelCloseSummaryFields, + MigrateOptionalChannelCloseSummaryFields, false) } } @@ -501,8 +482,8 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) { t.Fatalf("unable to serialize message: %v", err) } - err := db.Update(func(tx *bbolt.Tx) error { - messageStore, err := tx.CreateBucketIfNotExists( + err := kvdb.Update(db, func(tx kvdb.RwTx) error { + messageStore, err := tx.CreateTopLevelBucket( messageStoreBucket, ) if err != nil { @@ -521,17 +502,9 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) { // 2. We can find the message under its new key. // 3. The message matches the original. 
afterMigration := func(db *DB) { - meta, err := db.FetchMeta(nil) - if err != nil { - t.Fatalf("unable to fetch db version: %v", err) - } - if meta.DbVersionNumber != 1 { - t.Fatalf("migration should have succeeded but didn't") - } - var rawMsg []byte - err = db.View(func(tx *bbolt.Tx) error { - messageStore := tx.Bucket(messageStoreBucket) + err := kvdb.View(db, func(tx kvdb.ReadTx) error { + messageStore := tx.ReadBucket(messageStoreBucket) if messageStore == nil { return errors.New("message store bucket not " + "found") @@ -565,7 +538,7 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) { applyMigration( t, beforeMigration, afterMigration, - migrateGossipMessageStoreKeys, false, + MigrateGossipMessageStoreKeys, false, ) } @@ -617,15 +590,6 @@ func TestOutgoingPaymentsMigration(t *testing.T) { // Verify that all payments were migrated. afterMigrationFunc := func(d *DB) { - meta, err := d.FetchMeta(nil) - if err != nil { - t.Fatal(err) - } - - if meta.DbVersionNumber != 1 { - t.Fatal("migration 'paymentStatusesMigration' wasn't applied") - } - sentPayments, err := d.fetchPaymentsMigration9() if err != nil { t.Fatalf("unable to fetch sent payments: %v", err) @@ -702,8 +666,8 @@ func TestOutgoingPaymentsMigration(t *testing.T) { // Finally, check that the payment sequence number is updated // to reflect the migrated payments. 
- err = d.View(func(tx *bbolt.Tx) error { - payments := tx.Bucket(paymentsRootBucket) + err = kvdb.Update(d, func(tx kvdb.RwTx) error { + payments := tx.ReadWriteBucket(paymentsRootBucket) if payments == nil { return fmt.Errorf("payments bucket not found") } @@ -724,7 +688,7 @@ func TestOutgoingPaymentsMigration(t *testing.T) { applyMigration(t, beforeMigrationFunc, afterMigrationFunc, - migrateOutgoingPayments, + MigrateOutgoingPayments, false) } @@ -749,25 +713,25 @@ func makeRandPaymentCreationInfo() (*PaymentCreationInfo, error) { func TestPaymentRouteSerialization(t *testing.T) { t.Parallel() - legacyHop1 := &route.Hop{ - PubKeyBytes: route.NewVertex(pub), + legacyHop1 := &Hop{ + PubKeyBytes: NewVertex(pub), ChannelID: 12345, OutgoingTimeLock: 111, LegacyPayload: true, AmtToForward: 555, } - legacyHop2 := &route.Hop{ - PubKeyBytes: route.NewVertex(pub), + legacyHop2 := &Hop{ + PubKeyBytes: NewVertex(pub), ChannelID: 12345, OutgoingTimeLock: 111, LegacyPayload: true, AmtToForward: 555, } - legacyRoute := route.Route{ + legacyRoute := Route{ TotalTimeLock: 123, TotalAmount: 1234567, - SourcePubKey: route.NewVertex(pub), - Hops: []*route.Hop{legacyHop1, legacyHop2}, + SourcePubKey: NewVertex(pub), + Hops: []*Hop{legacyHop1, legacyHop2}, } const numPayments = 4 @@ -782,8 +746,8 @@ func TestPaymentRouteSerialization(t *testing.T) { // We'll first add a series of fake payments, using the existing legacy // serialization format. beforeMigrationFunc := func(d *DB) { - err := d.Update(func(tx *bbolt.Tx) error { - paymentsBucket, err := tx.CreateBucket( + err := kvdb.Update(d, func(tx kvdb.RwTx) error { + paymentsBucket, err := tx.CreateTopLevelBucket( paymentsRootBucket, ) if err != nil { @@ -834,7 +798,7 @@ func TestPaymentRouteSerialization(t *testing.T) { // the proper bucket. If this is the duplicate // payment, then we'll grab the dup bucket, // otherwise, we'll use the top level bucket. 
- var payHashBucket *bbolt.Bucket + var payHashBucket kvdb.RwBucket if i < numPayments-1 { payHashBucket, err = paymentsBucket.CreateBucket( payInfo.PaymentHash[:], @@ -843,7 +807,7 @@ func TestPaymentRouteSerialization(t *testing.T) { t.Fatalf("unable to create payments bucket: %v", err) } } else { - payHashBucket = paymentsBucket.Bucket( + payHashBucket = paymentsBucket.NestedReadWriteBucket( payInfo.PaymentHash[:], ) dupPayBucket, err := payHashBucket.CreateBucket( @@ -947,6 +911,17 @@ func TestPaymentRouteSerialization(t *testing.T) { applyMigration(t, beforeMigrationFunc, afterMigrationFunc, - migrateRouteSerialization, + MigrateRouteSerialization, false) } + +// TestNotCoveredMigrations only references migrations that are not referenced +// anywhere else in this package. This prevents false positives when linting +// with unused. +func TestNotCoveredMigrations(t *testing.T) { + _ = MigrateNodeAndEdgeUpdateIndex + _ = MigrateInvoiceTimeSeries + _ = MigrateInvoiceTimeSeriesOutgoingPayments + _ = MigrateEdgePolicies + _ = MigratePruneEdgeUpdateIndex +} diff --git a/channeldb/migration_01_to_11/options.go b/channeldb/migration_01_to_11/options.go new file mode 100644 index 0000000000..03b287e040 --- /dev/null +++ b/channeldb/migration_01_to_11/options.go @@ -0,0 +1,41 @@ +package migration_01_to_11 + +const ( + // DefaultRejectCacheSize is the default number of rejectCacheEntries to + // cache for use in the rejection cache of incoming gossip traffic. This + // produces a cache size of around 1MB. + DefaultRejectCacheSize = 50000 + + // DefaultChannelCacheSize is the default number of ChannelEdges cached + // in order to reply to gossip queries. This produces a cache size of + // around 40MB. + DefaultChannelCacheSize = 20000 +) + +// Options holds parameters for tuning and customizing a channeldb.DB. +type Options struct { + // RejectCacheSize is the maximum number of rejectCacheEntries to hold + // in the rejection cache. 
+ RejectCacheSize int + + // ChannelCacheSize is the maximum number of ChannelEdges to hold in the + // channel cache. + ChannelCacheSize int + + // NoFreelistSync, if true, prevents the database from syncing its + // freelist to disk, resulting in improved performance at the expense of + // increased startup time. + NoFreelistSync bool +} + +// DefaultOptions returns an Options populated with default values. +func DefaultOptions() Options { + return Options{ + RejectCacheSize: DefaultRejectCacheSize, + ChannelCacheSize: DefaultChannelCacheSize, + NoFreelistSync: true, + } +} + +// OptionModifier is a function signature for modifying the default Options. +type OptionModifier func(*Options) diff --git a/channeldb/migration_01_to_11/payment_control.go b/channeldb/migration_01_to_11/payment_control.go new file mode 100644 index 0000000000..80acf9cea8 --- /dev/null +++ b/channeldb/migration_01_to_11/payment_control.go @@ -0,0 +1,21 @@ +package migration_01_to_11 + +import "github.com/lightningnetwork/lnd/channeldb/kvdb" + +// fetchPaymentStatus fetches the payment status of the payment. If the payment +// isn't found, it will default to "StatusUnknown". 
+func fetchPaymentStatus(bucket kvdb.ReadBucket) PaymentStatus { + if bucket.Get(paymentSettleInfoKey) != nil { + return StatusSucceeded + } + + if bucket.Get(paymentFailInfoKey) != nil { + return StatusFailed + } + + if bucket.Get(paymentCreationInfoKey) != nil { + return StatusInFlight + } + + return StatusUnknown +} diff --git a/channeldb/migration_01_to_11/payments.go b/channeldb/migration_01_to_11/payments.go new file mode 100644 index 0000000000..e0b185f391 --- /dev/null +++ b/channeldb/migration_01_to_11/payments.go @@ -0,0 +1,621 @@ +package migration_01_to_11 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sort" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/tlv" +) + +var ( + // paymentsRootBucket is the name of the top-level bucket within the + // database that stores all data related to payments. Within this + // bucket, each payment hash its own sub-bucket keyed by its payment + // hash. + // + // Bucket hierarchy: + // + // root-bucket + // | + // |-- + // | |--sequence-key: + // | |--creation-info-key: + // | |--attempt-info-key: + // | |--settle-info-key: + // | |--fail-info-key: + // | | + // | |--duplicate-bucket (only for old, completed payments) + // | | + // | |-- + // | | |--sequence-key: + // | | |--creation-info-key: + // | | |--attempt-info-key: + // | | |--settle-info-key: + // | | |--fail-info-key: + // | | + // | |-- + // | | | + // | ... ... + // | + // |-- + // | | + // | ... + // ... + // + paymentsRootBucket = []byte("payments-root-bucket") + + // paymentDublicateBucket is the name of a optional sub-bucket within + // the payment hash bucket, that is used to hold duplicate payments to + // a payment hash. 
This is needed to support information from earlier + // versions of lnd, where it was possible to pay to a payment hash more + // than once. + paymentDuplicateBucket = []byte("payment-duplicate-bucket") + + // paymentSequenceKey is a key used in the payment's sub-bucket to + // store the sequence number of the payment. + paymentSequenceKey = []byte("payment-sequence-key") + + // paymentCreationInfoKey is a key used in the payment's sub-bucket to + // store the creation info of the payment. + paymentCreationInfoKey = []byte("payment-creation-info") + + // paymentAttemptInfoKey is a key used in the payment's sub-bucket to + // store the info about the latest attempt that was done for the + // payment in question. + paymentAttemptInfoKey = []byte("payment-attempt-info") + + // paymentSettleInfoKey is a key used in the payment's sub-bucket to + // store the settle info of the payment. + paymentSettleInfoKey = []byte("payment-settle-info") + + // paymentFailInfoKey is a key used in the payment's sub-bucket to + // store information about the reason a payment failed. + paymentFailInfoKey = []byte("payment-fail-info") +) + +// FailureReason encodes the reason a payment ultimately failed. +type FailureReason byte + +const ( + // FailureReasonTimeout indicates that the payment did timeout before a + // successful payment attempt was made. + FailureReasonTimeout FailureReason = 0 + + // FailureReasonNoRoute indicates no successful route to the + // destination was found during path finding. + FailureReasonNoRoute FailureReason = 1 + + // FailureReasonError indicates that an unexpected error happened during + // payment. + FailureReasonError FailureReason = 2 + + // FailureReasonIncorrectPaymentDetails indicates that either the hash + // is unknown or the final cltv delta or amount is incorrect. + FailureReasonIncorrectPaymentDetails FailureReason = 3 + + // TODO(halseth): cancel state. 
+ + // TODO(joostjager): Add failure reasons for: + // LocalLiquidityInsufficient, RemoteCapacityInsufficient. +) + +// String returns a human readable FailureReason +func (r FailureReason) String() string { + switch r { + case FailureReasonTimeout: + return "timeout" + case FailureReasonNoRoute: + return "no_route" + case FailureReasonError: + return "error" + case FailureReasonIncorrectPaymentDetails: + return "incorrect_payment_details" + } + + return "unknown" +} + +// PaymentStatus represent current status of payment +type PaymentStatus byte + +const ( + // StatusUnknown is the status where a payment has never been initiated + // and hence is unknown. + StatusUnknown PaymentStatus = 0 + + // StatusInFlight is the status where a payment has been initiated, but + // a response has not been received. + StatusInFlight PaymentStatus = 1 + + // StatusSucceeded is the status where a payment has been initiated and + // the payment was completed successfully. + StatusSucceeded PaymentStatus = 2 + + // StatusFailed is the status where a payment has been initiated and a + // failure result has come back. + StatusFailed PaymentStatus = 3 +) + +// Bytes returns status as slice of bytes. +func (ps PaymentStatus) Bytes() []byte { + return []byte{byte(ps)} +} + +// FromBytes sets status from slice of bytes. +func (ps *PaymentStatus) FromBytes(status []byte) error { + if len(status) != 1 { + return errors.New("payment status is empty") + } + + switch PaymentStatus(status[0]) { + case StatusUnknown, StatusInFlight, StatusSucceeded, StatusFailed: + *ps = PaymentStatus(status[0]) + default: + return errors.New("unknown payment status") + } + + return nil +} + +// String returns readable representation of payment status. 
+func (ps PaymentStatus) String() string { + switch ps { + case StatusUnknown: + return "Unknown" + case StatusInFlight: + return "In Flight" + case StatusSucceeded: + return "Succeeded" + case StatusFailed: + return "Failed" + default: + return "Unknown" + } +} + +// PaymentCreationInfo is the information necessary to have ready when +// initiating a payment, moving it into state InFlight. +type PaymentCreationInfo struct { + // PaymentHash is the hash this payment is paying to. + PaymentHash lntypes.Hash + + // Value is the amount we are paying. + Value lnwire.MilliSatoshi + + // CreatingDate is the time when this payment was initiated. + CreationDate time.Time + + // PaymentRequest is the full payment request, if any. + PaymentRequest []byte +} + +// PaymentAttemptInfo contains information about a specific payment attempt for +// a given payment. This information is used by the router to handle any errors +// coming back after an attempt is made, and to query the switch about the +// status of a payment. For settled payment this will be the information for +// the succeeding payment attempt. +type PaymentAttemptInfo struct { + // PaymentID is the unique ID used for this attempt. + PaymentID uint64 + + // SessionKey is the ephemeral key used for this payment attempt. + SessionKey *btcec.PrivateKey + + // Route is the route attempted to send the HTLC. + Route Route +} + +// Payment is a wrapper around a payment's PaymentCreationInfo, +// PaymentAttemptInfo, and preimage. All payments will have the +// PaymentCreationInfo set, the PaymentAttemptInfo will be set only if at least +// one payment attempt has been made, while only completed payments will have a +// non-zero payment preimage. +type Payment struct { + // sequenceNum is a unique identifier used to sort the payments in + // order of creation. + sequenceNum uint64 + + // Status is the current PaymentStatus of this payment. 
+ Status PaymentStatus + + // Info holds all static information about this payment, and is + // populated when the payment is initiated. + Info *PaymentCreationInfo + + // Attempt is the information about the last payment attempt made. + // + // NOTE: Can be nil if no attempt is yet made. + Attempt *PaymentAttemptInfo + + // PaymentPreimage is the preimage of a successful payment. This serves + // as a proof of payment. It will only be non-nil for settled payments. + // + // NOTE: Can be nil if payment is not settled. + PaymentPreimage *lntypes.Preimage + + // Failure is a failure reason code indicating the reason the payment + // failed. It is only non-nil for failed payments. + // + // NOTE: Can be nil if payment is not failed. + Failure *FailureReason +} + +// FetchPayments returns all sent payments found in the DB. +func (db *DB) FetchPayments() ([]*Payment, error) { + var payments []*Payment + + err := kvdb.View(db, func(tx kvdb.ReadTx) error { + paymentsBucket := tx.ReadBucket(paymentsRootBucket) + if paymentsBucket == nil { + return nil + } + + return paymentsBucket.ForEach(func(k, v []byte) error { + bucket := paymentsBucket.NestedReadBucket(k) + if bucket == nil { + // We only expect sub-buckets to be found in + // this top-level bucket. + return fmt.Errorf("non bucket element in " + + "payments bucket") + } + + p, err := fetchPayment(bucket) + if err != nil { + return err + } + + payments = append(payments, p) + + // For older versions of lnd, duplicate payments to a + // payment has was possible. These will be found in a + // sub-bucket indexed by their sequence number if + // available. + dup := bucket.NestedReadBucket(paymentDuplicateBucket) + if dup == nil { + return nil + } + + return dup.ForEach(func(k, v []byte) error { + subBucket := dup.NestedReadBucket(k) + if subBucket == nil { + // We one bucket for each duplicate to + // be found. 
+ return fmt.Errorf("non bucket element" + + "in duplicate bucket") + } + + p, err := fetchPayment(subBucket) + if err != nil { + return err + } + + payments = append(payments, p) + return nil + }) + }) + }) + if err != nil { + return nil, err + } + + // Before returning, sort the payments by their sequence number. + sort.Slice(payments, func(i, j int) bool { + return payments[i].sequenceNum < payments[j].sequenceNum + }) + + return payments, nil +} + +func fetchPayment(bucket kvdb.ReadBucket) (*Payment, error) { + var ( + err error + p = &Payment{} + ) + + seqBytes := bucket.Get(paymentSequenceKey) + if seqBytes == nil { + return nil, fmt.Errorf("sequence number not found") + } + + p.sequenceNum = binary.BigEndian.Uint64(seqBytes) + + // Get the payment status. + p.Status = fetchPaymentStatus(bucket) + + // Get the PaymentCreationInfo. + b := bucket.Get(paymentCreationInfoKey) + if b == nil { + return nil, fmt.Errorf("creation info not found") + } + + r := bytes.NewReader(b) + p.Info, err = deserializePaymentCreationInfo(r) + if err != nil { + return nil, err + + } + + // Get the PaymentAttemptInfo. This can be unset. + b = bucket.Get(paymentAttemptInfoKey) + if b != nil { + r = bytes.NewReader(b) + p.Attempt, err = deserializePaymentAttemptInfo(r) + if err != nil { + return nil, err + } + } + + // Get the payment preimage. This is only found for + // completed payments. + b = bucket.Get(paymentSettleInfoKey) + if b != nil { + var preimg lntypes.Preimage + copy(preimg[:], b[:]) + p.PaymentPreimage = &preimg + } + + // Get failure reason if available. 
+ b = bucket.Get(paymentFailInfoKey) + if b != nil { + reason := FailureReason(b[0]) + p.Failure = &reason + } + + return p, nil +} + +func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) error { + var scratch [8]byte + + if _, err := w.Write(c.PaymentHash[:]); err != nil { + return err + } + + byteOrder.PutUint64(scratch[:], uint64(c.Value)) + if _, err := w.Write(scratch[:]); err != nil { + return err + } + + byteOrder.PutUint64(scratch[:], uint64(c.CreationDate.Unix())) + if _, err := w.Write(scratch[:]); err != nil { + return err + } + + byteOrder.PutUint32(scratch[:4], uint32(len(c.PaymentRequest))) + if _, err := w.Write(scratch[:4]); err != nil { + return err + } + + if _, err := w.Write(c.PaymentRequest[:]); err != nil { + return err + } + + return nil +} + +func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, error) { + var scratch [8]byte + + c := &PaymentCreationInfo{} + + if _, err := io.ReadFull(r, c.PaymentHash[:]); err != nil { + return nil, err + } + + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return nil, err + } + c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) + + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return nil, err + } + c.CreationDate = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0) + + if _, err := io.ReadFull(r, scratch[:4]); err != nil { + return nil, err + } + + reqLen := uint32(byteOrder.Uint32(scratch[:4])) + payReq := make([]byte, reqLen) + if reqLen > 0 { + if _, err := io.ReadFull(r, payReq[:]); err != nil { + return nil, err + } + } + c.PaymentRequest = payReq + + return c, nil +} + +func serializePaymentAttemptInfo(w io.Writer, a *PaymentAttemptInfo) error { + if err := WriteElements(w, a.PaymentID, a.SessionKey); err != nil { + return err + } + + if err := SerializeRoute(w, a.Route); err != nil { + return err + } + + return nil +} + +func deserializePaymentAttemptInfo(r io.Reader) (*PaymentAttemptInfo, error) { + a := &PaymentAttemptInfo{} + err := 
ReadElements(r, &a.PaymentID, &a.SessionKey) + if err != nil { + return nil, err + } + a.Route, err = DeserializeRoute(r) + if err != nil { + return nil, err + } + return a, nil +} + +func serializeHop(w io.Writer, h *Hop) error { + if err := WriteElements(w, + h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock, + h.AmtToForward, + ); err != nil { + return err + } + + if err := binary.Write(w, byteOrder, h.LegacyPayload); err != nil { + return err + } + + // For legacy payloads, we don't need to write any TLV records, so + // we'll write a zero indicating the our serialized TLV map has no + // records. + if h.LegacyPayload { + return WriteElements(w, uint32(0)) + } + + // Otherwise, we'll transform our slice of records into a map of the + // raw bytes, then serialize them in-line with a length (number of + // elements) prefix. + mapRecords, err := tlv.RecordsToMap(h.TLVRecords) + if err != nil { + return err + } + + numRecords := uint32(len(mapRecords)) + if err := WriteElements(w, numRecords); err != nil { + return err + } + + for recordType, rawBytes := range mapRecords { + if err := WriteElements(w, recordType); err != nil { + return err + } + + if err := wire.WriteVarBytes(w, 0, rawBytes); err != nil { + return err + } + } + + return nil +} + +// maxOnionPayloadSize is the largest Sphinx payload possible, so we don't need +// to read/write a TLV stream larger than this. +const maxOnionPayloadSize = 1300 + +func deserializeHop(r io.Reader) (*Hop, error) { + h := &Hop{} + + var pub []byte + if err := ReadElements(r, &pub); err != nil { + return nil, err + } + copy(h.PubKeyBytes[:], pub) + + if err := ReadElements(r, + &h.ChannelID, &h.OutgoingTimeLock, &h.AmtToForward, + ); err != nil { + return nil, err + } + + // TODO(roasbeef): change field to allow LegacyPayload false to be the + // legacy default? 
+ err := binary.Read(r, byteOrder, &h.LegacyPayload) + if err != nil { + return nil, err + } + + var numElements uint32 + if err := ReadElements(r, &numElements); err != nil { + return nil, err + } + + // If there're no elements, then we can return early. + if numElements == 0 { + return h, nil + } + + tlvMap := make(map[uint64][]byte) + for i := uint32(0); i < numElements; i++ { + var tlvType uint64 + if err := ReadElements(r, &tlvType); err != nil { + return nil, err + } + + rawRecordBytes, err := wire.ReadVarBytes( + r, 0, maxOnionPayloadSize, "tlv", + ) + if err != nil { + return nil, err + } + + tlvMap[tlvType] = rawRecordBytes + } + + h.TLVRecords = tlv.MapToRecords(tlvMap) + + return h, nil +} + +// SerializeRoute serializes a route. +func SerializeRoute(w io.Writer, r Route) error { + if err := WriteElements(w, + r.TotalTimeLock, r.TotalAmount, r.SourcePubKey[:], + ); err != nil { + return err + } + + if err := WriteElements(w, uint32(len(r.Hops))); err != nil { + return err + } + + for _, h := range r.Hops { + if err := serializeHop(w, h); err != nil { + return err + } + } + + return nil +} + +// DeserializeRoute deserializes a route. 
+func DeserializeRoute(r io.Reader) (Route, error) { + rt := Route{} + if err := ReadElements(r, + &rt.TotalTimeLock, &rt.TotalAmount, + ); err != nil { + return rt, err + } + + var pub []byte + if err := ReadElements(r, &pub); err != nil { + return rt, err + } + copy(rt.SourcePubKey[:], pub) + + var numHops uint32 + if err := ReadElements(r, &numHops); err != nil { + return rt, err + } + + var hops []*Hop + for i := uint32(0); i < numHops; i++ { + hop, err := deserializeHop(r) + if err != nil { + return rt, err + } + hops = append(hops, hop) + } + rt.Hops = hops + + return rt, nil +} diff --git a/channeldb/migration_01_to_11/payments_test.go b/channeldb/migration_01_to_11/payments_test.go new file mode 100644 index 0000000000..c5584079f4 --- /dev/null +++ b/channeldb/migration_01_to_11/payments_test.go @@ -0,0 +1,108 @@ +package migration_01_to_11 + +import ( + "bytes" + "fmt" + "math/rand" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/lightningnetwork/lnd/lnwire" +) + +var ( + priv, _ = btcec.NewPrivateKey(btcec.S256()) + pub = priv.PubKey() +) + +func makeFakePayment() *outgoingPayment { + fakeInvoice := &Invoice{ + // Use single second precision to avoid false positive test + // failures due to the monotonic time component. 
+ CreationDate: time.Unix(time.Now().Unix(), 0), + Memo: []byte("fake memo"), + Receipt: []byte("fake receipt"), + PaymentRequest: []byte(""), + } + + copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:]) + fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000) + + fakePath := make([][33]byte, 3) + for i := 0; i < 3; i++ { + copy(fakePath[i][:], bytes.Repeat([]byte{byte(i)}, 33)) + } + + fakePayment := &outgoingPayment{ + Invoice: *fakeInvoice, + Fee: 101, + Path: fakePath, + TimeLockLength: 1000, + } + copy(fakePayment.PaymentPreimage[:], rev[:]) + return fakePayment +} + +// randomBytes creates random []byte with length in range [minLen, maxLen) +func randomBytes(minLen, maxLen int) ([]byte, error) { + randBuf := make([]byte, minLen+rand.Intn(maxLen-minLen)) + + if _, err := rand.Read(randBuf); err != nil { + return nil, fmt.Errorf("Internal error. "+ + "Cannot generate random string: %v", err) + } + + return randBuf, nil +} + +func makeRandomFakePayment() (*outgoingPayment, error) { + var err error + fakeInvoice := &Invoice{ + // Use single second precision to avoid false positive test + // failures due to the monotonic time component. 
+ CreationDate: time.Unix(time.Now().Unix(), 0), + } + + fakeInvoice.Memo, err = randomBytes(1, 50) + if err != nil { + return nil, err + } + + fakeInvoice.Receipt, err = randomBytes(1, 50) + if err != nil { + return nil, err + } + + fakeInvoice.PaymentRequest, err = randomBytes(1, 50) + if err != nil { + return nil, err + } + + preImg, err := randomBytes(32, 33) + if err != nil { + return nil, err + } + copy(fakeInvoice.Terms.PaymentPreimage[:], preImg) + + fakeInvoice.Terms.Value = lnwire.MilliSatoshi(rand.Intn(10000)) + + fakePathLen := 1 + rand.Intn(5) + fakePath := make([][33]byte, fakePathLen) + for i := 0; i < fakePathLen; i++ { + b, err := randomBytes(33, 34) + if err != nil { + return nil, err + } + copy(fakePath[i][:], b) + } + + fakePayment := &outgoingPayment{ + Invoice: *fakeInvoice, + Fee: lnwire.MilliSatoshi(rand.Intn(1001)), + Path: fakePath, + TimeLockLength: uint32(rand.Intn(10000)), + } + copy(fakePayment.PaymentPreimage[:], fakeInvoice.Terms.PaymentPreimage[:]) + + return fakePayment, nil +} diff --git a/channeldb/migration_01_to_11/route.go b/channeldb/migration_01_to_11/route.go new file mode 100644 index 0000000000..1dbfff60c4 --- /dev/null +++ b/channeldb/migration_01_to_11/route.go @@ -0,0 +1,330 @@ +package migration_01_to_11 + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "strconv" + "strings" + + "github.com/btcsuite/btcd/btcec" + sphinx "github.com/lightningnetwork/lightning-onion" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" + "github.com/lightningnetwork/lnd/tlv" +) + +// VertexSize is the size of the array to store a vertex. +const VertexSize = 33 + +// ErrNoRouteHopsProvided is returned when a caller attempts to construct a new +// sphinx packet, but provides an empty set of hops for each route. +var ErrNoRouteHopsProvided = fmt.Errorf("empty route hops provided") + +// Vertex is a simple alias for the serialization of a compressed Bitcoin +// public key. 
+type Vertex [VertexSize]byte + +// NewVertex returns a new Vertex given a public key. +func NewVertex(pub *btcec.PublicKey) Vertex { + var v Vertex + copy(v[:], pub.SerializeCompressed()) + return v +} + +// NewVertexFromBytes returns a new Vertex based on a serialized pubkey in a +// byte slice. +func NewVertexFromBytes(b []byte) (Vertex, error) { + vertexLen := len(b) + if vertexLen != VertexSize { + return Vertex{}, fmt.Errorf("invalid vertex length of %v, "+ + "want %v", vertexLen, VertexSize) + } + + var v Vertex + copy(v[:], b) + return v, nil +} + +// NewVertexFromStr returns a new Vertex given its hex-encoded string format. +func NewVertexFromStr(v string) (Vertex, error) { + // Return error if hex string is of incorrect length. + if len(v) != VertexSize*2 { + return Vertex{}, fmt.Errorf("invalid vertex string length of "+ + "%v, want %v", len(v), VertexSize*2) + } + + vertex, err := hex.DecodeString(v) + if err != nil { + return Vertex{}, err + } + + return NewVertexFromBytes(vertex) +} + +// String returns a human readable version of the Vertex which is the +// hex-encoding of the serialized compressed public key. +func (v Vertex) String() string { + return fmt.Sprintf("%x", v[:]) +} + +// Hop represents an intermediate or final node of the route. This naming +// is in line with the definition given in BOLT #4: Onion Routing Protocol. +// The struct houses the channel along which this hop can be reached and +// the values necessary to create the HTLC that needs to be sent to the +// next hop. It is also used to encode the per-hop payload included within +// the Sphinx packet. +type Hop struct { + // PubKeyBytes is the raw bytes of the public key of the target node. + PubKeyBytes Vertex + + // ChannelID is the unique channel ID for the channel. The first 3 + // bytes are the block height, the next 3 the index within the block, + // and the last 2 bytes are the output index for the channel. 
+ ChannelID uint64 + + // OutgoingTimeLock is the timelock value that should be used when + // crafting the _outgoing_ HTLC from this hop. + OutgoingTimeLock uint32 + + // AmtToForward is the amount that this hop will forward to the next + // hop. This value is less than the value that the incoming HTLC + // carries as a fee will be subtracted by the hop. + AmtToForward lnwire.MilliSatoshi + + // TLVRecords if non-nil are a set of additional TLV records that + // should be included in the forwarding instructions for this node. + TLVRecords []tlv.Record + + // LegacyPayload if true, then this signals that this node doesn't + // understand the new TLV payload, so we must instead use the legacy + // payload. + LegacyPayload bool +} + +// PackHopPayload writes to the passed io.Writer, the series of byes that can +// be placed directly into the per-hop payload (EOB) for this hop. This will +// include the required routing fields, as well as serializing any of the +// passed optional TLVRecords. nextChanID is the unique channel ID that +// references the _outgoing_ channel ID that follows this hop. This field +// follows the same semantics as the NextAddress field in the onion: it should +// be set to zero to indicate the terminal hop. +func (h *Hop) PackHopPayload(w io.Writer, nextChanID uint64) error { + // If this is a legacy payload, then we'll exit here as this method + // shouldn't be called. + if h.LegacyPayload == true { + return fmt.Errorf("cannot pack hop payloads for legacy " + + "payloads") + } + + // Otherwise, we'll need to make a new stream that includes our + // required routing fields, as well as these optional values. + var records []tlv.Record + + // Every hop must have an amount to forward and CLTV expiry. 
+ amt := uint64(h.AmtToForward) + records = append(records, + record.NewAmtToFwdRecord(&amt), + record.NewLockTimeRecord(&h.OutgoingTimeLock), + ) + + // BOLT 04 says the next_hop_id should be omitted for the final hop, + // but present for all others. + // + // TODO(conner): test using hop.Exit once available + if nextChanID != 0 { + records = append(records, + record.NewNextHopIDRecord(&nextChanID), + ) + } + + // Append any custom types destined for this hop. + records = append(records, h.TLVRecords...) + + // To ensure we produce a canonical stream, we'll sort the records + // before encoding them as a stream in the hop payload. + tlv.SortRecords(records) + + tlvStream, err := tlv.NewStream(records...) + if err != nil { + return err + } + + return tlvStream.Encode(w) +} + +// Route represents a path through the channel graph which runs over one or +// more channels in succession. This struct carries all the information +// required to craft the Sphinx onion packet, and send the payment along the +// first hop in the path. A route is only selected as valid if all the channels +// have sufficient capacity to carry the initial payment amount after fees are +// accounted for. +type Route struct { + // TotalTimeLock is the cumulative (final) time lock across the entire + // route. This is the CLTV value that should be extended to the first + // hop in the route. All other hops will decrement the time-lock as + // advertised, leaving enough time for all hops to wait for or present + // the payment preimage to complete the payment. + TotalTimeLock uint32 + + // TotalAmount is the total amount of funds required to complete a + // payment over this route. This value includes the cumulative fees at + // each hop. As a result, the HTLC extended to the first-hop in the + // route will need to have at least this many satoshis, otherwise the + // route will fail at an intermediate node due to an insufficient + // amount of fees. 
+ TotalAmount lnwire.MilliSatoshi + + // SourcePubKey is the pubkey of the node where this route originates + // from. + SourcePubKey Vertex + + // Hops contains details concerning the specific forwarding details at + // each hop. + Hops []*Hop +} + +// HopFee returns the fee charged by the route hop indicated by hopIndex. +func (r *Route) HopFee(hopIndex int) lnwire.MilliSatoshi { + var incomingAmt lnwire.MilliSatoshi + if hopIndex == 0 { + incomingAmt = r.TotalAmount + } else { + incomingAmt = r.Hops[hopIndex-1].AmtToForward + } + + // Fee is calculated as difference between incoming and outgoing amount. + return incomingAmt - r.Hops[hopIndex].AmtToForward +} + +// TotalFees is the sum of the fees paid at each hop within the final route. In +// the case of a one-hop payment, this value will be zero as we don't need to +// pay a fee to ourself. +func (r *Route) TotalFees() lnwire.MilliSatoshi { + if len(r.Hops) == 0 { + return 0 + } + + return r.TotalAmount - r.Hops[len(r.Hops)-1].AmtToForward +} + +// NewRouteFromHops creates a new Route structure from the minimally required +// information to perform the payment. It infers fee amounts and populates the +// node, chan and prev/next hop maps. +func NewRouteFromHops(amtToSend lnwire.MilliSatoshi, timeLock uint32, + sourceVertex Vertex, hops []*Hop) (*Route, error) { + + if len(hops) == 0 { + return nil, ErrNoRouteHopsProvided + } + + // First, we'll create a route struct and populate it with the fields + // for which the values are provided as arguments of this function. + // TotalFees is determined based on the difference between the amount + // that is send from the source and the final amount that is received + // by the destination. 
+ route := &Route{ + SourcePubKey: sourceVertex, + Hops: hops, + TotalTimeLock: timeLock, + TotalAmount: amtToSend, + } + + return route, nil +} + +// ToSphinxPath converts a complete route into a sphinx PaymentPath that +// contains the per-hop paylods used to encoding the HTLC routing data for each +// hop in the route. This method also accepts an optional EOB payload for the +// final hop. +func (r *Route) ToSphinxPath() (*sphinx.PaymentPath, error) { + var path sphinx.PaymentPath + + // For each hop encoded within the route, we'll convert the hop struct + // to an OnionHop with matching per-hop payload within the path as used + // by the sphinx package. + for i, hop := range r.Hops { + pub, err := btcec.ParsePubKey( + hop.PubKeyBytes[:], btcec.S256(), + ) + if err != nil { + return nil, err + } + + // As a base case, the next hop is set to all zeroes in order + // to indicate that the "last hop" as no further hops after it. + nextHop := uint64(0) + + // If we aren't on the last hop, then we set the "next address" + // field to be the channel that directly follows it. + if i != len(r.Hops)-1 { + nextHop = r.Hops[i+1].ChannelID + } + + var payload sphinx.HopPayload + + // If this is the legacy payload, then we can just include the + // hop data as normal. + if hop.LegacyPayload { + // Before we encode this value, we'll pack the next hop + // into the NextAddress field of the hop info to ensure + // we point to the right now. + hopData := sphinx.HopData{ + ForwardAmount: uint64(hop.AmtToForward), + OutgoingCltv: hop.OutgoingTimeLock, + } + binary.BigEndian.PutUint64( + hopData.NextAddress[:], nextHop, + ) + + payload, err = sphinx.NewHopPayload(&hopData, nil) + if err != nil { + return nil, err + } + } else { + // For non-legacy payloads, we'll need to pack the + // routing information, along with any extra TLV + // information into the new per-hop payload format. 
+ // We'll also pass in the chan ID of the hop this + // channel should be forwarded to so we can construct a + // valid payload. + var b bytes.Buffer + err := hop.PackHopPayload(&b, nextHop) + if err != nil { + return nil, err + } + + // TODO(roasbeef): make better API for NewHopPayload? + payload, err = sphinx.NewHopPayload(nil, b.Bytes()) + if err != nil { + return nil, err + } + } + + path[i] = sphinx.OnionHop{ + NodePub: *pub, + HopPayload: payload, + } + } + + return &path, nil +} + +// String returns a human readable representation of the route. +func (r *Route) String() string { + var b strings.Builder + + for i, hop := range r.Hops { + if i > 0 { + b.WriteString(",") + } + b.WriteString(strconv.FormatUint(hop.ChannelID, 10)) + } + + return fmt.Sprintf("amt=%v, fees=%v, tl=%v, chans=%v", + r.TotalAmount-r.TotalFees(), r.TotalFees(), r.TotalTimeLock, + b.String(), + ) +} diff --git a/channeldb/migtest/migtest.go b/channeldb/migtest/migtest.go new file mode 100644 index 0000000000..0b8e14f095 --- /dev/null +++ b/channeldb/migtest/migtest.go @@ -0,0 +1,91 @@ +package migtest + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + "github.com/lightningnetwork/lnd/channeldb/kvdb" +) + +// MakeDB creates a new instance of the ChannelDB for testing purposes. A +// callback which cleans up the created temporary directories is also returned +// and intended to be executed after the test completes. +func MakeDB() (kvdb.Backend, func(), error) { + // Create temporary database for mission control. + file, err := ioutil.TempFile("", "*.db") + if err != nil { + return nil, nil, err + } + + dbPath := file.Name() + db, err := kvdb.Open(kvdb.BoltBackendName, dbPath, true) + if err != nil { + return nil, nil, err + } + + cleanUp := func() { + db.Close() + os.RemoveAll(dbPath) + } + + return db, cleanUp, nil +} + +// ApplyMigration is a helper test function that encapsulates the general steps +// which are needed to properly check the result of applying migration function. 
+func ApplyMigration(t *testing.T, + beforeMigration, afterMigration, migrationFunc func(tx kvdb.RwTx) error, + shouldFail bool) { + + cdb, cleanUp, err := MakeDB() + defer cleanUp() + if err != nil { + t.Fatal(err) + } + + // beforeMigration usually used for populating the database + // with test data. + err = kvdb.Update(cdb, beforeMigration) + if err != nil { + t.Fatal(err) + } + + defer func() { + if r := recover(); r != nil { + err = newError(r) + } + + if err == nil && shouldFail { + t.Fatal("error wasn't received on migration stage") + } else if err != nil && !shouldFail { + t.Fatalf("error was received on migration stage: %v", err) + } + + // afterMigration usually used for checking the database state and + // throwing the error if something went wrong. + err = kvdb.Update(cdb, afterMigration) + if err != nil { + t.Fatal(err) + } + }() + + // Apply migration. + err = kvdb.Update(cdb, migrationFunc) + if err != nil { + t.Fatal(err) + } +} + +func newError(e interface{}) error { + var err error + switch e := e.(type) { + case error: + err = e + default: + err = fmt.Errorf("%v", e) + } + + return err +} diff --git a/channeldb/migtest/raw_db.go b/channeldb/migtest/raw_db.go new file mode 100644 index 0000000000..8f3f1f2211 --- /dev/null +++ b/channeldb/migtest/raw_db.go @@ -0,0 +1,187 @@ +package migtest + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "strings" + + "github.com/lightningnetwork/lnd/channeldb/kvdb" +) + +// DumpDB dumps go code describing the contents of the database to stdout. This +// function is only intended for use during development. 
+// +// Example output: +// +// map[string]interface{}{ +// hex("1234"): map[string]interface{}{ +// "human-readable": hex("102030"), +// hex("1111"): hex("5783492373"), +// }, +// } +func DumpDB(tx kvdb.ReadTx, rootKey []byte) error { + bucket := tx.ReadBucket(rootKey) + if bucket == nil { + return fmt.Errorf("bucket %v not found", string(rootKey)) + } + + return dumpBucket(bucket) +} + +func dumpBucket(bucket kvdb.ReadBucket) error { + fmt.Printf("map[string]interface{} {\n") + err := bucket.ForEach(func(k, v []byte) error { + key := toString(k) + fmt.Printf("%v: ", key) + + subBucket := bucket.NestedReadBucket(k) + if subBucket != nil { + err := dumpBucket(subBucket) + if err != nil { + return err + } + } else { + fmt.Print(toHex(v)) + } + fmt.Printf(",\n") + + return nil + }) + if err != nil { + return err + } + fmt.Printf("}") + + return nil +} + +// RestoreDB primes the database with the given data set. +func RestoreDB(tx kvdb.RwTx, rootKey []byte, data map[string]interface{}) error { + bucket, err := tx.CreateTopLevelBucket(rootKey) + if err != nil { + return err + } + + return restoreDB(bucket, data) +} + +func restoreDB(bucket kvdb.RwBucket, data map[string]interface{}) error { + for k, v := range data { + key := []byte(k) + + switch value := v.(type) { + + // Key contains value. + case string: + err := bucket.Put(key, []byte(value)) + if err != nil { + return err + } + + // Key contains a sub-bucket. + case map[string]interface{}: + subBucket, err := bucket.CreateBucket(key) + if err != nil { + return err + } + + if err := restoreDB(subBucket, value); err != nil { + return err + } + + default: + return errors.New("invalid type") + } + } + + return nil +} + +// VerifyDB verifies the database against the given data set. 
+func VerifyDB(tx kvdb.ReadTx, rootKey []byte, data map[string]interface{}) error { + bucket := tx.ReadBucket(rootKey) + if bucket == nil { + return fmt.Errorf("bucket %v not found", string(rootKey)) + } + + return verifyDB(bucket, data) +} + +func verifyDB(bucket kvdb.ReadBucket, data map[string]interface{}) error { + for k, v := range data { + key := []byte(k) + + switch value := v.(type) { + + // Key contains value. + case string: + expectedValue := []byte(value) + dbValue := bucket.Get(key) + + if !bytes.Equal(dbValue, expectedValue) { + return errors.New("value mismatch") + } + + // Key contains a sub-bucket. + case map[string]interface{}: + subBucket := bucket.NestedReadBucket(key) + if subBucket == nil { + return fmt.Errorf("bucket %v not found", k) + } + + err := verifyDB(subBucket, value) + if err != nil { + return err + } + + default: + return errors.New("invalid type") + } + } + + keyCount := 0 + err := bucket.ForEach(func(k, v []byte) error { + keyCount++ + return nil + }) + if err != nil { + return err + } + if keyCount != len(data) { + return errors.New("unexpected keys in database") + } + + return nil +} + +func toHex(v []byte) string { + if len(v) == 0 { + return "nil" + } + + return "hex(\"" + hex.EncodeToString(v) + "\")" +} + +func toString(v []byte) string { + readableChars := "abcdefghijklmnopqrstuvwxyz0123456789-" + + for _, c := range v { + if !strings.Contains(readableChars, string(c)) { + return toHex(v) + } + } + + return "\"" + string(v) + "\"" +} + +// Hex is a test helper function to convert readable hex arrays to raw byte +// strings. 
+func Hex(value string) string { + b, err := hex.DecodeString(value) + if err != nil { + panic(err) + } + return string(b) +} diff --git a/channeldb/mp_payment.go b/channeldb/mp_payment.go new file mode 100644 index 0000000000..fd89f3ab45 --- /dev/null +++ b/channeldb/mp_payment.go @@ -0,0 +1,299 @@ +package channeldb + +import ( + "bytes" + "io" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +// HTLCAttemptInfo contains static information about a specific HTLC attempt +// for a payment. This information is used by the router to handle any errors +// coming back after an attempt is made, and to query the switch about the +// status of the attempt. +type HTLCAttemptInfo struct { + // AttemptID is the unique ID used for this attempt. + AttemptID uint64 + + // SessionKey is the ephemeral key used for this attempt. + SessionKey *btcec.PrivateKey + + // Route is the route attempted to send the HTLC. + Route route.Route + + // AttemptTime is the time at which this HTLC was attempted. + AttemptTime time.Time +} + +// HTLCAttempt contains information about a specific HTLC attempt for a given +// payment. It contains the HTLCAttemptInfo used to send the HTLC, as well +// as a timestamp and any known outcome of the attempt. +type HTLCAttempt struct { + HTLCAttemptInfo + + // Settle is the preimage of a successful payment. This serves as a + // proof of payment. It will only be non-nil for settled payments. + // + // NOTE: Can be nil if payment is not settled. + Settle *HTLCSettleInfo + + // Fail is a failure reason code indicating the reason the payment + // failed. It is only non-nil for failed payments. + // + // NOTE: Can be nil if payment is not failed. 
+ Failure *HTLCFailInfo +} + +// HTLCSettleInfo encapsulates the information that augments an HTLCAttempt in +// the event that the HTLC is successful. +type HTLCSettleInfo struct { + // Preimage is the preimage of a successful HTLC. This serves as a proof + // of payment. + Preimage lntypes.Preimage + + // SettleTime is the time at which this HTLC was settled. + SettleTime time.Time +} + +// HTLCFailReason is the reason an htlc failed. +type HTLCFailReason byte + +const ( + // HTLCFailUnknown is recorded for htlcs that failed with an unknown + // reason. + HTLCFailUnknown HTLCFailReason = 0 + + // HTLCFailUnknown is recorded for htlcs that had a failure message that + // couldn't be decrypted. + HTLCFailUnreadable HTLCFailReason = 1 + + // HTLCFailInternal is recorded for htlcs that failed because of an + // internal error. + HTLCFailInternal HTLCFailReason = 2 + + // HTLCFailMessage is recorded for htlcs that failed with a network + // failure message. + HTLCFailMessage HTLCFailReason = 3 +) + +// HTLCFailInfo encapsulates the information that augments an HTLCAttempt in the +// event that the HTLC fails. +type HTLCFailInfo struct { + // FailTime is the time at which this HTLC was failed. + FailTime time.Time + + // Message is the wire message that failed this HTLC. This field will be + // populated when the failure reason is HTLCFailMessage. + Message lnwire.FailureMessage + + // Reason is the failure reason for this HTLC. + Reason HTLCFailReason + + // The position in the path of the intermediate or final node that + // generated the failure message. Position zero is the sender node. This + // field will be populated when the failure reason is either + // HTLCFailMessage or HTLCFailUnknown. + FailureSourceIndex uint32 +} + +// MPPayment is a wrapper around a payment's PaymentCreationInfo and +// HTLCAttempts. All payments will have the PaymentCreationInfo set, any +// HTLCs made in attempts to be completed will populated in the HTLCs slice. 
+// Each populated HTLCAttempt represents an attempted HTLC, each of which may +// have the associated Settle or Fail struct populated if the HTLC is no longer +// in-flight. +type MPPayment struct { + // SequenceNum is a unique identifier used to sort the payments in + // order of creation. + SequenceNum uint64 + + // Info holds all static information about this payment, and is + // populated when the payment is initiated. + Info *PaymentCreationInfo + + // HTLCs holds the information about individual HTLCs that we send in + // order to make the payment. + HTLCs []HTLCAttempt + + // FailureReason is the failure reason code indicating the reason the + // payment failed. + // + // NOTE: Will only be set once the daemon has given up on the payment + // altogether. + FailureReason *FailureReason + + // Status is the current PaymentStatus of this payment. + Status PaymentStatus +} + +// TerminalInfo returns any HTLC settle info recorded. If no settle info is +// recorded, any payment level failure will be returned. If neither a settle +// nor a failure is recorded, both return values will be nil. +func (m *MPPayment) TerminalInfo() (*HTLCSettleInfo, *FailureReason) { + for _, h := range m.HTLCs { + if h.Settle != nil { + return h.Settle, nil + } + } + + return nil, m.FailureReason +} + +// SentAmt returns the sum of sent amount and fees for HTLCs that are either +// settled or still in flight. +func (m *MPPayment) SentAmt() (lnwire.MilliSatoshi, lnwire.MilliSatoshi) { + var sent, fees lnwire.MilliSatoshi + for _, h := range m.HTLCs { + if h.Failure != nil { + continue + } + + // The attempt was not failed, meaning the amount was + // potentially sent to the receiver. + sent += h.Route.ReceiverAmt() + fees += h.Route.TotalFees() + } + + return sent, fees +} + +// InFlightHTLCs returns the HTLCs that are still in-flight, meaning they have +// not been settled or failed. 
+func (m *MPPayment) InFlightHTLCs() []HTLCAttempt { + var inflights []HTLCAttempt + for _, h := range m.HTLCs { + if h.Settle != nil || h.Failure != nil { + continue + } + + inflights = append(inflights, h) + } + + return inflights +} + +// serializeHTLCSettleInfo serializes the details of a settled htlc. +func serializeHTLCSettleInfo(w io.Writer, s *HTLCSettleInfo) error { + if _, err := w.Write(s.Preimage[:]); err != nil { + return err + } + + if err := serializeTime(w, s.SettleTime); err != nil { + return err + } + + return nil +} + +// deserializeHTLCSettleInfo deserializes the details of a settled htlc. +func deserializeHTLCSettleInfo(r io.Reader) (*HTLCSettleInfo, error) { + s := &HTLCSettleInfo{} + if _, err := io.ReadFull(r, s.Preimage[:]); err != nil { + return nil, err + } + + var err error + s.SettleTime, err = deserializeTime(r) + if err != nil { + return nil, err + } + + return s, nil +} + +// serializeHTLCFailInfo serializes the details of a failed htlc including the +// wire failure. +func serializeHTLCFailInfo(w io.Writer, f *HTLCFailInfo) error { + if err := serializeTime(w, f.FailTime); err != nil { + return err + } + + // Write failure. If there is no failure message, write an empty + // byte slice. + var messageBytes bytes.Buffer + if f.Message != nil { + err := lnwire.EncodeFailureMessage(&messageBytes, f.Message, 0) + if err != nil { + return err + } + } + if err := wire.WriteVarBytes(w, 0, messageBytes.Bytes()); err != nil { + return err + } + + return WriteElements(w, byte(f.Reason), f.FailureSourceIndex) +} + +// deserializeHTLCFailInfo deserializes the details of a failed htlc including +// the wire failure. +func deserializeHTLCFailInfo(r io.Reader) (*HTLCFailInfo, error) { + f := &HTLCFailInfo{} + var err error + f.FailTime, err = deserializeTime(r) + if err != nil { + return nil, err + } + + // Read failure. 
+ failureBytes, err := wire.ReadVarBytes( + r, 0, lnwire.FailureMessageLength, "failure", + ) + if err != nil { + return nil, err + } + if len(failureBytes) > 0 { + f.Message, err = lnwire.DecodeFailureMessage( + bytes.NewReader(failureBytes), 0, + ) + if err != nil { + return nil, err + } + } + + var reason byte + err = ReadElements(r, &reason, &f.FailureSourceIndex) + if err != nil { + return nil, err + } + f.Reason = HTLCFailReason(reason) + + return f, nil +} + +// deserializeTime deserializes time as unix nanoseconds. +func deserializeTime(r io.Reader) (time.Time, error) { + var scratch [8]byte + if _, err := io.ReadFull(r, scratch[:]); err != nil { + return time.Time{}, err + } + + // Convert to time.Time. Interpret unix nano time zero as a zero + // time.Time value. + unixNano := byteOrder.Uint64(scratch[:]) + if unixNano == 0 { + return time.Time{}, nil + } + + return time.Unix(0, int64(unixNano)), nil +} + +// serializeTime serializes time as unix nanoseconds. +func serializeTime(w io.Writer, t time.Time) error { + var scratch [8]byte + + // Convert to unix nano seconds, but only if time is non-zero. Calling + // UnixNano() on a zero time yields an undefined result. + var unixNano int64 + if !t.IsZero() { + unixNano = t.UnixNano() + } + + byteOrder.PutUint64(scratch[:], uint64(unixNano)) + _, err := w.Write(scratch[:]) + return err +} diff --git a/channeldb/nodes.go b/channeldb/nodes.go index 95f6f7a263..57197fb384 100644 --- a/channeldb/nodes.go +++ b/channeldb/nodes.go @@ -8,7 +8,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) var ( @@ -101,8 +101,8 @@ func (l *LinkNode) Sync() error { // Finally update the database by storing the link node and updating // any relevant indexes. 
- return l.db.Update(func(tx *bbolt.Tx) error { - nodeMetaBucket := tx.Bucket(nodeInfoBucket) + return kvdb.Update(l.db, func(tx kvdb.RwTx) error { + nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket) if nodeMetaBucket == nil { return ErrLinkNodesNotFound } @@ -114,7 +114,7 @@ func (l *LinkNode) Sync() error { // putLinkNode serializes then writes the encoded version of the passed link // node into the nodeMetaBucket. This function is provided in order to allow // the ability to re-use a database transaction across many operations. -func putLinkNode(nodeMetaBucket *bbolt.Bucket, l *LinkNode) error { +func putLinkNode(nodeMetaBucket kvdb.RwBucket, l *LinkNode) error { // First serialize the LinkNode into its raw-bytes encoding. var b bytes.Buffer if err := serializeLinkNode(&b, l); err != nil { @@ -130,13 +130,13 @@ func putLinkNode(nodeMetaBucket *bbolt.Bucket, l *LinkNode) error { // DeleteLinkNode removes the link node with the given identity from the // database. func (db *DB) DeleteLinkNode(identity *btcec.PublicKey) error { - return db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(db, func(tx kvdb.RwTx) error { return db.deleteLinkNode(tx, identity) }) } -func (db *DB) deleteLinkNode(tx *bbolt.Tx, identity *btcec.PublicKey) error { - nodeMetaBucket := tx.Bucket(nodeInfoBucket) +func (db *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) error { + nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket) if nodeMetaBucket == nil { return ErrLinkNodesNotFound } @@ -150,7 +150,7 @@ func (db *DB) deleteLinkNode(tx *bbolt.Tx, identity *btcec.PublicKey) error { // key cannot be found, then ErrNodeNotFound if returned. 
func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) { var linkNode *LinkNode - err := db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(db, func(tx kvdb.ReadTx) error { node, err := fetchLinkNode(tx, identity) if err != nil { return err @@ -163,10 +163,10 @@ func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) { return linkNode, err } -func fetchLinkNode(tx *bbolt.Tx, targetPub *btcec.PublicKey) (*LinkNode, error) { +func fetchLinkNode(tx kvdb.ReadTx, targetPub *btcec.PublicKey) (*LinkNode, error) { // First fetch the bucket for storing node metadata, bailing out early // if it hasn't been created yet. - nodeMetaBucket := tx.Bucket(nodeInfoBucket) + nodeMetaBucket := tx.ReadBucket(nodeInfoBucket) if nodeMetaBucket == nil { return nil, ErrLinkNodesNotFound } @@ -191,7 +191,7 @@ func fetchLinkNode(tx *bbolt.Tx, targetPub *btcec.PublicKey) (*LinkNode, error) // whom we have active channels with. func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) { var linkNodes []*LinkNode - err := db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(db, func(tx kvdb.ReadTx) error { nodes, err := db.fetchAllLinkNodes(tx) if err != nil { return err @@ -209,8 +209,8 @@ func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) { // fetchAllLinkNodes uses an existing database transaction to fetch all nodes // with whom we have active channels with. 
-func (db *DB) fetchAllLinkNodes(tx *bbolt.Tx) ([]*LinkNode, error) { - nodeMetaBucket := tx.Bucket(nodeInfoBucket) +func (db *DB) fetchAllLinkNodes(tx kvdb.ReadTx) ([]*LinkNode, error) { + nodeMetaBucket := tx.ReadBucket(nodeInfoBucket) if nodeMetaBucket == nil { return nil, ErrLinkNodesNotFound } diff --git a/channeldb/options.go b/channeldb/options.go index 38ac05efdd..90185f2cd0 100644 --- a/channeldb/options.go +++ b/channeldb/options.go @@ -1,5 +1,7 @@ package channeldb +import "github.com/lightningnetwork/lnd/clock" + const ( // DefaultRejectCacheSize is the default number of rejectCacheEntries to // cache for use in the rejection cache of incoming gossip traffic. This @@ -26,6 +28,9 @@ type Options struct { // freelist to disk, resulting in improved performance at the expense of // increased startup time. NoFreelistSync bool + + // clock is the time source used by the database. + clock clock.Clock } // DefaultOptions returns an Options populated with default values. @@ -34,6 +39,7 @@ func DefaultOptions() Options { RejectCacheSize: DefaultRejectCacheSize, ChannelCacheSize: DefaultChannelCacheSize, NoFreelistSync: true, + clock: clock.NewDefaultClock(), } } @@ -60,3 +66,10 @@ func OptionSetSyncFreelist(b bool) OptionModifier { o.NoFreelistSync = !b } } + +// OptionClock sets a non-default clock dependency. +func OptionClock(clock clock.Clock) OptionModifier { + return func(o *Options) { + o.clock = clock + } +} diff --git a/channeldb/payment_control.go b/channeldb/payment_control.go index 13c09369cc..1ba8466877 100644 --- a/channeldb/payment_control.go +++ b/channeldb/payment_control.go @@ -6,9 +6,8 @@ import ( "errors" "fmt" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lntypes" - "github.com/lightningnetwork/lnd/routing/route" ) var ( @@ -19,22 +18,59 @@ var ( // already "in flight" on the network. 
ErrPaymentInFlight = errors.New("payment is in transition") - // ErrPaymentNotInitiated is returned if payment wasn't initiated in - // switch. + // ErrPaymentNotInitiated is returned if the payment wasn't initiated. ErrPaymentNotInitiated = errors.New("payment isn't initiated") // ErrPaymentAlreadySucceeded is returned in the event we attempt to // change the status of a payment already succeeded. ErrPaymentAlreadySucceeded = errors.New("payment is already succeeded") - // ErrPaymentAlreadyFailed is returned in the event we attempt to - // re-fail a failed payment. + // ErrPaymentAlreadyFailed is returned in the event we attempt to alter + // a failed payment. ErrPaymentAlreadyFailed = errors.New("payment has already failed") // ErrUnknownPaymentStatus is returned when we do not recognize the // existing state of a payment. ErrUnknownPaymentStatus = errors.New("unknown payment status") + // ErrPaymentTerminal is returned if we attempt to alter a payment that + // already has reached a terminal condition. + ErrPaymentTerminal = errors.New("payment has reached terminal condition") + + // ErrAttemptAlreadySettled is returned if we try to alter an already + // settled HTLC attempt. + ErrAttemptAlreadySettled = errors.New("attempt already settled") + + // ErrAttemptAlreadyFailed is returned if we try to alter an already + // failed HTLC attempt. + ErrAttemptAlreadyFailed = errors.New("attempt already failed") + + // ErrValueMismatch is returned if we try to register a non-MPP attempt + // with an amount that doesn't match the payment amount. + ErrValueMismatch = errors.New("attempted value doesn't match payment" + + "amount") + + // ErrValueExceedsAmt is returned if we try to register an attempt that + // would take the total sent amount above the payment amount. 
+ ErrValueExceedsAmt = errors.New("attempted value exceeds payment" + + "amount") + + // ErrNonMPPayment is returned if we try to register an MPP attempt for + // a payment that already has a non-MPP attempt regitered. + ErrNonMPPayment = errors.New("payment has non-MPP attempts") + + // ErrMPPayment is returned if we try to register a non-MPP attempt for + // a payment that already has an MPP attempt regitered. + ErrMPPayment = errors.New("payment has MPP attempts") + + // ErrMPPPaymentAddrMismatch is returned if we try to register an MPP + // shard where the payment address doesn't match existing shards. + ErrMPPPaymentAddrMismatch = errors.New("payment address mismatch") + + // ErrMPPTotalAmountMismatch is returned if we try to register an MPP + // shard where the total amount doesn't match existing shards. + ErrMPPTotalAmountMismatch = errors.New("mp payment total amount mismatch") + // errNoAttemptInfo is returned when no attempt info is stored yet. errNoAttemptInfo = errors.New("unable to find attempt info for " + "inflight payment") @@ -53,7 +89,7 @@ func NewPaymentControl(db *DB) *PaymentControl { } // InitPayment checks or records the given PaymentCreationInfo with the DB, -// making sure it does not already exist as an in-flight payment. Then this +// making sure it does not already exist as an in-flight payment. When this // method returns successfully, the payment is guranteeed to be in the InFlight // state. func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash, @@ -66,7 +102,7 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash, infoBytes := b.Bytes() var updateErr error - err := p.db.Batch(func(tx *bbolt.Tx) error { + err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error { // Reset the update error, to avoid carrying over an error // from a previous execution of the batched db transaction. 
updateErr = nil @@ -77,7 +113,10 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash, } // Get the existing status of this payment, if any. - paymentStatus := fetchPaymentStatus(bucket) + paymentStatus, err := fetchPaymentStatus(bucket) + if err != nil { + return err + } switch paymentStatus { @@ -125,11 +164,11 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash, return err } - // We'll delete any lingering attempt info to start with, in - // case we are initializing a payment that was attempted - // earlier, but left in a state where we could retry. - err = bucket.Delete(paymentAttemptInfoKey) - if err != nil { + // We'll delete any lingering HTLCs to start with, in case we + // are initializing a payment that was attempted earlier, but + // left in a state where we could retry. + err = bucket.DeleteNestedBucket(paymentHtlcsBucket) + if err != nil && err != kvdb.ErrBucketNotFound { return err } @@ -144,101 +183,217 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash, return updateErr } -// RegisterAttempt atomically records the provided PaymentAttemptInfo to the +// RegisterAttempt atomically records the provided HTLCAttemptInfo to the // DB. func (p *PaymentControl) RegisterAttempt(paymentHash lntypes.Hash, - attempt *PaymentAttemptInfo) error { + attempt *HTLCAttemptInfo) (*MPPayment, error) { // Serialize the information before opening the db transaction. var a bytes.Buffer - if err := serializePaymentAttemptInfo(&a, attempt); err != nil { - return err + err := serializeHTLCAttemptInfo(&a, attempt) + if err != nil { + return nil, err } - attemptBytes := a.Bytes() + htlcInfoBytes := a.Bytes() - var updateErr error - err := p.db.Batch(func(tx *bbolt.Tx) error { - // Reset the update error, to avoid carrying over an error - // from a previous execution of the batched db transaction. 
- updateErr = nil + htlcIDBytes := make([]byte, 8) + binary.BigEndian.PutUint64(htlcIDBytes, attempt.AttemptID) - bucket, err := fetchPaymentBucket(tx, paymentHash) - if err == ErrPaymentNotInitiated { - updateErr = ErrPaymentNotInitiated - return nil - } else if err != nil { + var payment *MPPayment + err = kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error { + bucket, err := fetchPaymentBucketUpdate(tx, paymentHash) + if err != nil { return err } - // We can only register attempts for payments that are - // in-flight. - if err := ensureInFlight(bucket); err != nil { - updateErr = err - return nil + p, err := fetchPayment(bucket) + if err != nil { + return err + } + + // Ensure the payment is in-flight. + if err := ensureInFlight(p); err != nil { + return err + } + + // We cannot register a new attempt if the payment already has + // reached a terminal condition: + settle, fail := p.TerminalInfo() + if settle != nil || fail != nil { + return ErrPaymentTerminal + } + + // Make sure any existing shards match the new one with regards + // to MPP options. + mpp := attempt.Route.FinalHop().MPP + for _, h := range p.InFlightHTLCs() { + hMpp := h.Route.FinalHop().MPP + + switch { + + // We tried to register a non-MPP attempt for a MPP + // payment. + case mpp == nil && hMpp != nil: + return ErrMPPayment + + // We tried to register a MPP shard for a non-MPP + // payment. + case mpp != nil && hMpp == nil: + return ErrNonMPPayment + + // Non-MPP payment, nothing more to validate. + case mpp == nil: + continue + } + + // Check that MPP options match. + if mpp.PaymentAddr() != hMpp.PaymentAddr() { + return ErrMPPPaymentAddrMismatch + } + + if mpp.TotalMsat() != hMpp.TotalMsat() { + return ErrMPPTotalAmountMismatch + } + } + + // If this is a non-MPP attempt, it must match the total amount + // exactly. + amt := attempt.Route.ReceiverAmt() + if mpp == nil && amt != p.Info.Value { + return ErrValueMismatch + } + + // Ensure we aren't sending more than the total payment amount. 
+ sentAmt, _ := p.SentAmt() + if sentAmt+amt > p.Info.Value { + return ErrValueExceedsAmt + } + + htlcsBucket, err := bucket.CreateBucketIfNotExists( + paymentHtlcsBucket, + ) + if err != nil { + return err + } + + // Create bucket for this attempt. Fail if the bucket already + // exists. + htlcBucket, err := htlcsBucket.CreateBucket(htlcIDBytes) + if err != nil { + return err } - // Add the payment attempt to the payments bucket. - return bucket.Put(paymentAttemptInfoKey, attemptBytes) + err = htlcBucket.Put(htlcAttemptInfoKey, htlcInfoBytes) + if err != nil { + return err + } + + // Retrieve attempt info for the notification. + payment, err = fetchPayment(bucket) + return err }) if err != nil { - return err + return nil, err } - return updateErr + return payment, err } -// Success transitions a payment into the Succeeded state. After invoking this -// method, InitPayment should always return an error to prevent us from making -// duplicate payments to the same payment hash. The provided preimage is -// atomically saved to the DB for record keeping. -func (p *PaymentControl) Success(paymentHash lntypes.Hash, - preimage lntypes.Preimage) (*route.Route, error) { +// SettleAttempt marks the given attempt settled with the preimage. If this is +// a multi shard payment, this might implicitly mean that the full payment +// succeeded. +// +// After invoking this method, InitPayment should always return an error to +// prevent us from making duplicate payments to the same payment hash. The +// provided preimage is atomically saved to the DB for record keeping. +func (p *PaymentControl) SettleAttempt(hash lntypes.Hash, + attemptID uint64, settleInfo *HTLCSettleInfo) (*MPPayment, error) { - var ( - updateErr error - route *route.Route - ) - err := p.db.Batch(func(tx *bbolt.Tx) error { - // Reset the update error, to avoid carrying over an error - // from a previous execution of the batched db transaction. 
- updateErr = nil + var b bytes.Buffer + if err := serializeHTLCSettleInfo(&b, settleInfo); err != nil { + return nil, err + } + settleBytes := b.Bytes() - bucket, err := fetchPaymentBucket(tx, paymentHash) - if err == ErrPaymentNotInitiated { - updateErr = ErrPaymentNotInitiated - return nil - } else if err != nil { + return p.updateHtlcKey(hash, attemptID, htlcSettleInfoKey, settleBytes) +} + +// FailAttempt marks the given payment attempt failed. +func (p *PaymentControl) FailAttempt(hash lntypes.Hash, + attemptID uint64, failInfo *HTLCFailInfo) (*MPPayment, error) { + + var b bytes.Buffer + if err := serializeHTLCFailInfo(&b, failInfo); err != nil { + return nil, err + } + failBytes := b.Bytes() + + return p.updateHtlcKey(hash, attemptID, htlcFailInfoKey, failBytes) +} + +// updateHtlcKey updates a database key for the specified htlc. +func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash, + attemptID uint64, key, value []byte) (*MPPayment, error) { + + htlcIDBytes := make([]byte, 8) + binary.BigEndian.PutUint64(htlcIDBytes, attemptID) + + var payment *MPPayment + err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error { + payment = nil + + bucket, err := fetchPaymentBucketUpdate(tx, paymentHash) + if err != nil { return err } - // We can only mark in-flight payments as succeeded. - if err := ensureInFlight(bucket); err != nil { - updateErr = err - return nil + p, err := fetchPayment(bucket) + if err != nil { + return err } - // Record the successful payment info atomically to the - // payments record. - err = bucket.Put(paymentSettleInfoKey, preimage[:]) - if err != nil { + // We can only update keys of in-flight payments. We allow + // updating keys even if the payment has reached a terminal + // condition, since the HTLC outcomes must still be updated. + if err := ensureInFlight(p); err != nil { return err } - // Retrieve attempt info for the notification. 
- attempt, err := fetchPaymentAttempt(bucket) + htlcsBucket := bucket.NestedReadWriteBucket(paymentHtlcsBucket) + if htlcsBucket == nil { + return fmt.Errorf("htlcs bucket not found") + } + + htlcBucket := htlcsBucket.NestedReadWriteBucket(htlcIDBytes) + if htlcBucket == nil { + return fmt.Errorf("HTLC with ID %v not registered", + attemptID) + } + + // Make sure the shard is not already failed or settled. + if htlcBucket.Get(htlcFailInfoKey) != nil { + return ErrAttemptAlreadyFailed + } + + if htlcBucket.Get(htlcSettleInfoKey) != nil { + return ErrAttemptAlreadySettled + } + + // Add or update the key for this htlc. + err = htlcBucket.Put(key, value) if err != nil { return err } - route = &attempt.Route - - return nil + // Retrieve attempt info for the notification. + payment, err = fetchPayment(bucket) + return err }) if err != nil { return nil, err } - return route, updateErr + return payment, err } // Fail transitions a payment into the Failed state, and records the reason the @@ -246,18 +401,19 @@ func (p *PaymentControl) Success(paymentHash lntypes.Hash, // its next call for this payment hash, allowing the switch to make a // subsequent payment. func (p *PaymentControl) Fail(paymentHash lntypes.Hash, - reason FailureReason) (*route.Route, error) { + reason FailureReason) (*MPPayment, error) { var ( updateErr error - route *route.Route + payment *MPPayment ) - err := p.db.Batch(func(tx *bbolt.Tx) error { + err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error { // Reset the update error, to avoid carrying over an error // from a previous execution of the batched db transaction. updateErr = nil + payment = nil - bucket, err := fetchPaymentBucket(tx, paymentHash) + bucket, err := fetchPaymentBucketUpdate(tx, paymentHash) if err == ErrPaymentNotInitiated { updateErr = ErrPaymentNotInitiated return nil @@ -265,9 +421,17 @@ func (p *PaymentControl) Fail(paymentHash lntypes.Hash, return err } - // We can only mark in-flight payments as failed. 
- if err := ensureInFlight(bucket); err != nil { - updateErr = err + // We mark the payent as failed as long as it is known. This + // lets the last attempt to fail with a terminal write its + // failure to the PaymentControl without synchronizing with + // other attempts. + paymentStatus, err := fetchPaymentStatus(bucket) + if err != nil { + return err + } + + if paymentStatus == StatusUnknown { + updateErr = ErrPaymentNotInitiated return nil } @@ -279,13 +443,10 @@ func (p *PaymentControl) Fail(paymentHash lntypes.Hash, } // Retrieve attempt info for the notification, if available. - attempt, err := fetchPaymentAttempt(bucket) - if err != nil && err != errNoAttemptInfo { + payment, err = fetchPayment(bucket) + if err != nil { return err } - if err != errNoAttemptInfo { - route = &attempt.Route - } return nil }) @@ -293,15 +454,15 @@ func (p *PaymentControl) Fail(paymentHash lntypes.Hash, return nil, err } - return route, updateErr + return payment, updateErr } // FetchPayment returns information about a payment from the database. func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) ( - *Payment, error) { + *MPPayment, error) { - var payment *Payment - err := p.db.View(func(tx *bbolt.Tx) error { + var payment *MPPayment + err := kvdb.View(p.db, func(tx kvdb.ReadTx) error { bucket, err := fetchPaymentBucket(tx, paymentHash) if err != nil { return err @@ -320,10 +481,10 @@ func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) ( // createPaymentBucket creates or fetches the sub-bucket assigned to this // payment hash. 
-func createPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) ( - *bbolt.Bucket, error) { +func createPaymentBucket(tx kvdb.RwTx, paymentHash lntypes.Hash) ( + kvdb.RwBucket, error) { - payments, err := tx.CreateBucketIfNotExists(paymentsRootBucket) + payments, err := tx.CreateTopLevelBucket(paymentsRootBucket) if err != nil { return nil, err } @@ -333,15 +494,15 @@ func createPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) ( // fetchPaymentBucket fetches the sub-bucket assigned to this payment hash. If // the bucket does not exist, it returns ErrPaymentNotInitiated. -func fetchPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) ( - *bbolt.Bucket, error) { +func fetchPaymentBucket(tx kvdb.ReadTx, paymentHash lntypes.Hash) ( + kvdb.ReadBucket, error) { - payments := tx.Bucket(paymentsRootBucket) + payments := tx.ReadBucket(paymentsRootBucket) if payments == nil { return nil, ErrPaymentNotInitiated } - bucket := payments.Bucket(paymentHash[:]) + bucket := payments.NestedReadBucket(paymentHash[:]) if bucket == nil { return nil, ErrPaymentNotInitiated } @@ -350,10 +511,28 @@ func fetchPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) ( } +// fetchPaymentBucketUpdate is identical to fetchPaymentBucket, but it returns a +// bucket that can be written to. +func fetchPaymentBucketUpdate(tx kvdb.RwTx, paymentHash lntypes.Hash) ( + kvdb.RwBucket, error) { + + payments := tx.ReadWriteBucket(paymentsRootBucket) + if payments == nil { + return nil, ErrPaymentNotInitiated + } + + bucket := payments.NestedReadWriteBucket(paymentHash[:]) + if bucket == nil { + return nil, ErrPaymentNotInitiated + } + + return bucket, nil +} + // nextPaymentSequence returns the next sequence number to store for a new // payment. 
-func nextPaymentSequence(tx *bbolt.Tx) ([]byte, error) { - payments, err := tx.CreateBucketIfNotExists(paymentsRootBucket) +func nextPaymentSequence(tx kvdb.RwTx) ([]byte, error) { + payments, err := tx.CreateTopLevelBucket(paymentsRootBucket) if err != nil { return nil, err } @@ -370,31 +549,30 @@ func nextPaymentSequence(tx *bbolt.Tx) ([]byte, error) { // fetchPaymentStatus fetches the payment status of the payment. If the payment // isn't found, it will default to "StatusUnknown". -func fetchPaymentStatus(bucket *bbolt.Bucket) PaymentStatus { - if bucket.Get(paymentSettleInfoKey) != nil { - return StatusSucceeded +func fetchPaymentStatus(bucket kvdb.ReadBucket) (PaymentStatus, error) { + // Creation info should be set for all payments, regardless of state. + // If not, it is unknown. + if bucket.Get(paymentCreationInfoKey) == nil { + return StatusUnknown, nil } - if bucket.Get(paymentFailInfoKey) != nil { - return StatusFailed - } - - if bucket.Get(paymentCreationInfoKey) != nil { - return StatusInFlight + payment, err := fetchPayment(bucket) + if err != nil { + return 0, err } - return StatusUnknown + return payment.Status, nil } // ensureInFlight checks whether the payment found in the given bucket has // status InFlight, and returns an error otherwise. This should be used to // ensure we only mark in-flight payments as succeeded or failed. -func ensureInFlight(bucket *bbolt.Bucket) error { - paymentStatus := fetchPaymentStatus(bucket) +func ensureInFlight(payment *MPPayment) error { + paymentStatus := payment.Status switch { - // The payment was indeed InFlight, return. + // The payment was indeed InFlight. case paymentStatus == StatusInFlight: return nil @@ -416,73 +594,76 @@ func ensureInFlight(bucket *bbolt.Bucket) error { } } -// fetchPaymentAttempt fetches the payment attempt from the bucket. 
-func fetchPaymentAttempt(bucket *bbolt.Bucket) (*PaymentAttemptInfo, error) { - attemptData := bucket.Get(paymentAttemptInfoKey) - if attemptData == nil { - return nil, errNoAttemptInfo - } - - r := bytes.NewReader(attemptData) - return deserializePaymentAttemptInfo(r) -} - // InFlightPayment is a wrapper around a payment that has status InFlight. type InFlightPayment struct { // Info is the PaymentCreationInfo of the in-flight payment. Info *PaymentCreationInfo - // Attempt contains information about the last payment attempt that was - // made to this payment hash. + // Attempts is the set of payment attempts that was made to this + // payment hash. // - // NOTE: Might be nil. - Attempt *PaymentAttemptInfo + // NOTE: Might be empty. + Attempts []HTLCAttemptInfo } // FetchInFlightPayments returns all payments with status InFlight. func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, error) { var inFlights []*InFlightPayment - err := p.db.View(func(tx *bbolt.Tx) error { - payments := tx.Bucket(paymentsRootBucket) + err := kvdb.View(p.db, func(tx kvdb.ReadTx) error { + payments := tx.ReadBucket(paymentsRootBucket) if payments == nil { return nil } return payments.ForEach(func(k, _ []byte) error { - bucket := payments.Bucket(k) + bucket := payments.NestedReadBucket(k) if bucket == nil { return fmt.Errorf("non bucket element") } // If the status is not InFlight, we can return early. - paymentStatus := fetchPaymentStatus(bucket) + paymentStatus, err := fetchPaymentStatus(bucket) + if err != nil { + return err + } + if paymentStatus != StatusInFlight { return nil } - var ( - inFlight = &InFlightPayment{} - err error - ) + inFlight := &InFlightPayment{} // Get the CreationInfo. 
- b := bucket.Get(paymentCreationInfoKey) - if b == nil { - return fmt.Errorf("unable to find creation " + - "info for inflight payment") + inFlight.Info, err = fetchCreationInfo(bucket) + if err != nil { + return err } - r := bytes.NewReader(b) - inFlight.Info, err = deserializePaymentCreationInfo(r) + htlcsBucket := bucket.NestedReadBucket( + paymentHtlcsBucket, + ) + if htlcsBucket == nil { + return nil + } + + // Fetch all HTLCs attempted for this payment. + htlcs, err := fetchHtlcAttempts(htlcsBucket) if err != nil { return err } - // Now get the attempt info. It could be that there is - // no attempt info yet. - inFlight.Attempt, err = fetchPaymentAttempt(bucket) - if err != nil && err != errNoAttemptInfo { - return err + // We only care about the static info for the HTLCs + // still in flight, so convert the result to a slice of + // HTLCAttemptInfos. + for _, h := range htlcs { + // Skip HTLCs not in flight. + if h.Settle != nil || h.Failure != nil { + continue + } + + inFlight.Attempts = append( + inFlight.Attempts, h.HTLCAttemptInfo, + ) } inFlights = append(inFlights, inFlight) diff --git a/channeldb/payment_control_test.go b/channeldb/payment_control_test.go index 479e68988b..95862f5e29 100644 --- a/channeldb/payment_control_test.go +++ b/channeldb/payment_control_test.go @@ -1,7 +1,6 @@ package channeldb import ( - "bytes" "crypto/rand" "fmt" "io" @@ -11,10 +10,9 @@ import ( "time" "github.com/btcsuite/fastsha256" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/lntypes" - "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/record" ) func initDB() (*DB, error) { @@ -39,7 +37,7 @@ func genPreimage() ([32]byte, error) { return preimage, nil } -func genInfo() (*PaymentCreationInfo, *PaymentAttemptInfo, +func genInfo() (*PaymentCreationInfo, *HTLCAttemptInfo, lntypes.Preimage, error) { preimage, err := genPreimage() @@ -51,14 +49,14 @@ func genInfo() (*PaymentCreationInfo, 
*PaymentAttemptInfo, rhash := fastsha256.Sum256(preimage[:]) return &PaymentCreationInfo{ PaymentHash: rhash, - Value: 1, - CreationDate: time.Unix(time.Now().Unix(), 0), + Value: testRoute.ReceiverAmt(), + CreationTime: time.Unix(time.Now().Unix(), 0), PaymentRequest: []byte("hola"), }, - &PaymentAttemptInfo{ - PaymentID: 1, + &HTLCAttemptInfo{ + AttemptID: 0, SessionKey: priv, - Route: testRoute, + Route: *testRoute.Copy(), }, preimage, nil } @@ -86,10 +84,9 @@ func TestPaymentControlSwitchFail(t *testing.T) { t.Fatalf("unable to send htlc message: %v", err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusInFlight) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) assertPaymentInfo( - t, db, info.PaymentHash, info, nil, lntypes.Preimage{}, - nil, + t, pControl, info.PaymentHash, info, nil, nil, ) // Fail the payment, which should moved it to Failed. @@ -100,10 +97,9 @@ func TestPaymentControlSwitchFail(t *testing.T) { } // Verify the status is indeed Failed. - assertPaymentStatus(t, db, info.PaymentHash, StatusFailed) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusFailed) assertPaymentInfo( - t, db, info.PaymentHash, info, nil, lntypes.Preimage{}, - &failReason, + t, pControl, info.PaymentHash, info, &failReason, nil, ) // Sends the htlc again, which should succeed since the prior payment @@ -113,39 +109,85 @@ func TestPaymentControlSwitchFail(t *testing.T) { t.Fatalf("unable to send htlc message: %v", err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusInFlight) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) assertPaymentInfo( - t, db, info.PaymentHash, info, nil, lntypes.Preimage{}, - nil, + t, pControl, info.PaymentHash, info, nil, nil, ) - // Record a new attempt. - attempt.PaymentID = 2 - err = pControl.RegisterAttempt(info.PaymentHash, attempt) + // Record a new attempt. In this test scenario, the attempt fails. 
+ // However, this is not communicated to control tower in the current + // implementation. It only registers the initiation of the attempt. + _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) + if err != nil { + t.Fatalf("unable to register attempt: %v", err) + } + + htlcReason := HTLCFailUnreadable + _, err = pControl.FailAttempt( + info.PaymentHash, attempt.AttemptID, + &HTLCFailInfo{ + Reason: htlcReason, + }, + ) + if err != nil { + t.Fatal(err) + } + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) + + htlc := &htlcStatus{ + HTLCAttemptInfo: attempt, + failure: &htlcReason, + } + + assertPaymentInfo(t, pControl, info.PaymentHash, info, nil, htlc) + + // Record another attempt. + attempt.AttemptID = 1 + _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) if err != nil { t.Fatalf("unable to send htlc message: %v", err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusInFlight) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) + + htlc = &htlcStatus{ + HTLCAttemptInfo: attempt, + } + assertPaymentInfo( - t, db, info.PaymentHash, info, attempt, lntypes.Preimage{}, - nil, + t, pControl, info.PaymentHash, info, nil, htlc, ) - // Verifies that status was changed to StatusSucceeded. - var route *route.Route - route, err = pControl.Success(info.PaymentHash, preimg) + // Settle the attempt and verify that status was changed to + // StatusSucceeded. 
+ var payment *MPPayment + payment, err = pControl.SettleAttempt( + info.PaymentHash, attempt.AttemptID, + &HTLCSettleInfo{ + Preimage: preimg, + }, + ) if err != nil { t.Fatalf("error shouldn't have been received, got: %v", err) } - err = assertRouteEqual(route, &attempt.Route) + if len(payment.HTLCs) != 2 { + t.Fatalf("payment should have two htlcs, got: %d", + len(payment.HTLCs)) + } + + err = assertRouteEqual(&payment.HTLCs[0].Route, &attempt.Route) if err != nil { t.Fatalf("unexpected route returned: %v vs %v: %v", - spew.Sdump(attempt.Route), spew.Sdump(*route), err) + spew.Sdump(attempt.Route), + spew.Sdump(payment.HTLCs[0].Route), err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusSucceeded) - assertPaymentInfo(t, db, info.PaymentHash, info, attempt, preimg, nil) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusSucceeded) + + htlc.settle = &preimg + assertPaymentInfo( + t, pControl, info.PaymentHash, info, nil, htlc, + ) // Attempt a final payment, which should now fail since the prior // payment succeed. @@ -179,10 +221,9 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) { t.Fatalf("unable to send htlc message: %v", err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusInFlight) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) assertPaymentInfo( - t, db, info.PaymentHash, info, nil, lntypes.Preimage{}, - nil, + t, pControl, info.PaymentHash, info, nil, nil, ) // Try to initiate double sending of htlc message with the same @@ -195,14 +236,17 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) { } // Record an attempt. 
- err = pControl.RegisterAttempt(info.PaymentHash, attempt) + _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) if err != nil { t.Fatalf("unable to send htlc message: %v", err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusInFlight) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) + + htlc := &htlcStatus{ + HTLCAttemptInfo: attempt, + } assertPaymentInfo( - t, db, info.PaymentHash, info, attempt, lntypes.Preimage{}, - nil, + t, pControl, info.PaymentHash, info, nil, htlc, ) // Sends base htlc message which initiate StatusInFlight. @@ -213,11 +257,19 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) { } // After settling, the error should be ErrAlreadyPaid. - if _, err := pControl.Success(info.PaymentHash, preimg); err != nil { + _, err = pControl.SettleAttempt( + info.PaymentHash, attempt.AttemptID, + &HTLCSettleInfo{ + Preimage: preimg, + }, + ) + if err != nil { t.Fatalf("error shouldn't have been received, got: %v", err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusSucceeded) - assertPaymentInfo(t, db, info.PaymentHash, info, attempt, preimg, nil) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusSucceeded) + + htlc.settle = &preimg + assertPaymentInfo(t, pControl, info.PaymentHash, info, nil, htlc) err = pControl.InitPayment(info.PaymentHash, info) if err != ErrAlreadyPaid { @@ -243,16 +295,17 @@ func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) { } // Attempt to complete the payment should fail. 
- _, err = pControl.Success(info.PaymentHash, preimg) + _, err = pControl.SettleAttempt( + info.PaymentHash, 0, + &HTLCSettleInfo{ + Preimage: preimg, + }, + ) if err != ErrPaymentNotInitiated { t.Fatalf("expected ErrPaymentNotInitiated, got %v", err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusUnknown) - assertPaymentInfo( - t, db, info.PaymentHash, nil, nil, lntypes.Preimage{}, - nil, - ) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusUnknown) } // TestPaymentControlFailsWithoutInFlight checks that a strict payment @@ -278,10 +331,7 @@ func TestPaymentControlFailsWithoutInFlight(t *testing.T) { t.Fatalf("expected ErrPaymentNotInitiated, got %v", err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusUnknown) - assertPaymentInfo( - t, db, info.PaymentHash, nil, nil, lntypes.Preimage{}, nil, - ) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusUnknown) } // TestPaymentControlDeleteNonInFlight checks that calling DeletaPayments only @@ -325,12 +375,28 @@ func TestPaymentControlDeleteNonInFligt(t *testing.T) { if err != nil { t.Fatalf("unable to send htlc message: %v", err) } - err = pControl.RegisterAttempt(info.PaymentHash, attempt) + _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) if err != nil { t.Fatalf("unable to send htlc message: %v", err) } + htlc := &htlcStatus{ + HTLCAttemptInfo: attempt, + } + if p.failed { + // Fail the payment attempt. + htlcFailure := HTLCFailUnreadable + _, err := pControl.FailAttempt( + info.PaymentHash, attempt.AttemptID, + &HTLCFailInfo{ + Reason: htlcFailure, + }, + ) + if err != nil { + t.Fatalf("unable to fail htlc: %v", err) + } + // Fail the payment, which should moved it to Failed. failReason := FailureReasonNoRoute _, err = pControl.Fail(info.PaymentHash, failReason) @@ -339,27 +405,35 @@ func TestPaymentControlDeleteNonInFligt(t *testing.T) { } // Verify the status is indeed Failed. 
- assertPaymentStatus(t, db, info.PaymentHash, StatusFailed) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusFailed) + + htlc.failure = &htlcFailure assertPaymentInfo( - t, db, info.PaymentHash, info, attempt, - lntypes.Preimage{}, &failReason, + t, pControl, info.PaymentHash, info, + &failReason, htlc, ) } else if p.success { // Verifies that status was changed to StatusSucceeded. - _, err := pControl.Success(info.PaymentHash, preimg) + _, err := pControl.SettleAttempt( + info.PaymentHash, attempt.AttemptID, + &HTLCSettleInfo{ + Preimage: preimg, + }, + ) if err != nil { t.Fatalf("error shouldn't have been received, got: %v", err) } - assertPaymentStatus(t, db, info.PaymentHash, StatusSucceeded) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusSucceeded) + + htlc.settle = &preimg assertPaymentInfo( - t, db, info.PaymentHash, info, attempt, preimg, nil, + t, pControl, info.PaymentHash, info, nil, htlc, ) } else { - assertPaymentStatus(t, db, info.PaymentHash, StatusInFlight) + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) assertPaymentInfo( - t, db, info.PaymentHash, info, attempt, - lntypes.Preimage{}, nil, + t, pControl, info.PaymentHash, info, nil, htlc, ) } } @@ -385,166 +459,456 @@ func TestPaymentControlDeleteNonInFligt(t *testing.T) { } } -func assertPaymentStatus(t *testing.T, db *DB, - hash [32]byte, expStatus PaymentStatus) { +// TestPaymentControlMultiShard checks the ability of payment control to +// have multiple in-flight HTLCs for a single payment. +func TestPaymentControlMultiShard(t *testing.T) { + t.Parallel() - t.Helper() + // We will register three HTLC attempts, and always fail the second + // one. We'll generate all combinations of settling/failing the first + // and third HTLC, and assert that the payment status end up as we + // expect. 
+ type testCase struct { + settleFirst bool + settleLast bool + } - var paymentStatus = StatusUnknown - err := db.View(func(tx *bbolt.Tx) error { - payments := tx.Bucket(paymentsRootBucket) - if payments == nil { - return nil + var tests []testCase + for _, f := range []bool{true, false} { + for _, l := range []bool{true, false} { + tests = append(tests, testCase{f, l}) } + } - bucket := payments.Bucket(hash[:]) - if bucket == nil { - return nil + runSubTest := func(t *testing.T, test testCase) { + db, err := initDB() + if err != nil { + t.Fatalf("unable to init db: %v", err) } - // Get the existing status of this payment, if any. - paymentStatus = fetchPaymentStatus(bucket) - return nil - }) - if err != nil { - t.Fatalf("unable to fetch payment status: %v", err) + pControl := NewPaymentControl(db) + + info, attempt, preimg, err := genInfo() + if err != nil { + t.Fatalf("unable to generate htlc message: %v", err) + } + + // Init the payment, moving it to the StatusInFlight state. + err = pControl.InitPayment(info.PaymentHash, info) + if err != nil { + t.Fatalf("unable to send htlc message: %v", err) + } + + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) + assertPaymentInfo( + t, pControl, info.PaymentHash, info, nil, nil, + ) + + // Create three unique attempts we'll use for the test, and + // register them with the payment control. We set each + // attempts's value to one third of the payment amount, and + // populate the MPP options. 
+ shardAmt := info.Value / 3 + attempt.Route.FinalHop().AmtToForward = shardAmt + attempt.Route.FinalHop().MPP = record.NewMPP( + info.Value, [32]byte{1}, + ) + + var attempts []*HTLCAttemptInfo + for i := uint64(0); i < 3; i++ { + a := *attempt + a.AttemptID = i + attempts = append(attempts, &a) + + _, err = pControl.RegisterAttempt(info.PaymentHash, &a) + if err != nil { + t.Fatalf("unable to send htlc message: %v", err) + } + assertPaymentStatus( + t, pControl, info.PaymentHash, StatusInFlight, + ) + + htlc := &htlcStatus{ + HTLCAttemptInfo: &a, + } + assertPaymentInfo( + t, pControl, info.PaymentHash, info, nil, htlc, + ) + } + + // For a fourth attempt, check that attempting to + // register it will fail since the total sent amount + // will be too large. + b := *attempt + b.AttemptID = 3 + _, err = pControl.RegisterAttempt(info.PaymentHash, &b) + if err != ErrValueExceedsAmt { + t.Fatalf("expected ErrValueExceedsAmt, got: %v", + err) + } + + // Fail the second attempt. + a := attempts[1] + htlcFail := HTLCFailUnreadable + _, err = pControl.FailAttempt( + info.PaymentHash, a.AttemptID, + &HTLCFailInfo{ + Reason: htlcFail, + }, + ) + if err != nil { + t.Fatal(err) + } + + htlc := &htlcStatus{ + HTLCAttemptInfo: a, + failure: &htlcFail, + } + assertPaymentInfo( + t, pControl, info.PaymentHash, info, nil, htlc, + ) + + // Payment should still be in-flight. + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) + + // Depending on the test case, settle or fail the first attempt. + a = attempts[0] + htlc = &htlcStatus{ + HTLCAttemptInfo: a, + } + + var firstFailReason *FailureReason + if test.settleFirst { + _, err := pControl.SettleAttempt( + info.PaymentHash, a.AttemptID, + &HTLCSettleInfo{ + Preimage: preimg, + }, + ) + if err != nil { + t.Fatalf("error shouldn't have been "+ + "received, got: %v", err) + } + + // Assert that the HTLC has had the preimage recorded. 
+ htlc.settle = &preimg + assertPaymentInfo( + t, pControl, info.PaymentHash, info, nil, htlc, + ) + } else { + _, err := pControl.FailAttempt( + info.PaymentHash, a.AttemptID, + &HTLCFailInfo{ + Reason: htlcFail, + }, + ) + if err != nil { + t.Fatalf("error shouldn't have been "+ + "received, got: %v", err) + } + + // Assert the failure was recorded. + htlc.failure = &htlcFail + assertPaymentInfo( + t, pControl, info.PaymentHash, info, nil, htlc, + ) + + // We also record a payment level fail, to move it into + // a terminal state. + failReason := FailureReasonNoRoute + _, err = pControl.Fail(info.PaymentHash, failReason) + if err != nil { + t.Fatalf("unable to fail payment hash: %v", err) + } + + // Record the reason we failed the payment, such that + // we can assert this later in the test. + firstFailReason = &failReason + } + + // The payment should still be considered in-flight, since there + // is still an active HTLC. + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) + + // Try to register yet another attempt. This should fail now + // that the payment has reached a terminal condition. + b = *attempt + b.AttemptID = 3 + _, err = pControl.RegisterAttempt(info.PaymentHash, &b) + if err != ErrPaymentTerminal { + t.Fatalf("expected ErrPaymentTerminal, got: %v", err) + } + + assertPaymentStatus(t, pControl, info.PaymentHash, StatusInFlight) + + // Settle or fail the remaining attempt based on the testcase. + a = attempts[2] + htlc = &htlcStatus{ + HTLCAttemptInfo: a, + } + if test.settleLast { + // Settle the last outstanding attempt. + _, err = pControl.SettleAttempt( + info.PaymentHash, a.AttemptID, + &HTLCSettleInfo{ + Preimage: preimg, + }, + ) + if err != nil { + t.Fatalf("error shouldn't have been "+ + "received, got: %v", err) + } + + htlc.settle = &preimg + assertPaymentInfo( + t, pControl, info.PaymentHash, info, + firstFailReason, htlc, + ) + } else { + // Fail the attempt. 
+ _, err := pControl.FailAttempt( + info.PaymentHash, a.AttemptID, + &HTLCFailInfo{ + Reason: htlcFail, + }, + ) + if err != nil { + t.Fatalf("error shouldn't have been "+ + "received, got: %v", err) + } + + // Assert the failure was recorded. + htlc.failure = &htlcFail + assertPaymentInfo( + t, pControl, info.PaymentHash, info, + firstFailReason, htlc, + ) + + // Check that we can override any previous terminal + // failure. This is to allow multiple concurrent shards to + // write a terminal failure to the database without + // syncing. + failReason := FailureReasonPaymentDetails + _, err = pControl.Fail(info.PaymentHash, failReason) + if err != nil { + t.Fatalf("unable to fail payment hash: %v", err) + } + } + + // If any of the two attempts settled, the payment should end + // up in the Succeeded state. If both failed the payment should + // also be Failed at this point. + finalStatus := StatusFailed + expRegErr := ErrPaymentAlreadyFailed + if test.settleFirst || test.settleLast { + finalStatus = StatusSucceeded + expRegErr = ErrPaymentAlreadySucceeded + } + + assertPaymentStatus(t, pControl, info.PaymentHash, finalStatus) + + // Finally assert we cannot register more attempts.
+ _, err = pControl.RegisterAttempt(info.PaymentHash, &b) + if err != expRegErr { + t.Fatalf("expected error %v, got: %v", expRegErr, err) + } } - if paymentStatus != expStatus { - t.Fatalf("payment status mismatch: expected %v, got %v", - expStatus, paymentStatus) + for _, test := range tests { + test := test + subTest := fmt.Sprintf("first=%v, second=%v", + test.settleFirst, test.settleLast) + + t.Run(subTest, func(t *testing.T) { + runSubTest(t, test) + }) } } -func checkPaymentCreationInfo(bucket *bbolt.Bucket, c *PaymentCreationInfo) error { - b := bucket.Get(paymentCreationInfoKey) - switch { - case b == nil && c == nil: - return nil - case b == nil: - return fmt.Errorf("expected creation info not found") - case c == nil: - return fmt.Errorf("unexpected creation info found") +func TestPaymentControlMPPRecordValidation(t *testing.T) { + t.Parallel() + + db, err := initDB() + if err != nil { + t.Fatalf("unable to init db: %v", err) + } + + pControl := NewPaymentControl(db) + + info, attempt, _, err := genInfo() + if err != nil { + t.Fatalf("unable to generate htlc message: %v", err) + } + + // Init the payment. + err = pControl.InitPayment(info.PaymentHash, info) + if err != nil { + t.Fatalf("unable to send htlc message: %v", err) } - r := bytes.NewReader(b) - c2, err := deserializePaymentCreationInfo(r) + // Create three unique attempts we'll use for the test, and + // register them with the payment control. We set each + // attempts's value to one third of the payment amount, and + // populate the MPP options. 
+ shardAmt := info.Value / 3 + attempt.Route.FinalHop().AmtToForward = shardAmt + attempt.Route.FinalHop().MPP = record.NewMPP( + info.Value, [32]byte{1}, + ) + + _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) if err != nil { - return err + t.Fatalf("unable to send htlc message: %v", err) } - if !reflect.DeepEqual(c, c2) { - return fmt.Errorf("PaymentCreationInfos don't match: %v vs %v", - spew.Sdump(c), spew.Sdump(c2)) + + // Now try to register a non-MPP attempt, which should fail. + b := *attempt + b.AttemptID = 1 + b.Route.FinalHop().MPP = nil + _, err = pControl.RegisterAttempt(info.PaymentHash, &b) + if err != ErrMPPayment { + t.Fatalf("expected ErrMPPayment, got: %v", err) } - return nil -} + // Try to register attempt one with a different payment address. + b.Route.FinalHop().MPP = record.NewMPP( + info.Value, [32]byte{2}, + ) + _, err = pControl.RegisterAttempt(info.PaymentHash, &b) + if err != ErrMPPPaymentAddrMismatch { + t.Fatalf("expected ErrMPPPaymentAddrMismatch, got: %v", err) + } -func checkPaymentAttemptInfo(bucket *bbolt.Bucket, a *PaymentAttemptInfo) error { - b := bucket.Get(paymentAttemptInfoKey) - switch { - case b == nil && a == nil: - return nil - case b == nil: - return fmt.Errorf("expected attempt info not found") - case a == nil: - return fmt.Errorf("unexpected attempt info found") + // Try registering one with a different total amount. + b.Route.FinalHop().MPP = record.NewMPP( + info.Value/2, [32]byte{1}, + ) + _, err = pControl.RegisterAttempt(info.PaymentHash, &b) + if err != ErrMPPTotalAmountMismatch { + t.Fatalf("expected ErrMPPTotalAmountMismatch, got: %v", err) } - r := bytes.NewReader(b) - a2, err := deserializePaymentAttemptInfo(r) + // Create and init a new payment. This time we'll check that we cannot + // register an MPP attempt if we already registered a non-MPP one. 
+ info, attempt, _, err = genInfo() if err != nil { - return err + t.Fatalf("unable to generate htlc message: %v", err) } - return assertRouteEqual(&a.Route, &a2.Route) -} - -func checkSettleInfo(bucket *bbolt.Bucket, preimg lntypes.Preimage) error { - zero := lntypes.Preimage{} - b := bucket.Get(paymentSettleInfoKey) - switch { - case b == nil && preimg == zero: - return nil - case b == nil: - return fmt.Errorf("expected preimage not found") - case preimg == zero: - return fmt.Errorf("unexpected preimage found") + err = pControl.InitPayment(info.PaymentHash, info) + if err != nil { + t.Fatalf("unable to send htlc message: %v", err) } - var pre2 lntypes.Preimage - copy(pre2[:], b[:]) - if preimg != pre2 { - return fmt.Errorf("Preimages don't match: %x vs %x", - preimg, pre2) + attempt.Route.FinalHop().MPP = nil + _, err = pControl.RegisterAttempt(info.PaymentHash, attempt) + if err != nil { + t.Fatalf("unable to send htlc message: %v", err) } - return nil + // Attempt to register an MPP attempt, which should fail. + b = *attempt + b.AttemptID = 1 + b.Route.FinalHop().MPP = record.NewMPP( + info.Value, [32]byte{1}, + ) + + _, err = pControl.RegisterAttempt(info.PaymentHash, &b) + if err != ErrNonMPPayment { + t.Fatalf("expected ErrNonMPPayment, got: %v", err) + } } -func checkFailInfo(bucket *bbolt.Bucket, failReason *FailureReason) error { - b := bucket.Get(paymentFailInfoKey) - switch { - case b == nil && failReason == nil: - return nil - case b == nil: - return fmt.Errorf("expected fail info not found") - case failReason == nil: - return fmt.Errorf("unexpected fail info found") +// assertPaymentStatus retrieves the status of the payment referred to by hash +// and compares it with the expected state. 
+func assertPaymentStatus(t *testing.T, p *PaymentControl, + hash lntypes.Hash, expStatus PaymentStatus) { + + t.Helper() + + payment, err := p.FetchPayment(hash) + if expStatus == StatusUnknown && err == ErrPaymentNotInitiated { + return + } + if err != nil { + t.Fatal(err) } - failReason2 := FailureReason(b[0]) - if *failReason != failReason2 { - return fmt.Errorf("Failure infos don't match: %v vs %v", - *failReason, failReason2) + if payment.Status != expStatus { + t.Fatalf("payment status mismatch: expected %v, got %v", + expStatus, payment.Status) } +} - return nil +type htlcStatus struct { + *HTLCAttemptInfo + settle *lntypes.Preimage + failure *HTLCFailReason } -func assertPaymentInfo(t *testing.T, db *DB, hash lntypes.Hash, - c *PaymentCreationInfo, a *PaymentAttemptInfo, s lntypes.Preimage, - f *FailureReason) { +// assertPaymentInfo retrieves the payment referred to by hash and verifies the +// expected values. +func assertPaymentInfo(t *testing.T, p *PaymentControl, hash lntypes.Hash, + c *PaymentCreationInfo, f *FailureReason, a *htlcStatus) { t.Helper() - err := db.View(func(tx *bbolt.Tx) error { - payments := tx.Bucket(paymentsRootBucket) - if payments == nil && c == nil { - return nil + payment, err := p.FetchPayment(hash) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(payment.Info, c) { + t.Fatalf("PaymentCreationInfos don't match: %v vs %v", + spew.Sdump(payment.Info), spew.Sdump(c)) + } + + if f != nil { + if *payment.FailureReason != *f { + t.Fatal("unexpected failure reason") } - if payments == nil { - return fmt.Errorf("sent payments not found") + } else { + if payment.FailureReason != nil { + t.Fatal("unexpected failure reason") } + } - bucket := payments.Bucket(hash[:]) - if bucket == nil && c == nil { - return nil + if a == nil { + if len(payment.HTLCs) > 0 { + t.Fatal("expected no htlcs") } + return + } - if bucket == nil { - return fmt.Errorf("payment not found") - } + htlc := payment.HTLCs[a.AttemptID] + if err := 
assertRouteEqual(&htlc.Route, &a.Route); err != nil { + t.Fatal("routes do not match") + } - if err := checkPaymentCreationInfo(bucket, c); err != nil { - return err - } + if htlc.AttemptID != a.AttemptID { + t.Fatalf("unnexpected attempt ID %v, expected %v", + htlc.AttemptID, a.AttemptID) + } - if err := checkPaymentAttemptInfo(bucket, a); err != nil { - return err + if a.failure != nil { + if htlc.Failure == nil { + t.Fatalf("expected HTLC to be failed") } - if err := checkSettleInfo(bucket, s); err != nil { - return err + if htlc.Failure.Reason != *a.failure { + t.Fatalf("expected HTLC failure %v, had %v", + *a.failure, htlc.Failure.Reason) } + } else if htlc.Failure != nil { + t.Fatalf("expected no HTLC failure") + } - if err := checkFailInfo(bucket, f); err != nil { - return err + if a.settle != nil { + if htlc.Settle.Preimage != *a.settle { + t.Fatalf("Preimages don't match: %x vs %x", + htlc.Settle.Preimage, a.settle) } - return nil - }) - if err != nil { - t.Fatalf("assert payment info failed: %v", err) + } else if htlc.Settle != nil { + t.Fatal("expected no settle info") } - } diff --git a/channeldb/payments.go b/channeldb/payments.go index 00d29366d0..7c5f49a2af 100644 --- a/channeldb/payments.go +++ b/channeldb/payments.go @@ -3,17 +3,17 @@ package channeldb import ( "bytes" "encoding/binary" - "errors" "fmt" "io" + "math" "sort" "time" - "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/tlv" ) @@ -31,9 +31,19 @@ var ( // |-- // | |--sequence-key: // | |--creation-info-key: - // | |--attempt-info-key: - // | |--settle-info-key: - // | |--fail-info-key: + // | |--fail-info-key: <(optional) fail info> + // | | + // | |--payment-htlcs-bucket (shard-bucket) + // 
| | | + // | | |-- + // | | | |--htlc-attempt-info-key: + // | | | |--htlc-settle-info-key: <(optional) settle info> + // | | | |--htlc-fail-info-key: <(optional) fail info> + // | | | + // | | |-- + // | | | | + // | | ... ... + // | | // | | // | |--duplicate-bucket (only for old, completed payments) // | | @@ -55,13 +65,6 @@ var ( // paymentsRootBucket = []byte("payments-root-bucket") - // paymentDublicateBucket is the name of a optional sub-bucket within - // the payment hash bucket, that is used to hold duplicate payments to - // a payment hash. This is needed to support information from earlier - // versions of lnd, where it was possible to pay to a payment hash more - // than once. - paymentDuplicateBucket = []byte("payment-duplicate-bucket") - // paymentSequenceKey is a key used in the payment's sub-bucket to // store the sequence number of the payment. paymentSequenceKey = []byte("payment-sequence-key") @@ -70,14 +73,21 @@ var ( // store the creation info of the payment. paymentCreationInfoKey = []byte("payment-creation-info") - // paymentAttemptInfoKey is a key used in the payment's sub-bucket to - // store the info about the latest attempt that was done for the - // payment in question. - paymentAttemptInfoKey = []byte("payment-attempt-info") + // paymentHtlcsBucket is a bucket where we'll store the information + // about the HTLCs that were attempted for a payment. + paymentHtlcsBucket = []byte("payment-htlcs-bucket") - // paymentSettleInfoKey is a key used in the payment's sub-bucket to - // store the settle info of the payment. - paymentSettleInfoKey = []byte("payment-settle-info") + // htlcAttemptInfoKey is a key used in a HTLC's sub-bucket to store the + // info about the attempt that was done for the HTLC in question. + htlcAttemptInfoKey = []byte("htlc-attempt-info") + + // htlcSettleInfoKey is a key used in a HTLC's sub-bucket to store the + // settle info, if any. 
+ htlcSettleInfoKey = []byte("htlc-settle-info") + + // htlcFailInfoKey is a key used in a HTLC's sub-bucket to store + // failure information, if any. + htlcFailInfoKey = []byte("htlc-fail-info") // paymentFailInfoKey is a key used in the payment's sub-bucket to // store information about the reason a payment failed. @@ -100,9 +110,13 @@ const ( // payment. FailureReasonError FailureReason = 2 - // FailureReasonIncorrectPaymentDetails indicates that either the hash - // is unknown or the final cltv delta or amount is incorrect. - FailureReasonIncorrectPaymentDetails FailureReason = 3 + // FailureReasonPaymentDetails indicates that either the hash is unknown + // or the final cltv delta or amount is incorrect. + FailureReasonPaymentDetails FailureReason = 3 + + // FailureReasonInsufficientBalance indicates that we didn't have enough + // balance to complete the payment. + FailureReasonInsufficientBalance FailureReason = 4 // TODO(halseth): cancel state. @@ -110,7 +124,12 @@ const ( // LocalLiquidityInsufficient, RemoteCapacityInsufficient. ) -// String returns a human readable FailureReason +// Error returns a human readable error string for the FailureReason. +func (r FailureReason) Error() string { + return r.String() +} + +// String returns a human readable FailureReason. func (r FailureReason) String() string { switch r { case FailureReasonTimeout: @@ -119,8 +138,10 @@ func (r FailureReason) String() string { return "no_route" case FailureReasonError: return "error" - case FailureReasonIncorrectPaymentDetails: + case FailureReasonPaymentDetails: return "incorrect_payment_details" + case FailureReasonInsufficientBalance: + return "insufficient_balance" } return "unknown" @@ -147,27 +168,6 @@ const ( StatusFailed PaymentStatus = 3 ) -// Bytes returns status as slice of bytes. -func (ps PaymentStatus) Bytes() []byte { - return []byte{byte(ps)} -} - -// FromBytes sets status from slice of bytes. 
-func (ps *PaymentStatus) FromBytes(status []byte) error { - if len(status) != 1 { - return errors.New("payment status is empty") - } - - switch PaymentStatus(status[0]) { - case StatusUnknown, StatusInFlight, StatusSucceeded, StatusFailed: - *ps = PaymentStatus(status[0]) - default: - return errors.New("unknown payment status") - } - - return nil -} - // String returns readable representation of payment status. func (ps PaymentStatus) String() string { switch ps { @@ -193,76 +193,27 @@ type PaymentCreationInfo struct { // Value is the amount we are paying. Value lnwire.MilliSatoshi - // CreatingDate is the time when this payment was initiated. - CreationDate time.Time + // CreationTime is the time when this payment was initiated. + CreationTime time.Time // PaymentRequest is the full payment request, if any. PaymentRequest []byte } -// PaymentAttemptInfo contains information about a specific payment attempt for -// a given payment. This information is used by the router to handle any errors -// coming back after an attempt is made, and to query the switch about the -// status of a payment. For settled payment this will be the information for -// the succeeding payment attempt. -type PaymentAttemptInfo struct { - // PaymentID is the unique ID used for this attempt. - PaymentID uint64 - - // SessionKey is the ephemeral key used for this payment attempt. - SessionKey *btcec.PrivateKey - - // Route is the route attempted to send the HTLC. - Route route.Route -} - -// Payment is a wrapper around a payment's PaymentCreationInfo, -// PaymentAttemptInfo, and preimage. All payments will have the -// PaymentCreationInfo set, the PaymentAttemptInfo will be set only if at least -// one payment attempt has been made, while only completed payments will have a -// non-zero payment preimage. -type Payment struct { - // sequenceNum is a unique identifier used to sort the payments in - // order of creation. 
- sequenceNum uint64 - - // Status is the current PaymentStatus of this payment. - Status PaymentStatus - - // Info holds all static information about this payment, and is - // populated when the payment is initiated. - Info *PaymentCreationInfo - - // Attempt is the information about the last payment attempt made. - // - // NOTE: Can be nil if no attempt is yet made. - Attempt *PaymentAttemptInfo - - // PaymentPreimage is the preimage of a successful payment. This serves - // as a proof of payment. It will only be non-nil for settled payments. - // - // NOTE: Can be nil if payment is not settled. - PaymentPreimage *lntypes.Preimage - - // Failure is a failure reason code indicating the reason the payment - // failed. It is only non-nil for failed payments. - // - // NOTE: Can be nil if payment is not failed. - Failure *FailureReason -} - // FetchPayments returns all sent payments found in the DB. -func (db *DB) FetchPayments() ([]*Payment, error) { - var payments []*Payment +// +// nolint: dupl +func (db *DB) FetchPayments() ([]*MPPayment, error) { + var payments []*MPPayment - err := db.View(func(tx *bbolt.Tx) error { - paymentsBucket := tx.Bucket(paymentsRootBucket) + err := kvdb.View(db, func(tx kvdb.ReadTx) error { + paymentsBucket := tx.ReadBucket(paymentsRootBucket) if paymentsBucket == nil { return nil } return paymentsBucket.ForEach(func(k, v []byte) error { - bucket := paymentsBucket.Bucket(k) + bucket := paymentsBucket.NestedReadBucket(k) if bucket == nil { // We only expect sub-buckets to be found in // this top-level bucket. @@ -281,28 +232,13 @@ func (db *DB) FetchPayments() ([]*Payment, error) { // payment has was possible. These will be found in a // sub-bucket indexed by their sequence number if // available. 
- dup := bucket.Bucket(paymentDuplicateBucket) - if dup == nil { - return nil + duplicatePayments, err := fetchDuplicatePayments(bucket) + if err != nil { + return err } - return dup.ForEach(func(k, v []byte) error { - subBucket := dup.Bucket(k) - if subBucket == nil { - // We one bucket for each duplicate to - // be found. - return fmt.Errorf("non bucket element" + - "in duplicate bucket") - } - - p, err := fetchPayment(subBucket) - if err != nil { - return err - } - - payments = append(payments, p) - return nil - }) + payments = append(payments, duplicatePayments...) + return nil }) }) if err != nil { @@ -311,81 +247,328 @@ func (db *DB) FetchPayments() ([]*Payment, error) { // Before returning, sort the payments by their sequence number. sort.Slice(payments, func(i, j int) bool { - return payments[i].sequenceNum < payments[j].sequenceNum + return payments[i].SequenceNum < payments[j].SequenceNum }) return payments, nil } -func fetchPayment(bucket *bbolt.Bucket) (*Payment, error) { - var ( - err error - p = &Payment{} - ) +func fetchCreationInfo(bucket kvdb.ReadBucket) (*PaymentCreationInfo, error) { + b := bucket.Get(paymentCreationInfoKey) + if b == nil { + return nil, fmt.Errorf("creation info not found") + } + r := bytes.NewReader(b) + return deserializePaymentCreationInfo(r) +} + +func fetchPayment(bucket kvdb.ReadBucket) (*MPPayment, error) { seqBytes := bucket.Get(paymentSequenceKey) if seqBytes == nil { return nil, fmt.Errorf("sequence number not found") } - p.sequenceNum = binary.BigEndian.Uint64(seqBytes) - - // Get the payment status. - p.Status = fetchPaymentStatus(bucket) + sequenceNum := binary.BigEndian.Uint64(seqBytes) // Get the PaymentCreationInfo. 
-	b := bucket.Get(paymentCreationInfoKey)
-	if b == nil {
-		return nil, fmt.Errorf("creation info not found")
-	}
-
-	r := bytes.NewReader(b)
-	p.Info, err = deserializePaymentCreationInfo(r)
+	creationInfo, err := fetchCreationInfo(bucket)
 	if err != nil {
 		return nil, err
 	}
 
-	// Get the PaymentAttemptInfo. This can be unset.
-	b = bucket.Get(paymentAttemptInfoKey)
-	if b != nil {
-		r = bytes.NewReader(b)
-		p.Attempt, err = deserializePaymentAttemptInfo(r)
+	var htlcs []HTLCAttempt
+	htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
+	if htlcsBucket != nil {
+		// Get the payment attempts. This can be empty.
+		htlcs, err = fetchHtlcAttempts(htlcsBucket)
 		if err != nil {
 			return nil, err
 		}
 	}
 
-	// Get the payment preimage. This is only found for
-	// completed payments.
-	b = bucket.Get(paymentSettleInfoKey)
-	if b != nil {
-		var preimg lntypes.Preimage
-		copy(preimg[:], b[:])
-		p.PaymentPreimage = &preimg
-	}
-
 	// Get failure reason if available.
-	b = bucket.Get(paymentFailInfoKey)
+	var failureReason *FailureReason
+	b := bucket.Get(paymentFailInfoKey)
 	if b != nil {
 		reason := FailureReason(b[0])
-		p.Failure = &reason
+		failureReason = &reason
+	}
+
+	// Go through all HTLCs for this payment, noting whether we have any
+	// settled HTLC, and any still in-flight.
+	var inflight, settled bool
+	for _, h := range htlcs {
+		if h.Failure != nil {
+			continue
+		}
+
+		if h.Settle != nil {
+			settled = true
+			continue
+		}
+
+		// If any of the HTLCs are not failed nor settled, we
+		// still have inflight HTLCs.
+		inflight = true
+	}
+
+	// Use the DB state to determine the status of the payment.
+	var paymentStatus PaymentStatus
+
+	switch {
+
+	// If any of the HTLCs did succeed and there are no HTLCs in
+	// flight, the payment succeeded.
+	case !inflight && settled:
+		paymentStatus = StatusSucceeded
+
+	// If we have no in-flight HTLCs, and the payment failure is set, the
+	// payment is considered failed.
+	case !inflight && failureReason != nil:
+		paymentStatus = StatusFailed
+
+	// Otherwise it is still in flight.
+	default:
+		paymentStatus = StatusInFlight
+	}
+
+	return &MPPayment{
+		SequenceNum:   sequenceNum,
+		Info:          creationInfo,
+		HTLCs:         htlcs,
+		FailureReason: failureReason,
+		Status:        paymentStatus,
+	}, nil
+}
+
+// fetchHtlcAttempts retrieves all htlc attempts made for the payment found in
+// the given bucket.
+func fetchHtlcAttempts(bucket kvdb.ReadBucket) ([]HTLCAttempt, error) {
+	htlcs := make([]HTLCAttempt, 0)
+
+	err := bucket.ForEach(func(k, _ []byte) error {
+		aid := byteOrder.Uint64(k)
+		htlcBucket := bucket.NestedReadBucket(k)
+
+		attemptInfo, err := fetchHtlcAttemptInfo(
+			htlcBucket,
+		)
+		if err != nil {
+			return err
+		}
+		attemptInfo.AttemptID = aid
+
+		htlc := HTLCAttempt{
+			HTLCAttemptInfo: *attemptInfo,
+		}
+
+		// Settle info might be nil.
+		htlc.Settle, err = fetchHtlcSettleInfo(htlcBucket)
+		if err != nil {
+			return err
+		}
+
+		// Failure info might be nil.
+		htlc.Failure, err = fetchHtlcFailInfo(htlcBucket)
+		if err != nil {
+			return err
+		}
+
+		htlcs = append(htlcs, htlc)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return htlcs, nil
+}
+
+// fetchHtlcAttemptInfo fetches the payment attempt info for this htlc from the
+// bucket.
+func fetchHtlcAttemptInfo(bucket kvdb.ReadBucket) (*HTLCAttemptInfo, error) {
+	b := bucket.Get(htlcAttemptInfoKey)
+	if b == nil {
+		return nil, errNoAttemptInfo
 	}
 
-	return p, nil
+	r := bytes.NewReader(b)
+	return deserializeHTLCAttemptInfo(r)
+}
+
+// fetchHtlcSettleInfo retrieves the settle info for the htlc. If the htlc isn't
+// settled, nil is returned.
+func fetchHtlcSettleInfo(bucket kvdb.ReadBucket) (*HTLCSettleInfo, error) {
+	b := bucket.Get(htlcSettleInfoKey)
+	if b == nil {
+		// Settle info is optional.
+		return nil, nil
+	}
+
+	r := bytes.NewReader(b)
+	return deserializeHTLCSettleInfo(r)
+}
+
+// fetchHtlcFailInfo retrieves the failure info for the htlc.
If the htlc hasn't +// failed, nil is returned. +func fetchHtlcFailInfo(bucket kvdb.ReadBucket) (*HTLCFailInfo, error) { + b := bucket.Get(htlcFailInfoKey) + if b == nil { + // Fail info is optional. + return nil, nil + } + + r := bytes.NewReader(b) + return deserializeHTLCFailInfo(r) +} + +// PaymentsQuery represents a query to the payments database starting or ending +// at a certain offset index. The number of retrieved records can be limited. +type PaymentsQuery struct { + // IndexOffset determines the starting point of the payments query and + // is always exclusive. In normal order, the query starts at the next + // higher (available) index compared to IndexOffset. In reversed order, + // the query ends at the next lower (available) index compared to the + // IndexOffset. In the case of a zero index_offset, the query will start + // with the oldest payment when paginating forwards, or will end with + // the most recent payment when paginating backwards. + IndexOffset uint64 + + // MaxPayments is the maximal number of payments returned in the + // payments query. + MaxPayments uint64 + + // Reversed gives a meaning to the IndexOffset. If reversed is set to + // true, the query will fetch payments with indices lower than the + // IndexOffset, otherwise, it will return payments with indices greater + // than the IndexOffset. + Reversed bool + + // If IncludeIncomplete is true, then return payments that have not yet + // fully completed. This means that pending payments, as well as failed + // payments will show up if this field is set to true. + IncludeIncomplete bool +} + +// PaymentsResponse contains the result of a query to the payments database. +// It includes the set of payments that match the query and integers which +// represent the index of the first and last item returned in the series of +// payments. These integers allow callers to resume their query in the event +// that the query's response exceeds the max number of returnable events. 
+type PaymentsResponse struct { + // Payments is the set of payments returned from the database for the + // PaymentsQuery. + Payments []MPPayment + + // FirstIndexOffset is the index of the first element in the set of + // returned MPPayments. Callers can use this to resume their query + // in the event that the slice has too many events to fit into a single + // response. The offset can be used to continue reverse pagination. + FirstIndexOffset uint64 + + // LastIndexOffset is the index of the last element in the set of + // returned MPPayments. Callers can use this to resume their query + // in the event that the slice has too many events to fit into a single + // response. The offset can be used to continue forward pagination. + LastIndexOffset uint64 +} + +// QueryPayments is a query to the payments database which is restricted +// to a subset of payments by the payments query, containing an offset +// index and a maximum number of returned payments. +func (db *DB) QueryPayments(query PaymentsQuery) (PaymentsResponse, error) { + var resp PaymentsResponse + + allPayments, err := db.FetchPayments() + if err != nil { + return resp, err + } + + if len(allPayments) == 0 { + return resp, nil + } + + indexExclusiveLimit := query.IndexOffset + // In backward pagination, if the index limit is the default 0 value, + // we set our limit to maxint to include all payments from the highest + // sequence number on. + if query.Reversed && indexExclusiveLimit == 0 { + indexExclusiveLimit = math.MaxInt64 + } + + for i := range allPayments { + var payment *MPPayment + + // If we have the max number of payments we want, exit. + if uint64(len(resp.Payments)) == query.MaxPayments { + break + } + + if query.Reversed { + payment = allPayments[len(allPayments)-1-i] + + // In the reversed direction, skip over all payments + // that have sequence numbers greater than or equal to + // the index offset. We skip payments with equal index + // because the offset is exclusive. 
+ if payment.SequenceNum >= indexExclusiveLimit { + continue + } + } else { + payment = allPayments[i] + + // In the forward direction, skip over all payments that + // have sequence numbers less than or equal to the index + // offset. We skip payments with equal indexes because + // the index offset is exclusive. + if payment.SequenceNum <= indexExclusiveLimit { + continue + } + } + + // To keep compatibility with the old API, we only return + // non-succeeded payments if requested. + if payment.Status != StatusSucceeded && + !query.IncludeIncomplete { + + continue + } + + resp.Payments = append(resp.Payments, *payment) + } + + // Need to swap the payments slice order if reversed order. + if query.Reversed { + for l, r := 0, len(resp.Payments)-1; l < r; l, r = l+1, r-1 { + resp.Payments[l], resp.Payments[r] = + resp.Payments[r], resp.Payments[l] + } + } + + // Set the first and last index of the returned payments so that the + // caller can resume from this point later on. + if len(resp.Payments) > 0 { + resp.FirstIndexOffset = resp.Payments[0].SequenceNum + resp.LastIndexOffset = + resp.Payments[len(resp.Payments)-1].SequenceNum + } + + return resp, err } // DeletePayments deletes all completed and failed payments from the DB. func (db *DB) DeletePayments() error { - return db.Update(func(tx *bbolt.Tx) error { - payments := tx.Bucket(paymentsRootBucket) + return kvdb.Update(db, func(tx kvdb.RwTx) error { + payments := tx.ReadWriteBucket(paymentsRootBucket) if payments == nil { return nil } var deleteBuckets [][]byte err := payments.ForEach(func(k, _ []byte) error { - bucket := payments.Bucket(k) + bucket := payments.NestedReadWriteBucket(k) if bucket == nil { // We only expect sub-buckets to be found in // this top-level bucket. @@ -395,7 +578,13 @@ func (db *DB) DeletePayments() error { // If the status is InFlight, we cannot safely delete // the payment information, so we return early. 
- paymentStatus := fetchPaymentStatus(bucket) + paymentStatus, err := fetchPaymentStatus(bucket) + if err != nil { + return err + } + + // If the status is InFlight, we cannot safely delete + // the payment information, so we return early. if paymentStatus == StatusInFlight { return nil } @@ -408,7 +597,7 @@ func (db *DB) DeletePayments() error { } for _, k := range deleteBuckets { - if err := payments.DeleteBucket(k); err != nil { + if err := payments.DeleteNestedBucket(k); err != nil { return err } } @@ -417,6 +606,7 @@ func (db *DB) DeletePayments() error { }) } +// nolint: dupl func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) error { var scratch [8]byte @@ -429,8 +619,7 @@ func serializePaymentCreationInfo(w io.Writer, c *PaymentCreationInfo) error { return err } - byteOrder.PutUint64(scratch[:], uint64(c.CreationDate.Unix())) - if _, err := w.Write(scratch[:]); err != nil { + if err := serializeTime(w, c.CreationTime); err != nil { return err } @@ -460,10 +649,11 @@ func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, error) { } c.Value = lnwire.MilliSatoshi(byteOrder.Uint64(scratch[:])) - if _, err := io.ReadFull(r, scratch[:]); err != nil { + creationTime, err := deserializeTime(r) + if err != nil { return nil, err } - c.CreationDate = time.Unix(int64(byteOrder.Uint64(scratch[:])), 0) + c.CreationTime = creationTime if _, err := io.ReadFull(r, scratch[:4]); err != nil { return nil, err @@ -472,7 +662,7 @@ func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, error) { reqLen := uint32(byteOrder.Uint32(scratch[:4])) payReq := make([]byte, reqLen) if reqLen > 0 { - if _, err := io.ReadFull(r, payReq[:]); err != nil { + if _, err := io.ReadFull(r, payReq); err != nil { return nil, err } } @@ -481,8 +671,8 @@ func deserializePaymentCreationInfo(r io.Reader) (*PaymentCreationInfo, error) { return c, nil } -func serializePaymentAttemptInfo(w io.Writer, a *PaymentAttemptInfo) error { - if err := 
WriteElements(w, a.PaymentID, a.SessionKey); err != nil { +func serializeHTLCAttemptInfo(w io.Writer, a *HTLCAttemptInfo) error { + if err := WriteElements(w, a.SessionKey); err != nil { return err } @@ -490,12 +680,12 @@ func serializePaymentAttemptInfo(w io.Writer, a *PaymentAttemptInfo) error { return err } - return nil + return serializeTime(w, a.AttemptTime) } -func deserializePaymentAttemptInfo(r io.Reader) (*PaymentAttemptInfo, error) { - a := &PaymentAttemptInfo{} - err := ReadElements(r, &a.PaymentID, &a.SessionKey) +func deserializeHTLCAttemptInfo(r io.Reader) (*HTLCAttemptInfo, error) { + a := &HTLCAttemptInfo{} + err := ReadElements(r, &a.SessionKey) if err != nil { return nil, err } @@ -503,12 +693,20 @@ func deserializePaymentAttemptInfo(r io.Reader) (*PaymentAttemptInfo, error) { if err != nil { return nil, err } + + a.AttemptTime, err = deserializeTime(r) + if err != nil { + return nil, err + } + return a, nil } func serializeHop(w io.Writer, h *route.Hop) error { if err := WriteElements(w, - h.PubKeyBytes[:], h.ChannelID, h.OutgoingTimeLock, + h.PubKeyBytes[:], + h.ChannelID, + h.OutgoingTimeLock, h.AmtToForward, ); err != nil { return err @@ -525,10 +723,34 @@ func serializeHop(w io.Writer, h *route.Hop) error { return WriteElements(w, uint32(0)) } + // Gather all non-primitive TLV records so that they can be serialized + // as a single blob. + // + // TODO(conner): add migration to unify all fields in a single TLV + // blobs. The split approach will cause headaches down the road as more + // fields are added, which we can avoid by having a single TLV stream + // for all payload fields. + var records []tlv.Record + if h.MPP != nil { + records = append(records, h.MPP.Record()) + } + + // Final sanity check to absolutely rule out custom records that are not + // custom and write into the standard range. + if err := h.CustomRecords.Validate(); err != nil { + return err + } + + // Convert custom records to tlv and add to the record list. 
+ // MapToRecords sorts the list, so adding it here will keep the list + // canonical. + tlvRecords := tlv.MapToRecords(h.CustomRecords) + records = append(records, tlvRecords...) + // Otherwise, we'll transform our slice of records into a map of the // raw bytes, then serialize them in-line with a length (number of // elements) prefix. - mapRecords, err := tlv.RecordsToMap(h.TLVRecords) + mapRecords, err := tlv.RecordsToMap(records) if err != nil { return err } @@ -604,12 +826,30 @@ func deserializeHop(r io.Reader) (*route.Hop, error) { tlvMap[tlvType] = rawRecordBytes } - tlvRecords, err := tlv.MapToRecords(tlvMap) - if err != nil { - return nil, err + // If the MPP type is present, remove it from the generic TLV map and + // parse it back into a proper MPP struct. + // + // TODO(conner): add migration to unify all fields in a single TLV + // blobs. The split approach will cause headaches down the road as more + // fields are added, which we can avoid by having a single TLV stream + // for all payload fields. 
+ mppType := uint64(record.MPPOnionType) + if mppBytes, ok := tlvMap[mppType]; ok { + delete(tlvMap, mppType) + + var ( + mpp = &record.MPP{} + mppRec = mpp.Record() + r = bytes.NewReader(mppBytes) + ) + err := mppRec.Decode(r, uint64(len(mppBytes))) + if err != nil { + return nil, err + } + h.MPP = mpp } - h.TLVRecords = tlvRecords + h.CustomRecords = tlvMap return h, nil } diff --git a/channeldb/payments_test.go b/channeldb/payments_test.go index 8cc036fc84..1c6f09e694 100644 --- a/channeldb/payments_test.go +++ b/channeldb/payments_test.go @@ -2,8 +2,8 @@ package channeldb import ( "bytes" - "errors" "fmt" + "math" "math/rand" "reflect" "testing" @@ -11,8 +11,9 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lntypes" - "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/tlv" ) @@ -28,10 +29,11 @@ var ( ChannelID: 12345, OutgoingTimeLock: 111, AmtToForward: 555, - TLVRecords: []tlv.Record{ - tlv.MakeStaticRecord(1, nil, 3, tlvEncoder, nil), - tlv.MakeStaticRecord(2, nil, 3, tlvEncoder, nil), + CustomRecords: record.CustomSet{ + 65536: []byte{}, + 80001: []byte{}, }, + MPP: record.NewMPP(32, [32]byte{0x42}), } testHop2 = &route.Hop{ @@ -47,41 +49,13 @@ var ( TotalAmount: 1234567, SourcePubKey: route.NewVertex(pub), Hops: []*route.Hop{ - testHop1, testHop2, + testHop1, }, } ) -func makeFakePayment() *outgoingPayment { - fakeInvoice := &Invoice{ - // Use single second precision to avoid false positive test - // failures due to the monotonic time component. 
- CreationDate: time.Unix(time.Now().Unix(), 0), - Memo: []byte("fake memo"), - Receipt: []byte("fake receipt"), - PaymentRequest: []byte(""), - } - - copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:]) - fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000) - - fakePath := make([][33]byte, 3) - for i := 0; i < 3; i++ { - copy(fakePath[i][:], bytes.Repeat([]byte{byte(i)}, 33)) - } - - fakePayment := &outgoingPayment{ - Invoice: *fakeInvoice, - Fee: 101, - Path: fakePath, - TimeLockLength: 1000, - } - copy(fakePayment.PaymentPreimage[:], rev[:]) - return fakePayment -} - -func makeFakeInfo() (*PaymentCreationInfo, *PaymentAttemptInfo) { +func makeFakeInfo() (*PaymentCreationInfo, *HTLCAttemptInfo) { var preimg lntypes.Preimage copy(preimg[:], rev[:]) @@ -90,14 +64,15 @@ func makeFakeInfo() (*PaymentCreationInfo, *PaymentAttemptInfo) { Value: 1000, // Use single second precision to avoid false positive test // failures due to the monotonic time component. - CreationDate: time.Unix(time.Now().Unix(), 0), + CreationTime: time.Unix(time.Now().Unix(), 0), PaymentRequest: []byte(""), } - a := &PaymentAttemptInfo{ - PaymentID: 44, - SessionKey: priv, - Route: testRoute, + a := &HTLCAttemptInfo{ + AttemptID: 44, + SessionKey: priv, + Route: testRoute, + AttemptTime: time.Unix(100, 0), } return c, a } @@ -114,58 +89,6 @@ func randomBytes(minLen, maxLen int) ([]byte, error) { return randBuf, nil } -func makeRandomFakePayment() (*outgoingPayment, error) { - var err error - fakeInvoice := &Invoice{ - // Use single second precision to avoid false positive test - // failures due to the monotonic time component. 
- CreationDate: time.Unix(time.Now().Unix(), 0), - } - - fakeInvoice.Memo, err = randomBytes(1, 50) - if err != nil { - return nil, err - } - - fakeInvoice.Receipt, err = randomBytes(1, 50) - if err != nil { - return nil, err - } - - fakeInvoice.PaymentRequest, err = randomBytes(1, 50) - if err != nil { - return nil, err - } - - preImg, err := randomBytes(32, 33) - if err != nil { - return nil, err - } - copy(fakeInvoice.Terms.PaymentPreimage[:], preImg) - - fakeInvoice.Terms.Value = lnwire.MilliSatoshi(rand.Intn(10000)) - - fakePathLen := 1 + rand.Intn(5) - fakePath := make([][33]byte, fakePathLen) - for i := 0; i < fakePathLen; i++ { - b, err := randomBytes(33, 34) - if err != nil { - return nil, err - } - copy(fakePath[i][:], b) - } - - fakePayment := &outgoingPayment{ - Invoice: *fakeInvoice, - Fee: lnwire.MilliSatoshi(rand.Intn(1001)), - Path: fakePath, - TimeLockLength: uint32(rand.Intn(10000)), - } - copy(fakePayment.PaymentPreimage[:], fakeInvoice.Terms.PaymentPreimage[:]) - - return fakePayment, nil -} - func TestSentPaymentSerialization(t *testing.T) { t.Parallel() @@ -189,33 +112,34 @@ func TestSentPaymentSerialization(t *testing.T) { } b.Reset() - if err := serializePaymentAttemptInfo(&b, s); err != nil { + if err := serializeHTLCAttemptInfo(&b, s); err != nil { t.Fatalf("unable to serialize info: %v", err) } - newAttemptInfo, err := deserializePaymentAttemptInfo(&b) + newWireInfo, err := deserializeHTLCAttemptInfo(&b) if err != nil { t.Fatalf("unable to deserialize info: %v", err) } + newWireInfo.AttemptID = s.AttemptID // First we verify all the records match up porperly, as they aren't // able to be properly compared using reflect.DeepEqual. - err = assertRouteEqual(&s.Route, &newAttemptInfo.Route) + err = assertRouteEqual(&s.Route, &newWireInfo.Route) if err != nil { t.Fatalf("Routes do not match after "+ "serialization/deserialization: %v", err) } // Clear routes to allow DeepEqual to compare the remaining fields. 
- newAttemptInfo.Route = route.Route{} + newWireInfo.Route = route.Route{} s.Route = route.Route{} - if !reflect.DeepEqual(s, newAttemptInfo) { + if !reflect.DeepEqual(s, newWireInfo) { s.SessionKey.Curve = nil - newAttemptInfo.SessionKey.Curve = nil + newWireInfo.SessionKey.Curve = nil t.Fatalf("Payments do not match after "+ "serialization/deserialization %v vs %v", - spew.Sdump(s), spew.Sdump(newAttemptInfo), + spew.Sdump(s), spew.Sdump(newWireInfo), ) } } @@ -223,83 +147,14 @@ func TestSentPaymentSerialization(t *testing.T) { // assertRouteEquals compares to routes for equality and returns an error if // they are not equal. func assertRouteEqual(a, b *route.Route) error { - err := assertRouteHopRecordsEqual(a, b) - if err != nil { - return err - } - - // TLV records have already been compared and need to be cleared to - // properly compare the remaining fields using DeepEqual. - copyRouteNoHops := func(r *route.Route) *route.Route { - copy := *r - copy.Hops = make([]*route.Hop, len(r.Hops)) - for i, hop := range r.Hops { - hopCopy := *hop - hopCopy.TLVRecords = nil - copy.Hops[i] = &hopCopy - } - return © - } - - if !reflect.DeepEqual(copyRouteNoHops(a), copyRouteNoHops(b)) { - return fmt.Errorf("PaymentAttemptInfos don't match: %v vs %v", + if !reflect.DeepEqual(a, b) { + return fmt.Errorf("HTLCAttemptInfos don't match: %v vs %v", spew.Sdump(a), spew.Sdump(b)) } return nil } -func assertRouteHopRecordsEqual(r1, r2 *route.Route) error { - if len(r1.Hops) != len(r2.Hops) { - return errors.New("route hop count mismatch") - } - - for i := 0; i < len(r1.Hops); i++ { - records1 := r1.Hops[i].TLVRecords - records2 := r2.Hops[i].TLVRecords - if len(records1) != len(records2) { - return fmt.Errorf("route record count for hop %v "+ - "mismatch", i) - } - - for j := 0; j < len(records1); j++ { - expectedRecord := records1[j] - newRecord := records2[j] - - err := assertHopRecordsEqual(expectedRecord, newRecord) - if err != nil { - return fmt.Errorf("route record mismatch: 
%v", err) - } - } - } - - return nil -} - -func assertHopRecordsEqual(h1, h2 tlv.Record) error { - if h1.Type() != h2.Type() { - return fmt.Errorf("wrong type: expected %v, got %v", h1.Type(), - h2.Type()) - } - - var b bytes.Buffer - if err := h2.Encode(&b); err != nil { - return fmt.Errorf("unable to encode record: %v", err) - } - - if !bytes.Equal(b.Bytes(), tlvBytes) { - return fmt.Errorf("wrong raw record: expected %x, got %x", - tlvBytes, b.Bytes()) - } - - if h1.Size() != h2.Size() { - return fmt.Errorf("wrong size: expected %v, "+ - "got %v", h1.Size(), h2.Size()) - } - - return nil -} - func TestRouteSerialization(t *testing.T) { t.Parallel() @@ -322,3 +177,264 @@ func TestRouteSerialization(t *testing.T) { spew.Sdump(testRoute), spew.Sdump(route2)) } } + +// deletePayment removes a payment with paymentHash from the payments database. +func deletePayment(t *testing.T, db *DB, paymentHash lntypes.Hash) { + t.Helper() + + err := kvdb.Update(db, func(tx kvdb.RwTx) error { + payments := tx.ReadWriteBucket(paymentsRootBucket) + + err := payments.DeleteNestedBucket(paymentHash[:]) + if err != nil { + return err + } + + return nil + }) + + if err != nil { + t.Fatalf("could not delete "+ + "payment: %v", err) + } +} + +// TestQueryPayments tests retrieval of payments with forwards and reversed +// queries. +func TestQueryPayments(t *testing.T) { + // Define table driven test for QueryPayments. + // Test payments have sequence indices [1, 3, 4, 5, 6, 7]. + tests := []struct { + name string + query PaymentsQuery + firstIndex uint64 + lastIndex uint64 + + // expectedSeqNrs contains the set of sequence numbers we expect + // our query to return. 
+ expectedSeqNrs []uint64 + }{ + { + name: "IndexOffset at the end of the payments range", + query: PaymentsQuery{ + IndexOffset: 7, + MaxPayments: 7, + Reversed: false, + IncludeIncomplete: true, + }, + firstIndex: 0, + lastIndex: 0, + expectedSeqNrs: nil, + }, + { + name: "query in forwards order, start at beginning", + query: PaymentsQuery{ + IndexOffset: 0, + MaxPayments: 2, + Reversed: false, + IncludeIncomplete: true, + }, + firstIndex: 1, + lastIndex: 3, + expectedSeqNrs: []uint64{1, 3}, + }, + { + name: "query in forwards order, start at end, overflow", + query: PaymentsQuery{ + IndexOffset: 6, + MaxPayments: 2, + Reversed: false, + IncludeIncomplete: true, + }, + firstIndex: 7, + lastIndex: 7, + expectedSeqNrs: []uint64{7}, + }, + { + name: "start at offset index outside of payments", + query: PaymentsQuery{ + IndexOffset: 20, + MaxPayments: 2, + Reversed: false, + IncludeIncomplete: true, + }, + firstIndex: 0, + lastIndex: 0, + expectedSeqNrs: nil, + }, + { + name: "overflow in forwards order", + query: PaymentsQuery{ + IndexOffset: 4, + MaxPayments: math.MaxUint64, + Reversed: false, + IncludeIncomplete: true, + }, + firstIndex: 5, + lastIndex: 7, + expectedSeqNrs: []uint64{5, 6, 7}, + }, + { + name: "start at offset index outside of payments, " + + "reversed order", + query: PaymentsQuery{ + IndexOffset: 9, + MaxPayments: 2, + Reversed: true, + IncludeIncomplete: true, + }, + firstIndex: 6, + lastIndex: 7, + expectedSeqNrs: []uint64{6, 7}, + }, + { + name: "query in reverse order, start at end", + query: PaymentsQuery{ + IndexOffset: 0, + MaxPayments: 2, + Reversed: true, + IncludeIncomplete: true, + }, + firstIndex: 6, + lastIndex: 7, + expectedSeqNrs: []uint64{6, 7}, + }, + { + name: "query in reverse order, starting in middle", + query: PaymentsQuery{ + IndexOffset: 4, + MaxPayments: 2, + Reversed: true, + IncludeIncomplete: true, + }, + firstIndex: 1, + lastIndex: 3, + expectedSeqNrs: []uint64{1, 3}, + }, + { + name: "query in reverse order, 
starting in middle, " + + "with underflow", + query: PaymentsQuery{ + IndexOffset: 4, + MaxPayments: 5, + Reversed: true, + IncludeIncomplete: true, + }, + firstIndex: 1, + lastIndex: 3, + expectedSeqNrs: []uint64{1, 3}, + }, + { + name: "all payments in reverse, order maintained", + query: PaymentsQuery{ + IndexOffset: 0, + MaxPayments: 7, + Reversed: true, + IncludeIncomplete: true, + }, + firstIndex: 1, + lastIndex: 7, + expectedSeqNrs: []uint64{1, 3, 4, 5, 6, 7}, + }, + { + name: "exclude incomplete payments", + query: PaymentsQuery{ + IndexOffset: 0, + MaxPayments: 7, + Reversed: false, + IncludeIncomplete: false, + }, + firstIndex: 0, + lastIndex: 0, + expectedSeqNrs: nil, + }, + { + name: "query payments at index gap", + query: PaymentsQuery{ + IndexOffset: 1, + MaxPayments: 7, + Reversed: false, + IncludeIncomplete: true, + }, + firstIndex: 3, + lastIndex: 7, + expectedSeqNrs: []uint64{3, 4, 5, 6, 7}, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + db, err := initDB() + if err != nil { + t.Fatalf("unable to init db: %v", err) + } + + // Populate the database with a set of test payments. + numberOfPayments := 7 + pControl := NewPaymentControl(db) + + for i := 0; i < numberOfPayments; i++ { + // Generate a test payment. + info, _, _, err := genInfo() + if err != nil { + t.Fatalf("unable to create test "+ + "payment: %v", err) + } + + // Create a new payment entry in the database. + err = pControl.InitPayment(info.PaymentHash, info) + if err != nil { + t.Fatalf("unable to initialize "+ + "payment in database: %v", err) + } + + // Immediately delete the payment with index 2. + if i == 1 { + deletePayment(t, db, info.PaymentHash) + } + } + + // Fetch all payments in the database. 
+ allPayments, err := db.FetchPayments() + if err != nil { + t.Fatalf("payments could not be fetched from "+ + "database: %v", err) + } + + if len(allPayments) != 6 { + t.Fatalf("Number of payments received does not "+ + "match expected one. Got %v, want %v.", + len(allPayments), 6) + } + + querySlice, err := db.QueryPayments(tt.query) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if tt.firstIndex != querySlice.FirstIndexOffset || + tt.lastIndex != querySlice.LastIndexOffset { + t.Errorf("First or last index does not match "+ + "expected index. Want (%d, %d), got (%d, %d).", + tt.firstIndex, tt.lastIndex, + querySlice.FirstIndexOffset, + querySlice.LastIndexOffset) + } + + if len(querySlice.Payments) != len(tt.expectedSeqNrs) { + t.Errorf("expected: %v payments, got: %v", + len(allPayments), len(querySlice.Payments)) + } + + for i, seqNr := range tt.expectedSeqNrs { + q := querySlice.Payments[i] + if seqNr != q.SequenceNum { + t.Errorf("sequence numbers do not match, "+ + "got %v, want %v", q.SequenceNum, seqNr) + } + } + }) + } +} diff --git a/channeldb/waitingproof.go b/channeldb/waitingproof.go index 74b80a5159..dbaddaaec4 100644 --- a/channeldb/waitingproof.go +++ b/channeldb/waitingproof.go @@ -8,8 +8,8 @@ import ( "bytes" - "github.com/coreos/bbolt" "github.com/go-errors/errors" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" ) @@ -61,12 +61,12 @@ func (s *WaitingProofStore) Add(proof *WaitingProof) error { s.mu.Lock() defer s.mu.Unlock() - err := s.db.Update(func(tx *bbolt.Tx) error { + err := kvdb.Update(s.db, func(tx kvdb.RwTx) error { var err error var b bytes.Buffer // Get or create the bucket. 
- bucket, err := tx.CreateBucketIfNotExists(waitingProofsBucketKey) + bucket, err := tx.CreateTopLevelBucket(waitingProofsBucketKey) if err != nil { return err } @@ -100,9 +100,9 @@ func (s *WaitingProofStore) Remove(key WaitingProofKey) error { return ErrWaitingProofNotFound } - err := s.db.Update(func(tx *bbolt.Tx) error { + err := kvdb.Update(s.db, func(tx kvdb.RwTx) error { // Get or create the top bucket. - bucket := tx.Bucket(waitingProofsBucketKey) + bucket := tx.ReadWriteBucket(waitingProofsBucketKey) if bucket == nil { return ErrWaitingProofNotFound } @@ -123,8 +123,8 @@ func (s *WaitingProofStore) Remove(key WaitingProofKey) error { // ForAll iterates thought all waiting proofs and passing the waiting proof // in the given callback. func (s *WaitingProofStore) ForAll(cb func(*WaitingProof) error) error { - return s.db.View(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(waitingProofsBucketKey) + return kvdb.View(s.db, func(tx kvdb.ReadTx) error { + bucket := tx.ReadBucket(waitingProofsBucketKey) if bucket == nil { return ErrWaitingProofNotFound } @@ -158,8 +158,8 @@ func (s *WaitingProofStore) Get(key WaitingProofKey) (*WaitingProof, error) { return nil, ErrWaitingProofNotFound } - err := s.db.View(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(waitingProofsBucketKey) + err := kvdb.View(s.db, func(tx kvdb.ReadTx) error { + bucket := tx.ReadBucket(waitingProofsBucketKey) if bucket == nil { return ErrWaitingProofNotFound } diff --git a/channeldb/witness_cache.go b/channeldb/witness_cache.go index 033662597b..7f52ada52a 100644 --- a/channeldb/witness_cache.go +++ b/channeldb/witness_cache.go @@ -3,7 +3,7 @@ package channeldb import ( "fmt" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lntypes" ) @@ -106,8 +106,8 @@ func (w *WitnessCache) addWitnessEntries(wType WitnessType, return nil } - return w.db.Batch(func(tx *bbolt.Tx) error { - witnessBucket, err := 
tx.CreateBucketIfNotExists(witnessBucketKey) + return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error { + witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey) if err != nil { return err } @@ -150,8 +150,8 @@ func (w *WitnessCache) LookupSha256Witness(hash lntypes.Hash) (lntypes.Preimage, // will be returned. func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]byte, error) { var witness []byte - err := w.db.View(func(tx *bbolt.Tx) error { - witnessBucket := tx.Bucket(witnessBucketKey) + err := kvdb.View(w.db, func(tx kvdb.ReadTx) error { + witnessBucket := tx.ReadBucket(witnessBucketKey) if witnessBucket == nil { return ErrNoWitnesses } @@ -160,7 +160,7 @@ func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]by if err != nil { return err } - witnessTypeBucket := witnessBucket.Bucket(witnessTypeBucketKey) + witnessTypeBucket := witnessBucket.NestedReadBucket(witnessTypeBucketKey) if witnessTypeBucket == nil { return ErrNoWitnesses } @@ -189,8 +189,8 @@ func (w *WitnessCache) DeleteSha256Witness(hash lntypes.Hash) error { // deleteWitness attempts to delete a particular witness from the database. func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error { - return w.db.Batch(func(tx *bbolt.Tx) error { - witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey) + return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error { + witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey) if err != nil { return err } @@ -213,8 +213,8 @@ func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error // DeleteWitnessClass attempts to delete an *entire* class of witnesses. 
After // this function return with a non-nil error, func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error { - return w.db.Batch(func(tx *bbolt.Tx) error { - witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey) + return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error { + witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey) if err != nil { return err } @@ -224,6 +224,6 @@ func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error { return err } - return witnessBucket.DeleteBucket(witnessTypeBucketKey) + return witnessBucket.DeleteNestedBucket(witnessTypeBucketKey) }) } diff --git a/channelnotifier/channelnotifier.go b/channelnotifier/channelnotifier.go index fa6cbcddde..5d67fd51c0 100644 --- a/channelnotifier/channelnotifier.go +++ b/channelnotifier/channelnotifier.go @@ -20,6 +20,19 @@ type ChannelNotifier struct { chanDB *channeldb.DB } +// PendingOpenChannelEvent represents a new event where a new channel has +// entered a pending open state. +type PendingOpenChannelEvent struct { + // ChannelPoint is the channel outpoint for the new channel. + ChannelPoint *wire.OutPoint + + // PendingChannel is the channel configuration for the newly created + // channel. This might not have been persisted to the channel DB yet + // because we are still waiting for the final message from the remote + // peer. + PendingChannel *channeldb.OpenChannel +} + // OpenChannelEvent represents a new event where a channel goes from pending // open to open. type OpenChannelEvent struct { @@ -27,6 +40,13 @@ type OpenChannelEvent struct { Channel *channeldb.OpenChannel } +// ActiveLinkEvent represents a new event where the link becomes active in the +// switch. This happens before the ActiveChannelEvent. +type ActiveLinkEvent struct { + // ChannelPoint is the channel point for the newly active channel. + ChannelPoint *wire.OutPoint +} + // ActiveChannelEvent represents a new event where a channel becomes active. 
type ActiveChannelEvent struct { // ChannelPoint is the channelpoint for the newly active channel. @@ -73,11 +93,33 @@ func (c *ChannelNotifier) Stop() { } // SubscribeChannelEvents returns a subscribe.Client that will receive updates -// any time the Server is made aware of a new event. +// any time the Server is made aware of a new event. The subscription provides +// channel events from the point of subscription onwards. +// +// TODO(carlaKC): update to allow subscriptions to specify a block height from +// which we would like to subscribe to events. func (c *ChannelNotifier) SubscribeChannelEvents() (*subscribe.Client, error) { return c.ntfnServer.Subscribe() } +// NotifyPendingOpenChannelEvent notifies the channelEventNotifier goroutine +// that a new channel is pending. The pending channel is passed as a parameter +// instead of read from the database because it might not yet have been +// persisted to the DB because we still wait for the final message from the +// remote peer. +func (c *ChannelNotifier) NotifyPendingOpenChannelEvent(chanPoint wire.OutPoint, + pendingChan *channeldb.OpenChannel) { + + event := PendingOpenChannelEvent{ + ChannelPoint: &chanPoint, + PendingChannel: pendingChan, + } + + if err := c.ntfnServer.SendUpdate(event); err != nil { + log.Warnf("Unable to send pending open channel update: %v", err) + } +} + // NotifyOpenChannelEvent notifies the channelEventNotifier goroutine that a // channel has gone from pending open to open. func (c *ChannelNotifier) NotifyOpenChannelEvent(chanPoint wire.OutPoint) { @@ -111,6 +153,15 @@ func (c *ChannelNotifier) NotifyClosedChannelEvent(chanPoint wire.OutPoint) { } } +// NotifyActiveLinkEvent notifies the channelEventNotifier goroutine that a +// link has been added to the switch. 
+func (c *ChannelNotifier) NotifyActiveLinkEvent(chanPoint wire.OutPoint) { + event := ActiveLinkEvent{ChannelPoint: &chanPoint} + if err := c.ntfnServer.SendUpdate(event); err != nil { + log.Warnf("Unable to send active link update: %v", err) + } +} + // NotifyActiveChannelEvent notifies the channelEventNotifier goroutine that a // channel is active. func (c *ChannelNotifier) NotifyActiveChannelEvent(chanPoint wire.OutPoint) { diff --git a/chanrestore.go b/chanrestore.go index 6196aed587..c42f0de37c 100644 --- a/chanrestore.go +++ b/chanrestore.go @@ -2,9 +2,11 @@ package lnd import ( "fmt" + "math" "net" "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/lightningnetwork/lnd/chanbackup" "github.com/lightningnetwork/lnd/channeldb" @@ -14,6 +16,18 @@ import ( "github.com/lightningnetwork/lnd/shachain" ) +const ( + // mainnetSCBLaunchBlock is the approximate block height of the bitcoin + // mainnet chain of the date when SCBs first were released in lnd + // (v0.6.0-beta). The block date is 4/15/2019, 10:54 PM UTC. + mainnetSCBLaunchBlock = 571800 + + // testnetSCBLaunchBlock is the approximate block height of the bitcoin + // testnet3 chain of the date when SCBs first were released in lnd + // (v0.6.0-beta). The block date is 4/16/2019, 08:04 AM UTC. + testnetSCBLaunchBlock = 1489300 +) + // chanDBRestorer is an implementation of the chanbackup.ChannelRestorer // interface that is able to properly map a Single backup, into a // channeldb.ChannelShell which is required to fully restore a channel. 
We also @@ -87,15 +101,22 @@ func (c *chanDBRestorer) openChannelShell(backup chanbackup.Single) ( switch backup.Version { case chanbackup.DefaultSingleVersion: - chanType = channeldb.SingleFunder + chanType = channeldb.SingleFunderBit case chanbackup.TweaklessCommitVersion: - chanType = channeldb.SingleFunderTweakless + chanType = channeldb.SingleFunderTweaklessBit + + case chanbackup.AnchorsCommitVersion: + chanType = channeldb.AnchorOutputsBit + chanType |= channeldb.SingleFunderTweaklessBit default: return nil, fmt.Errorf("unknown Single version: %v", err) } + ltndLog.Infof("SCB Recovery: created channel shell for ChannelPoint(%v), "+ + "chan_type=%v", backup.FundingOutpoint, chanType) + chanShell := channeldb.ChannelShell{ NodeAddrs: backup.Addresses, Chan: &channeldb.OpenChannel{ @@ -126,15 +147,72 @@ func (c *chanDBRestorer) openChannelShell(backup chanbackup.Single) ( // NOTE: Part of the chanbackup.ChannelRestorer interface. func (c *chanDBRestorer) RestoreChansFromSingles(backups ...chanbackup.Single) error { channelShells := make([]*channeldb.ChannelShell, 0, len(backups)) + firstChanHeight := uint32(math.MaxUint32) for _, backup := range backups { chanShell, err := c.openChannelShell(backup) if err != nil { return err } + // Find the block height of the earliest channel in this backup. + chanHeight := chanShell.Chan.ShortChanID().BlockHeight + if chanHeight != 0 && chanHeight < firstChanHeight { + firstChanHeight = chanHeight + } + channelShells = append(channelShells, chanShell) } + // In case there were only unconfirmed channels, we will have to scan + // the chain beginning from the launch date of SCBs. 
+ if firstChanHeight == math.MaxUint32 { + chainHash := channelShells[0].Chan.ChainHash + switch { + case chainHash.IsEqual(chaincfg.MainNetParams.GenesisHash): + firstChanHeight = mainnetSCBLaunchBlock + + case chainHash.IsEqual(chaincfg.TestNet3Params.GenesisHash): + firstChanHeight = testnetSCBLaunchBlock + + default: + // Worst case: We have no height hint and start at + // block 1. Should only happen for SCBs in regtest, + // simnet and litecoin. + firstChanHeight = 1 + } + } + + // If there were channels in the backup that were not confirmed at the + // time of the backup creation, they won't have a block height in the + // ShortChanID which would lead to an error in the chain watcher. + // We want to at least set the funding broadcast height that the chain + // watcher can use instead. We have two possible fallback values for + // the broadcast height that we are going to try here. + for _, chanShell := range channelShells { + channel := chanShell.Chan + + switch { + // Fallback case 1: It is extremely unlikely at this point that + // a channel we are trying to restore has a coinbase funding TX. + // Therefore we can be quite certain that if the TxIndex is + // zero, it was an unconfirmed channel where we used the + // BlockHeight to encode the funding TX broadcast height. To not + // end up with an invalid short channel ID that looks valid, we + // restore the "original" unconfirmed one here. + case channel.ShortChannelID.TxIndex == 0: + broadcastHeight := channel.ShortChannelID.BlockHeight + channel.FundingBroadcastHeight = broadcastHeight + channel.ShortChannelID.BlockHeight = 0 + + // Fallback case 2: This is an unconfirmed channel from an old + // backup file where we didn't have any workaround in place. + // Best we can do here is set the funding broadcast height to a + // reasonable value that we determined earlier. 
+ case channel.ShortChanID().BlockHeight == 0: + channel.FundingBroadcastHeight = firstChanHeight + } + } + ltndLog.Infof("Inserting %v SCB channel shells into DB", len(channelShells)) diff --git a/clock/default_clock.go b/clock/default_clock.go new file mode 100644 index 0000000000..3a4f8df323 --- /dev/null +++ b/clock/default_clock.go @@ -0,0 +1,24 @@ +package clock + +import ( + "time" +) + +// DefaultClock implements Clock interface by simply calling the appropriate +// time functions. +type DefaultClock struct{} + +// NewDefaultClock constructs a new DefaultClock. +func NewDefaultClock() Clock { + return &DefaultClock{} +} + +// Now simply returns time.Now(). +func (DefaultClock) Now() time.Time { + return time.Now() +} + +// TickAfter simply wraps time.After(). +func (DefaultClock) TickAfter(duration time.Duration) <-chan time.Time { + return time.After(duration) +} diff --git a/clock/interface.go b/clock/interface.go new file mode 100644 index 0000000000..0450410e31 --- /dev/null +++ b/clock/interface.go @@ -0,0 +1,16 @@ +package clock + +import ( + "time" +) + +// Clock is an interface that provides a time functions for LND packages. +// This is useful during testing when a concrete time reference is needed. +type Clock interface { + // Now returns the current local time (as defined by the Clock). + Now() time.Time + + // TickAfter returns a channel that will receive a tick after the specified + // duration has passed. + TickAfter(duration time.Duration) <-chan time.Time +} diff --git a/clock/test_clock.go b/clock/test_clock.go new file mode 100644 index 0000000000..f4319cee38 --- /dev/null +++ b/clock/test_clock.go @@ -0,0 +1,75 @@ +package clock + +import ( + "sync" + "time" +) + +// TestClock can be used in tests to mock time. +type TestClock struct { + currentTime time.Time + timeChanMap map[time.Time][]chan time.Time + timeLock sync.Mutex +} + +// NewTestClock returns a new test clock. 
+func NewTestClock(startTime time.Time) *TestClock {
+	return &TestClock{
+		currentTime: startTime,
+		timeChanMap: make(map[time.Time][]chan time.Time),
+	}
+}
+
+// Now returns the current (test) time.
+func (c *TestClock) Now() time.Time {
+	c.timeLock.Lock()
+	defer c.timeLock.Unlock()
+
+	return c.currentTime
+}
+
+// TickAfter returns a channel that will receive a tick after the specified
+// duration has passed by the user set test time.
+func (c *TestClock) TickAfter(duration time.Duration) <-chan time.Time {
+	c.timeLock.Lock()
+	defer c.timeLock.Unlock()
+
+	triggerTime := c.currentTime.Add(duration)
+	ch := make(chan time.Time, 1)
+
+	// If already expired, tick immediately.
+	if !triggerTime.After(c.currentTime) {
+		ch <- c.currentTime
+		return ch
+	}
+
+	// Otherwise store the channel until the trigger time is there.
+	chans := c.timeChanMap[triggerTime]
+	chans = append(chans, ch)
+	c.timeChanMap[triggerTime] = chans
+
+	return ch
+}
+
+// SetTime sets the (test) time and triggers tick channels when they expire.
+func (c *TestClock) SetTime(now time.Time) {
+	c.timeLock.Lock()
+	defer c.timeLock.Unlock()
+
+	c.currentTime = now
+	remainingChans := make(map[time.Time][]chan time.Time)
+	for triggerTime, chans := range c.timeChanMap {
+		// If the trigger time is still in the future, keep this channel
+		// in the channel map for later.
+ if triggerTime.After(now) { + remainingChans[triggerTime] = chans + continue + } + + for _, c := range chans { + c <- now + } + } + + c.timeChanMap = remainingChans +} diff --git a/clock/test_clock_test.go b/clock/test_clock_test.go new file mode 100644 index 0000000000..879cc8fd19 --- /dev/null +++ b/clock/test_clock_test.go @@ -0,0 +1,63 @@ +package clock + +import ( + "testing" + "time" +) + +var ( + testTime = time.Date(2009, time.January, 3, 12, 0, 0, 0, time.UTC) +) + +func TestNow(t *testing.T) { + c := NewTestClock(testTime) + now := c.Now() + + if now != testTime { + t.Fatalf("expected: %v, got: %v", testTime, now) + } + + now = now.Add(time.Hour) + c.SetTime(now) + if c.Now() != now { + t.Fatalf("epected: %v, got: %v", now, c.Now()) + } +} + +func TestTickAfter(t *testing.T) { + c := NewTestClock(testTime) + + // Should be ticking immediately. + ticker0 := c.TickAfter(0) + + // Both should be ticking after SetTime + ticker1 := c.TickAfter(time.Hour) + ticker2 := c.TickAfter(time.Hour) + + // We don't expect this one to tick. 
+ ticker3 := c.TickAfter(2 * time.Hour) + + tickOrTimeOut := func(ticker <-chan time.Time, expectTick bool) { + tick := false + select { + case <-ticker: + tick = true + case <-time.After(time.Millisecond): + } + + if tick != expectTick { + t.Fatalf("expected tick: %v, ticked: %v", expectTick, tick) + } + } + + tickOrTimeOut(ticker0, true) + tickOrTimeOut(ticker1, false) + tickOrTimeOut(ticker2, false) + tickOrTimeOut(ticker3, false) + + c.SetTime(c.Now().Add(time.Hour)) + + tickOrTimeOut(ticker1, true) + tickOrTimeOut(ticker2, true) + tickOrTimeOut(ticker3, false) +} diff --git a/cmd/lncli/autopilotrpc_active.go b/cmd/lncli/autopilotrpc_active.go index 16bd517c2a..7ad87304a6 100644 --- a/cmd/lncli/autopilotrpc_active.go +++ b/cmd/lncli/autopilotrpc_active.go @@ -96,7 +96,7 @@ func disable(ctx *cli.Context) error { var queryScoresCommand = cli.Command{ Name: "query", - Usage: "Query the autopilot heuristcs for nodes' scores.", + Usage: "Query the autopilot heuristics for nodes' scores.", ArgsUsage: "[flags] ...", Description: "", Action: actionDecorator(queryScores), diff --git a/cmd/lncli/cmd_bake_macaroon.go b/cmd/lncli/cmd_bake_macaroon.go new file mode 100644 index 0000000000..5929a536dc --- /dev/null +++ b/cmd/lncli/cmd_bake_macaroon.go @@ -0,0 +1,182 @@ +package main + +import ( + "context" + "encoding/hex" + "fmt" + "io/ioutil" + "net" + "strings" + + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/macaroons" + "github.com/urfave/cli" + "gopkg.in/macaroon.v2" +) + +var bakeMacaroonCommand = cli.Command{ + Name: "bakemacaroon", + Category: "Macaroons", + Usage: "Bakes a new macaroon with the provided list of permissions " + + "and restrictions", + ArgsUsage: "[--save_to=] [--timeout=] [--ip_address=] permissions...", + Description: ` + Bake a new macaroon that grants the provided permissions and + optionally adds restrictions (timeout, IP address) to it. 
+ + The new macaroon can either be shown on command line in hex serialized + format or it can be saved directly to a file using the --save_to + argument. + + A permission is a tuple of an entity and an action, separated by a + colon. Multiple operations can be added as arguments, for example: + + lncli bakemacaroon info:read invoices:write foo:bar + `, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "save_to", + Usage: "save the created macaroon to this file " + + "using the default binary format", + }, + cli.Uint64Flag{ + Name: "timeout", + Usage: "the number of seconds the macaroon will be " + + "valid before it times out", + }, + cli.StringFlag{ + Name: "ip_address", + Usage: "the IP address the macaroon will be bound to", + }, + }, + Action: actionDecorator(bakeMacaroon), +} + +func bakeMacaroon(ctx *cli.Context) error { + client, cleanUp := getClient(ctx) + defer cleanUp() + + // Show command help if no arguments. + if ctx.NArg() == 0 { + return cli.ShowCommandHelp(ctx, "bakemacaroon") + } + args := ctx.Args() + + var ( + savePath string + timeout int64 + ipAddress net.IP + parsedPermissions []*lnrpc.MacaroonPermission + err error + ) + + if ctx.String("save_to") != "" { + savePath = cleanAndExpandPath(ctx.String("save_to")) + } + + if ctx.IsSet("timeout") { + timeout = ctx.Int64("timeout") + if timeout <= 0 { + return fmt.Errorf("timeout must be greater than 0") + } + } + + if ctx.IsSet("ip_address") { + ipAddress = net.ParseIP(ctx.String("ip_address")) + if ipAddress == nil { + return fmt.Errorf("unable to parse ip_address: %s", + ctx.String("ip_address")) + } + } + + // A command line argument can't be an empty string. So we'll check each + // entry if it's a valid entity:action tuple. The content itself is + // validated server side. We just make sure we can parse it correctly. 
+ for _, permission := range args { + tuple := strings.Split(permission, ":") + if len(tuple) != 2 { + return fmt.Errorf("unable to parse "+ + "permission tuple: %s", permission) + } + entity, action := tuple[0], tuple[1] + if entity == "" { + return fmt.Errorf("invalid permission [%s]. entity "+ + "cannot be empty", permission) + } + if action == "" { + return fmt.Errorf("invalid permission [%s]. action "+ + "cannot be empty", permission) + } + + // No we can assume that we have a formally valid entity:action + // tuple. The rest of the validation happens server side. + parsedPermissions = append( + parsedPermissions, &lnrpc.MacaroonPermission{ + Entity: entity, + Action: action, + }, + ) + } + + // Now we have gathered all the input we need and can do the actual + // RPC call. + req := &lnrpc.BakeMacaroonRequest{ + Permissions: parsedPermissions, + } + resp, err := client.BakeMacaroon(context.Background(), req) + if err != nil { + return err + } + + // Now we should have gotten a valid macaroon. Unmarshal it so we can + // add first-party caveats (if necessary) to it. + macBytes, err := hex.DecodeString(resp.Macaroon) + if err != nil { + return err + } + unmarshalMac := &macaroon.Macaroon{} + if err = unmarshalMac.UnmarshalBinary(macBytes); err != nil { + return err + } + + // Now apply the desired constraints to the macaroon. This will always + // create a new macaroon object, even if no constraints are added. + macConstraints := make([]macaroons.Constraint, 0) + if timeout > 0 { + macConstraints = append( + macConstraints, macaroons.TimeoutConstraint(timeout), + ) + } + if ipAddress != nil { + macConstraints = append( + macConstraints, + macaroons.IPLockConstraint(ipAddress.String()), + ) + } + constrainedMac, err := macaroons.AddConstraints( + unmarshalMac, macConstraints..., + ) + if err != nil { + return err + } + macBytes, err = constrainedMac.MarshalBinary() + if err != nil { + return err + } + + // Now we can output the result. 
We either write it binary serialized to + // a file or write to the standard output using hex encoding. + switch { + case savePath != "": + err = ioutil.WriteFile(savePath, macBytes, 0644) + if err != nil { + return err + } + fmt.Printf("Macaroon saved to %s\n", savePath) + + default: + fmt.Printf("%s\n", hex.EncodeToString(macBytes)) + } + + return nil +} diff --git a/cmd/lncli/cmd_build_route.go b/cmd/lncli/cmd_build_route.go index 64cda08fa4..81c5bfb04f 100644 --- a/cmd/lncli/cmd_build_route.go +++ b/cmd/lncli/cmd_build_route.go @@ -1,5 +1,3 @@ -// +build routerrpc - package main import ( @@ -88,7 +86,7 @@ func buildRoute(ctx *cli.Context) error { return err } - printJSON(route) + printRespJSON(route) return nil } diff --git a/cmd/lncli/cmd_invoice.go b/cmd/lncli/cmd_invoice.go new file mode 100644 index 0000000000..feb56553e2 --- /dev/null +++ b/cmd/lncli/cmd_invoice.go @@ -0,0 +1,288 @@ +package main + +import ( + "context" + "encoding/hex" + "fmt" + "strconv" + + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/urfave/cli" +) + +var addInvoiceCommand = cli.Command{ + Name: "addinvoice", + Category: "Invoices", + Usage: "Add a new invoice.", + Description: ` + Add a new invoice, expressing intent for a future payment. + + Invoices without an amount can be created by not supplying any + parameters or providing an amount of 0. These invoices allow the payee + to specify the amount of satoshis they wish to send.`, + ArgsUsage: "value preimage", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "memo", + Usage: "a description of the payment to attach along " + + "with the invoice (default=\"\")", + }, + cli.StringFlag{ + Name: "preimage", + Usage: "the hex-encoded preimage (32 byte) which will " + + "allow settling an incoming HTLC payable to this " + + "preimage. 
If not set, a random preimage will be " + + "created.", + }, + cli.Int64Flag{ + Name: "amt", + Usage: "the amt of satoshis in this invoice", + }, + cli.StringFlag{ + Name: "description_hash", + Usage: "SHA-256 hash of the description of the payment. " + + "Used if the purpose of payment cannot naturally " + + "fit within the memo. If provided this will be " + + "used instead of the description(memo) field in " + + "the encoded invoice.", + }, + cli.StringFlag{ + Name: "fallback_addr", + Usage: "fallback on-chain address that can be used in " + + "case the lightning payment fails", + }, + cli.Int64Flag{ + Name: "expiry", + Usage: "the invoice's expiry time in seconds. If not " + + "specified an expiry of 3600 seconds (1 hour) " + + "is implied.", + }, + cli.BoolTFlag{ + Name: "private", + Usage: "encode routing hints in the invoice with " + + "private channels in order to assist the " + + "payer in reaching you", + }, + }, + Action: actionDecorator(addInvoice), +} + +func addInvoice(ctx *cli.Context) error { + var ( + preimage []byte + descHash []byte + amt int64 + err error + ) + + client, cleanUp := getClient(ctx) + defer cleanUp() + + args := ctx.Args() + + switch { + case ctx.IsSet("amt"): + amt = ctx.Int64("amt") + case args.Present(): + amt, err = strconv.ParseInt(args.First(), 10, 64) + args = args.Tail() + if err != nil { + return fmt.Errorf("unable to decode amt argument: %v", err) + } + } + + switch { + case ctx.IsSet("preimage"): + preimage, err = hex.DecodeString(ctx.String("preimage")) + case args.Present(): + preimage, err = hex.DecodeString(args.First()) + } + + if err != nil { + return fmt.Errorf("unable to parse preimage: %v", err) + } + + descHash, err = hex.DecodeString(ctx.String("description_hash")) + if err != nil { + return fmt.Errorf("unable to parse description_hash: %v", err) + } + + invoice := &lnrpc.Invoice{ + Memo: ctx.String("memo"), + RPreimage: preimage, + Value: amt, + DescriptionHash: descHash, + FallbackAddr: 
ctx.String("fallback_addr"), + Expiry: ctx.Int64("expiry"), + Private: ctx.Bool("private"), + } + + resp, err := client.AddInvoice(context.Background(), invoice) + if err != nil { + return err + } + + printRespJSON(resp) + + return nil +} + +var lookupInvoiceCommand = cli.Command{ + Name: "lookupinvoice", + Category: "Invoices", + Usage: "Lookup an existing invoice by its payment hash.", + ArgsUsage: "rhash", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "rhash", + Usage: "the 32 byte payment hash of the invoice to query for, the hash " + + "should be a hex-encoded string", + }, + }, + Action: actionDecorator(lookupInvoice), +} + +func lookupInvoice(ctx *cli.Context) error { + client, cleanUp := getClient(ctx) + defer cleanUp() + + var ( + rHash []byte + err error + ) + + switch { + case ctx.IsSet("rhash"): + rHash, err = hex.DecodeString(ctx.String("rhash")) + case ctx.Args().Present(): + rHash, err = hex.DecodeString(ctx.Args().First()) + default: + return fmt.Errorf("rhash argument missing") + } + + if err != nil { + return fmt.Errorf("unable to decode rhash argument: %v", err) + } + + req := &lnrpc.PaymentHash{ + RHash: rHash, + } + + invoice, err := client.LookupInvoice(context.Background(), req) + if err != nil { + return err + } + + printRespJSON(invoice) + + return nil +} + +var listInvoicesCommand = cli.Command{ + Name: "listinvoices", + Category: "Invoices", + Usage: "List all invoices currently stored within the database. Any " + + "active debug invoices are ignored.", + Description: ` + This command enables the retrieval of all invoices currently stored + within the database. It has full support for paginationed responses, + allowing users to query for specific invoices through their add_index. + This can be done by using either the first_index_offset or + last_index_offset fields included in the response as the index_offset of + the next request. Backward pagination is enabled by default to receive + current invoices first. 
If you wish to paginate forwards, set the + paginate-forwards flag. If none of the parameters are specified, then + the last 100 invoices will be returned. + + For example: if you have 200 invoices, "lncli listinvoices" will return + the last 100 created. If you wish to retrieve the previous 100, the + first_offset_index of the response can be used as the index_offset of + the next listinvoices request.`, + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "pending_only", + Usage: "toggles if all invoices should be returned, " + + "or only those that are currently unsettled", + }, + cli.Uint64Flag{ + Name: "index_offset", + Usage: "the index of an invoice that will be used as " + + "either the start or end of a query to " + + "determine which invoices should be returned " + + "in the response", + }, + cli.Uint64Flag{ + Name: "max_invoices", + Usage: "the max number of invoices to return", + }, + cli.BoolFlag{ + Name: "paginate-forwards", + Usage: "if set, invoices succeeding the " + + "index_offset will be returned", + }, + }, + Action: actionDecorator(listInvoices), +} + +func listInvoices(ctx *cli.Context) error { + client, cleanUp := getClient(ctx) + defer cleanUp() + + req := &lnrpc.ListInvoiceRequest{ + PendingOnly: ctx.Bool("pending_only"), + IndexOffset: ctx.Uint64("index_offset"), + NumMaxInvoices: ctx.Uint64("max_invoices"), + Reversed: !ctx.Bool("paginate-forwards"), + } + + invoices, err := client.ListInvoices(context.Background(), req) + if err != nil { + return err + } + + printRespJSON(invoices) + + return nil +} + +var decodePayReqCommand = cli.Command{ + Name: "decodepayreq", + Category: "Invoices", + Usage: "Decode a payment request.", + Description: "Decode the passed payment request revealing the destination, payment hash and value of the payment request", + ArgsUsage: "pay_req", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "pay_req", + Usage: "the bech32 encoded payment request", + }, + }, + Action: actionDecorator(decodePayReq), +} + +func 
decodePayReq(ctx *cli.Context) error { + ctxb := context.Background() + client, cleanUp := getClient(ctx) + defer cleanUp() + + var payreq string + + switch { + case ctx.IsSet("pay_req"): + payreq = ctx.String("pay_req") + case ctx.Args().Present(): + payreq = ctx.Args().First() + default: + return fmt.Errorf("pay_req argument missing") + } + + resp, err := client.DecodePayReq(ctxb, &lnrpc.PayReqString{ + PayReq: payreq, + }) + if err != nil { + return err + } + + printRespJSON(resp) + return nil +} diff --git a/cmd/lncli/cmd_open_channel.go b/cmd/lncli/cmd_open_channel.go new file mode 100644 index 0000000000..27b275364d --- /dev/null +++ b/cmd/lncli/cmd_open_channel.go @@ -0,0 +1,665 @@ +package main + +import ( + "context" + "crypto/rand" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + "strconv" + "strings" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnwallet/chanfunding" + "github.com/lightningnetwork/lnd/signal" + "github.com/urfave/cli" +) + +const ( + defaultUtxoMinConf = 1 + userMsgFund = `PSBT funding initiated with peer %x. +Please create a PSBT that sends %v (%d satoshi) to the funding address %s. + +Note: The whole process should be completed within 10 minutes, otherwise there +is a risk of the remote node timing out and canceling the funding process. + +Example with bitcoind: + bitcoin-cli walletcreatefundedpsbt [] '[{"%s":%.8f}]' + +If you are using a wallet that can fund a PSBT directly (currently not possible +with bitcoind), you can use this PSBT that contains the same address and amount: +%s + +!!! WARNING !!! +DO NOT PUBLISH the finished transaction by yourself or with another tool. +lnd MUST publish it in the proper funding flow order OR THE FUNDS CAN BE LOST! + +Paste the funded PSBT here to continue the funding flow. 
+Base64 encoded PSBT: ` + + userMsgSign = ` +PSBT verified by lnd, please continue the funding flow by signing the PSBT by +all required parties/devices. Once the transaction is fully signed, paste it +again here. + +Base64 encoded signed PSBT: ` +) + +// TODO(roasbeef): change default number of confirmations +var openChannelCommand = cli.Command{ + Name: "openchannel", + Category: "Channels", + Usage: "Open a channel to a node or an existing peer.", + Description: ` + Attempt to open a new channel to an existing peer with the key node-key + optionally blocking until the channel is 'open'. + + One can also connect to a node before opening a new channel to it by + setting its host:port via the --connect argument. For this to work, + the node_key must be provided, rather than the peer_id. This is optional. + + The channel will be initialized with local-amt satoshis local and push-amt + satoshis for the remote node. Note that specifying push-amt means you give that + amount to the remote node as part of the channel opening. Once the channel is open, + a channelPoint (txid:vout) of the funding output is returned. + + If the remote peer supports the option upfront shutdown feature bit (query + listpeers to see their supported feature bits), an address to enforce + payout of funds on cooperative close can optionally be provided. Note that + if you set this value, you will not be able to cooperatively close out to + another address. + + One can manually set the fee to be used for the funding transaction via either + the --conf_target or --sat_per_byte arguments. 
This is optional.`, + ArgsUsage: "node-key local-amt push-amt", + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "node_key", + Usage: "the identity public key of the target node/peer " + + "serialized in compressed format", + }, + cli.StringFlag{ + Name: "connect", + Usage: "(optional) the host:port of the target node", + }, + cli.IntFlag{ + Name: "local_amt", + Usage: "the number of satoshis the wallet should commit to the channel", + }, + cli.IntFlag{ + Name: "push_amt", + Usage: "the number of satoshis to give the remote side " + + "as part of the initial commitment state, " + + "this is equivalent to first opening a " + + "channel and sending the remote party funds, " + + "but done all in one step", + }, + cli.BoolFlag{ + Name: "block", + Usage: "block and wait until the channel is fully open", + }, + cli.Int64Flag{ + Name: "conf_target", + Usage: "(optional) the number of blocks that the " + + "transaction *should* confirm in, will be " + + "used for fee estimation", + }, + cli.Int64Flag{ + Name: "sat_per_byte", + Usage: "(optional) a manual fee expressed in " + + "sat/byte that should be used when crafting " + + "the transaction", + }, + cli.BoolFlag{ + Name: "private", + Usage: "make the channel private, such that it won't " + + "be announced to the greater network, and " + + "nodes other than the two channel endpoints " + + "must be explicitly told about it to be able " + + "to route through it", + }, + cli.Int64Flag{ + Name: "min_htlc_msat", + Usage: "(optional) the minimum value we will require " + + "for incoming HTLCs on the channel", + }, + cli.Uint64Flag{ + Name: "remote_csv_delay", + Usage: "(optional) the number of blocks we will require " + + "our channel counterparty to wait before accessing " + + "its funds in case of unilateral close. 
If this is " + + "not set, we will scale the value according to the " + + "channel size", + }, + cli.Uint64Flag{ + Name: "min_confs", + Usage: "(optional) the minimum number of confirmations " + + "each one of your outputs used for the funding " + + "transaction must satisfy", + Value: defaultUtxoMinConf, + }, + cli.StringFlag{ + Name: "close_address", + Usage: "(optional) an address to enforce payout of our " + + "funds to on cooperative close. Note that if this " + + "value is set on channel open, you will *not* be " + + "able to cooperatively close to a different address.", + }, + cli.BoolFlag{ + Name: "psbt", + Usage: "start an interactive mode that initiates " + + "funding through a partially signed bitcoin " + + "transaction (PSBT), allowing the channel " + + "funds to be added and signed from a hardware " + + "or other offline device.", + }, + cli.StringFlag{ + Name: "base_psbt", + Usage: "when using the interactive PSBT mode to open " + + "a new channel, use this base64 encoded PSBT " + + "as a base and add the new channel output to " + + "it instead of creating a new, empty one.", + }, + }, + Action: actionDecorator(openChannel), +} + +func openChannel(ctx *cli.Context) error { + // TODO(roasbeef): add deadline to context + ctxb := context.Background() + client, cleanUp := getClient(ctx) + defer cleanUp() + + args := ctx.Args() + var err error + + // Show command help if no arguments provided + if ctx.NArg() == 0 && ctx.NumFlags() == 0 { + _ = cli.ShowCommandHelp(ctx, "openchannel") + return nil + } + + minConfs := int32(ctx.Uint64("min_confs")) + req := &lnrpc.OpenChannelRequest{ + TargetConf: int32(ctx.Int64("conf_target")), + SatPerByte: ctx.Int64("sat_per_byte"), + MinHtlcMsat: ctx.Int64("min_htlc_msat"), + RemoteCsvDelay: uint32(ctx.Uint64("remote_csv_delay")), + MinConfs: minConfs, + SpendUnconfirmed: minConfs == 0, + CloseAddress: ctx.String("close_address"), + } + + switch { + case ctx.IsSet("node_key"): + nodePubHex, err := 
hex.DecodeString(ctx.String("node_key")) + if err != nil { + return fmt.Errorf("unable to decode node public key: %v", err) + } + req.NodePubkey = nodePubHex + + case args.Present(): + nodePubHex, err := hex.DecodeString(args.First()) + if err != nil { + return fmt.Errorf("unable to decode node public key: %v", err) + } + args = args.Tail() + req.NodePubkey = nodePubHex + default: + return fmt.Errorf("node id argument missing") + } + + // As soon as we can confirm that the node's node_key was set, rather + // than the peer_id, we can check if the host:port was also set to + // connect to it before opening the channel. + if req.NodePubkey != nil && ctx.IsSet("connect") { + addr := &lnrpc.LightningAddress{ + Pubkey: hex.EncodeToString(req.NodePubkey), + Host: ctx.String("connect"), + } + + req := &lnrpc.ConnectPeerRequest{ + Addr: addr, + Perm: false, + } + + // Check if connecting to the node was successful. + // We discard the peer id returned as it is not needed. + _, err := client.ConnectPeer(ctxb, req) + if err != nil && + !strings.Contains(err.Error(), "already connected") { + return err + } + } + + switch { + case ctx.IsSet("local_amt"): + req.LocalFundingAmount = int64(ctx.Int("local_amt")) + case args.Present(): + req.LocalFundingAmount, err = strconv.ParseInt(args.First(), 10, 64) + if err != nil { + return fmt.Errorf("unable to decode local amt: %v", err) + } + args = args.Tail() + default: + return fmt.Errorf("local amt argument missing") + } + + if ctx.IsSet("push_amt") { + req.PushSat = int64(ctx.Int("push_amt")) + } else if args.Present() { + req.PushSat, err = strconv.ParseInt(args.First(), 10, 64) + if err != nil { + return fmt.Errorf("unable to decode push amt: %v", err) + } + } + + req.Private = ctx.Bool("private") + + // PSBT funding is a more involved, interactive process that is too + // large to also fit into this already long function. 
+ if ctx.Bool("psbt") { + return openChannelPsbt(ctx, client, req) + } + + stream, err := client.OpenChannel(ctxb, req) + if err != nil { + return err + } + + for { + resp, err := stream.Recv() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + switch update := resp.Update.(type) { + case *lnrpc.OpenStatusUpdate_ChanPending: + err := printChanPending(update) + if err != nil { + return err + } + + if !ctx.Bool("block") { + return nil + } + + case *lnrpc.OpenStatusUpdate_ChanOpen: + return printChanOpen(update) + } + } +} + +// openChannelPsbt starts an interactive channel open protocol that uses a +// partially signed bitcoin transaction (PSBT) to fund the channel output. The +// protocol involves several steps between the RPC server and the CLI client: +// +// RPC server CLI client +// | | +// | |<------open channel (stream)-----| +// | |-------ready for funding----->| | +// | |<------PSBT verify------------| | +// | |-------ready for signing----->| | +// | |<------PSBT finalize----------| | +// | |-------channel pending------->| | +// | |-------channel open------------->| +// | | +func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient, + req *lnrpc.OpenChannelRequest) error { + + var ( + pendingChanID [32]byte + shimPending = true + basePsbtBytes []byte + quit = make(chan struct{}) + srvMsg = make(chan *lnrpc.OpenStatusUpdate, 1) + srvErr = make(chan error, 1) + ctxc, cancel = context.WithCancel(context.Background()) + ) + defer cancel() + + // Make sure the user didn't supply any command line flags that are + // incompatible with PSBT funding. + err := checkPsbtFlags(req) + if err != nil { + return err + } + + // If the user supplied a base PSBT, only make sure it's valid base64. + // The RPC server will make sure it's also a valid PSBT. 
+ basePsbt := ctx.String("base_psbt") + if basePsbt != "" { + basePsbtBytes, err = base64.StdEncoding.DecodeString(basePsbt) + if err != nil { + return fmt.Errorf("error parsing base PSBT: %v", err) + } + } + + // Generate a new, random pending channel ID that we'll use as the main + // identifier when sending update messages to the RPC server. + if _, err := rand.Read(pendingChanID[:]); err != nil { + return fmt.Errorf("unable to generate random chan ID: %v", err) + } + fmt.Printf("Starting PSBT funding flow with pending channel ID %x.\n", + pendingChanID) + + // maybeCancelShim is a helper function that cancels the funding shim + // with the RPC server in case we end up aborting early. + maybeCancelShim := func() { + // If the user canceled while there was still a shim registered + // with the wallet, release the resources now. + if shimPending { + fmt.Printf("Canceling PSBT funding flow for pending "+ + "channel ID %x.\n", pendingChanID) + cancelMsg := &lnrpc.FundingTransitionMsg{ + Trigger: &lnrpc.FundingTransitionMsg_ShimCancel{ + ShimCancel: &lnrpc.FundingShimCancel{ + PendingChanId: pendingChanID[:], + }, + }, + } + err := sendFundingState(ctxc, ctx, cancelMsg) + if err != nil { + fmt.Printf("Error canceling shim: %v\n", err) + } + shimPending = false + } + + // Abort the stream connection to the server. + cancel() + } + defer maybeCancelShim() + + // Create the PSBT funding shim that will tell the funding manager we + // want to use a PSBT. + req.FundingShim = &lnrpc.FundingShim{ + Shim: &lnrpc.FundingShim_PsbtShim{ + PsbtShim: &lnrpc.PsbtShim{ + PendingChanId: pendingChanID[:], + BasePsbt: basePsbtBytes, + }, + }, + } + + // Start the interactive process by opening the stream connection to the + // daemon. If the user cancels by pressing we need to cancel + // the shim. To not just kill the process on interrupt, we need to + // explicitly capture the signal. 
+ stream, err := client.OpenChannel(ctxc, req) + if err != nil { + return fmt.Errorf("opening stream to server failed: %v", err) + } + signal.Intercept() + + // We also need to spawn a goroutine that reads from the server. This + // will copy the messages to the channel as long as they come in or add + // exactly one error to the error stream and then bail out. + go func() { + for { + // Recv blocks until a message or error arrives. + resp, err := stream.Recv() + if err == io.EOF { + srvErr <- fmt.Errorf("lnd shutting down: %v", + err) + return + } else if err != nil { + srvErr <- fmt.Errorf("got error from server: "+ + "%v", err) + return + } + + // Don't block on sending in case of shutting down. + select { + case srvMsg <- resp: + case <-quit: + return + } + } + }() + + // Spawn another goroutine that only handles abort from user or errors + // from the server. Both will trigger an attempt to cancel the shim with + // the server. + go func() { + select { + case <-signal.ShutdownChannel(): + fmt.Printf("\nInterrupt signal received.\n") + close(quit) + + case err := <-srvErr: + fmt.Printf("\nError received: %v\n", err) + + // If the remote peer canceled on us, the reservation + // has already been deleted. We don't need to try to + // remove it again, this would just produce another + // error. + cancelErr := chanfunding.ErrRemoteCanceled.Error() + if err != nil && strings.Contains(err.Error(), cancelErr) { + shimPending = false + } + close(quit) + + case <-quit: + } + }() + + // Our main event loop where we wait for triggers + for { + var srvResponse *lnrpc.OpenStatusUpdate + select { + case srvResponse = <-srvMsg: + case <-quit: + return nil + } + + switch update := srvResponse.Update.(type) { + case *lnrpc.OpenStatusUpdate_PsbtFund: + // First tell the user how to create the PSBT with the + // address and amount we now know. 
+ amt := btcutil.Amount(update.PsbtFund.FundingAmount) + addr := update.PsbtFund.FundingAddress + fmt.Printf( + userMsgFund, req.NodePubkey, amt, amt, addr, + addr, amt.ToBTC(), + base64.StdEncoding.EncodeToString( + update.PsbtFund.Psbt, + ), + ) + + // Read the user's response and send it to the server to + // verify everything's correct before anything is + // signed. + psbtBase64, err := readLine(quit) + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("reading from console "+ + "failed: %v", err) + } + psbt, err := base64.StdEncoding.DecodeString( + strings.TrimSpace(psbtBase64), + ) + if err != nil { + return fmt.Errorf("base64 decode failed: %v", + err) + } + verifyMsg := &lnrpc.FundingTransitionMsg{ + Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{ + PsbtVerify: &lnrpc.FundingPsbtVerify{ + FundedPsbt: psbt, + PendingChanId: pendingChanID[:], + }, + }, + } + err = sendFundingState(ctxc, ctx, verifyMsg) + if err != nil { + return fmt.Errorf("verifying PSBT by lnd "+ + "failed: %v", err) + } + + // Now that we know the PSBT looks good, we can let it + // be signed by the user. + fmt.Print(userMsgSign) + + // Read the signed PSBT and send it to lnd. 
+ psbtBase64, err = readLine(quit) + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("reading from console "+ + "failed: %v", err) + } + psbt, err = base64.StdEncoding.DecodeString( + strings.TrimSpace(psbtBase64), + ) + if err != nil { + return fmt.Errorf("base64 decode failed: %v", + err) + } + finalizeMsg := &lnrpc.FundingTransitionMsg{ + Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{ + PsbtFinalize: &lnrpc.FundingPsbtFinalize{ + SignedPsbt: psbt, + PendingChanId: pendingChanID[:], + }, + }, + } + err = sendFundingState(ctxc, ctx, finalizeMsg) + if err != nil { + return fmt.Errorf("finalizing PSBT funding "+ + "flow failed: %v", err) + } + + case *lnrpc.OpenStatusUpdate_ChanPending: + // As soon as the channel is pending, there is no more + // shim that needs to be canceled. If the user + // interrupts now, we don't need to clean up anything. + shimPending = false + + err := printChanPending(update) + if err != nil { + return err + } + + if !ctx.Bool("block") { + return nil + } + + case *lnrpc.OpenStatusUpdate_ChanOpen: + return printChanOpen(update) + } + } +} + +// printChanOpen prints the channel point of the channel open message. +func printChanOpen(update *lnrpc.OpenStatusUpdate_ChanOpen) error { + channelPoint := update.ChanOpen.ChannelPoint + + // A channel point's funding txid can be get/set as a + // byte slice or a string. In the case it is a string, + // decode it. 
+ var txidHash []byte + switch channelPoint.GetFundingTxid().(type) { + case *lnrpc.ChannelPoint_FundingTxidBytes: + txidHash = channelPoint.GetFundingTxidBytes() + case *lnrpc.ChannelPoint_FundingTxidStr: + s := channelPoint.GetFundingTxidStr() + h, err := chainhash.NewHashFromStr(s) + if err != nil { + return err + } + + txidHash = h[:] + } + + txid, err := chainhash.NewHash(txidHash) + if err != nil { + return err + } + + index := channelPoint.OutputIndex + printJSON(struct { + ChannelPoint string `json:"channel_point"` + }{ + ChannelPoint: fmt.Sprintf("%v:%v", txid, index), + }) + return nil +} + +// printChanPending prints the funding transaction ID of the channel pending +// message. +func printChanPending(update *lnrpc.OpenStatusUpdate_ChanPending) error { + txid, err := chainhash.NewHash(update.ChanPending.Txid) + if err != nil { + return err + } + + printJSON(struct { + FundingTxid string `json:"funding_txid"` + }{ + FundingTxid: txid.String(), + }) + return nil +} + +// readLine reads a line from standard in but does not block in case of a +// system interrupt like syscall.SIGINT (Ctrl+C). +func readLine(quit chan struct{}) (string, error) { + msg := make(chan string, 1) + + // In a normal console, reading from stdin won't signal EOF when the + // user presses Ctrl+C. That's why we need to put this in a separate + // goroutine so it doesn't block. + go func() { + for { + var str string + _, _ = fmt.Scan(&str) + msg <- str + return + } + }() + for { + select { + case <-quit: + return "", io.EOF + + case str := <-msg: + return str, nil + } + } +} + +// checkPsbtFlags make sure a request to open a channel doesn't set any +// parameters that are incompatible with the PSBT funding flow. 
func checkPsbtFlags(req *lnrpc.OpenChannelRequest) error {
	// The PSBT flow delegates coin selection to an external wallet, so
	// lnd-side UTXO confirmation settings cannot be honored.
	if req.MinConfs != defaultUtxoMinConf || req.SpendUnconfirmed {
		return fmt.Errorf("specifying minimum confirmations for PSBT " +
			"funding is not supported")
	}
	// Likewise, fee estimation happens in the external wallet that funds
	// the PSBT, not in lnd.
	if req.TargetConf != 0 || req.SatPerByte != 0 {
		return fmt.Errorf("setting fee estimation parameters not " +
			"supported for PSBT funding")
	}
	return nil
}

// sendFundingState sends a single funding state step message by using a new
// client connection. This is necessary if the whole funding flow takes longer
// than the default macaroon timeout, then we cannot use a single client
// connection.
func sendFundingState(cancelCtx context.Context, cliCtx *cli.Context,
	msg *lnrpc.FundingTransitionMsg) error {

	// A fresh client (and therefore a fresh macaroon) per state-step call.
	client, cleanUp := getClient(cliCtx)
	defer cleanUp()

	_, err := client.FundingStateStep(cancelCtx, msg)
	return err
}
diff --git a/cmd/lncli/cmd_pay.go b/cmd/lncli/cmd_pay.go
new file mode 100644
index 0000000000..a90330b558
--- /dev/null
+++ b/cmd/lncli/cmd_pay.go
@@ -0,0 +1,877 @@
package main

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/hex"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/jedib0t/go-pretty/table"
	"github.com/jedib0t/go-pretty/text"
	"github.com/lightninglabs/protobuf-hex-display/jsonpb"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/record"
	"github.com/lightningnetwork/lnd/routing/route"
	"github.com/urfave/cli"
)

const (
	// paymentTimeoutSeconds is the default timeout for the payment loop in
	// lnd. No new attempts will be started after the timeout.
	paymentTimeoutSeconds = 60
)

var (
	// cltvLimitFlag caps the total time lock of the route.
	cltvLimitFlag = cli.UintFlag{
		Name: "cltv_limit",
		Usage: "the maximum time lock that may be used for " +
			"this payment",
	}

	// lastHopFlag pins the penultimate node of the route.
	lastHopFlag = cli.StringFlag{
		Name: "last_hop",
		Usage: "pubkey of the last hop (penultimate node in the path) " +
			"to route through for this payment",
	}

	// dataFlag attaches custom TLV records to the payment.
	// NOTE(review): the Usage text below appears to have lost its
	// "<recordid>=<hexvalue>" placeholders during extraction — confirm
	// against the upstream source before relying on it.
	dataFlag = cli.StringFlag{
		Name: "data",
		Usage: "attach custom data to the payment. The required " +
			"format is: =,=" +
			",.. For example: --data 3438382=0a21ff. " +
			"Custom record ids start from 65536.",
	}

	// inflightUpdatesFlag enables streaming of intermediate payment
	// states; only meaningful together with --json output.
	inflightUpdatesFlag = cli.BoolFlag{
		Name: "inflight_updates",
		Usage: "if set, intermediate payment state updates will be " +
			"displayed. Only valid in combination with --json.",
	}

	// maxPartsFlag bounds multi-part payment splitting; a value of 1
	// disables splitting.
	maxPartsFlag = cli.UintFlag{
		Name: "max_parts",
		Usage: "the maximum number of partial payments that may be " +
			"used",
		Value: 1,
	}

	// jsonFlag switches output from the live table to raw json messages.
	jsonFlag = cli.BoolFlag{
		Name: "json",
		Usage: "if set, payment updates are printed as json " +
			"messages. Set by default on Windows because table " +
			"formatting is unsupported.",
	}
)

// paymentFlags returns common flags for sendpayment and payinvoice.
func paymentFlags() []cli.Flag {
	return []cli.Flag{
		cli.StringFlag{
			Name:  "pay_req",
			Usage: "a zpay32 encoded payment request to fulfill",
		},
		cli.Int64Flag{
			Name: "fee_limit",
			Usage: "maximum fee allowed in satoshis when " +
				"sending the payment",
		},
		cli.Int64Flag{
			Name: "fee_limit_percent",
			Usage: "percentage of the payment's amount used as " +
				"the maximum fee allowed when sending the " +
				"payment",
		},
		cltvLimitFlag,
		lastHopFlag,
		cli.Uint64Flag{
			Name: "outgoing_chan_id",
			Usage: "short channel id of the outgoing channel to " +
				"use for the first hop of the payment",
			Value: 0,
		},
		cli.BoolFlag{
			Name:  "force, f",
			Usage: "will skip payment request confirmation",
		},
		cli.BoolFlag{
			Name:  "allow_self_payment",
			Usage: "allow sending a circular payment to self",
		},
		dataFlag, inflightUpdatesFlag, maxPartsFlag, jsonFlag,
	}
}

var sendPaymentCommand = cli.Command{
	Name:     "sendpayment",
	Category: "Payments",
	Usage:    "Send a payment over lightning.",
	Description: `
	Send a payment over Lightning. One can either specify the full
	parameters of the payment, or just use a payment request which encodes
	all the payment details.

	If payment isn't manually specified, then only a payment request needs
	to be passed using the --pay_req argument.

	If the payment *is* manually specified, then all four alternative
	arguments need to be specified in order to complete the payment:
	    * --dest=N
	    * --amt=A
	    * --final_cltv_delta=T
	    * --payment_hash=H
	`,
	ArgsUsage: "dest amt payment_hash final_cltv_delta | --pay_req=[payment request]",
	Flags: append(paymentFlags(),
		cli.StringFlag{
			Name: "dest, d",
			Usage: "the compressed identity pubkey of the " +
				"payment recipient",
		},
		cli.Int64Flag{
			Name:  "amt, a",
			Usage: "number of satoshis to send",
		},
		cli.StringFlag{
			Name:  "payment_hash, r",
			Usage: "the hash to use within the payment's HTLC",
		},
		cli.Int64Flag{
			Name:  "final_cltv_delta",
			Usage: "the number of blocks the last hop has to reveal the preimage",
		},
		cli.BoolFlag{
			Name:  "keysend",
			Usage: "will generate a pre-image and encode it in the sphinx packet, a dest must be set [experimental]",
		},
	),
	Action: sendPayment,
}

// retrieveFeeLimit retrieves the fee limit based on the different fee limit
// flags passed. It always returns a value and doesn't rely on lnd applying a
// default.
func retrieveFeeLimit(ctx *cli.Context, amt int64) (int64, error) {
	switch {

	// The two fee limit flags are mutually exclusive.
	case ctx.IsSet("fee_limit") && ctx.IsSet("fee_limit_percent"):
		return 0, fmt.Errorf("either fee_limit or fee_limit_percent " +
			"can be set, but not both")

	case ctx.IsSet("fee_limit"):
		return ctx.Int64("fee_limit"), nil

	case ctx.IsSet("fee_limit_percent"):
		// Round up the fee limit to prevent hitting zero on small
		// amounts.
		feeLimitRoundedUp :=
			(amt*ctx.Int64("fee_limit_percent") + 99) / 100

		return feeLimitRoundedUp, nil
	}

	// If no fee limit is set, use the payment amount as a limit (100%).
	return amt, nil
}

// confirmPayReq prints the decoded details of a payment request together with
// the amount and fee limit about to be used, then interactively asks the user
// to confirm. A non-nil error is returned if the user declines.
func confirmPayReq(resp *lnrpc.PayReq, amt, feeLimit int64) error {
	fmt.Printf("Payment hash: %v\n", resp.GetPaymentHash())
	fmt.Printf("Description: %v\n", resp.GetDescription())
	fmt.Printf("Amount (in satoshis): %v\n", amt)
	fmt.Printf("Fee limit (in satoshis): %v\n", feeLimit)
	fmt.Printf("Destination: %v\n", resp.GetDestination())

	confirm := promptForConfirmation("Confirm payment (yes/no): ")
	if !confirm {
		return fmt.Errorf("payment not confirmed")
	}

	return nil
}

// sendPayment is the Action for the sendpayment command. It either forwards a
// supplied payment request directly, or assembles a SendPaymentRequest from
// the manual dest/amt/payment_hash/final_cltv_delta arguments (flags take
// precedence over positional arguments).
func sendPayment(ctx *cli.Context) error {
	// Show command help if no arguments provided
	if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
		_ = cli.ShowCommandHelp(ctx, "sendpayment")
		return nil
	}

	// If a payment request was provided, we can exit early since all of the
	// details of the payment are encoded within the request.
	if ctx.IsSet("pay_req") {
		req := &routerrpc.SendPaymentRequest{
			PaymentRequest: ctx.String("pay_req"),
			Amt:            ctx.Int64("amt"),
		}

		return sendPaymentRequest(ctx, req)
	}

	var (
		destNode []byte
		amount   int64
		err      error
	)

	args := ctx.Args()

	// Destination: --dest flag wins over the first positional argument.
	switch {
	case ctx.IsSet("dest"):
		destNode, err = hex.DecodeString(ctx.String("dest"))
	case args.Present():
		destNode, err = hex.DecodeString(args.First())
		args = args.Tail()
	default:
		return fmt.Errorf("destination txid argument missing")
	}
	if err != nil {
		return err
	}

	if len(destNode) != 33 {
		return fmt.Errorf("dest node pubkey must be exactly 33 bytes, is "+
			"instead: %v", len(destNode))
	}

	if ctx.IsSet("amt") {
		amount = ctx.Int64("amt")
	} else if args.Present() {
		amount, err = strconv.ParseInt(args.First(), 10, 64)
		args = args.Tail()
		if err != nil {
			return fmt.Errorf("unable to decode payment amount: %v", err)
		}
	}

	req := &routerrpc.SendPaymentRequest{
		Dest:              destNode,
		Amt:               amount,
		DestCustomRecords: make(map[uint64][]byte),
	}

	var rHash []byte

	if ctx.Bool("keysend") {
		// Keysend derives the payment hash from a locally generated
		// preimage, so a user-supplied hash is contradictory.
		if ctx.IsSet("payment_hash") {
			return errors.New("cannot set payment hash when using " +
				"keysend")
		}
		var preimage lntypes.Preimage
		if _, err := rand.Read(preimage[:]); err != nil {
			return err
		}

		// Set the preimage. If the user supplied a preimage with the
		// data flag, the preimage that is set here will be overwritten
		// later.
		req.DestCustomRecords[record.KeySendType] = preimage[:]

		hash := preimage.Hash()
		rHash = hash[:]
	} else {
		switch {
		case ctx.IsSet("payment_hash"):
			rHash, err = hex.DecodeString(ctx.String("payment_hash"))
		case args.Present():
			rHash, err = hex.DecodeString(args.First())
			args = args.Tail()
		default:
			return fmt.Errorf("payment hash argument missing")
		}
	}

	if err != nil {
		return err
	}
	if len(rHash) != 32 {
		return fmt.Errorf("payment hash must be exactly 32 "+
			"bytes, is instead %v", len(rHash))
	}
	req.PaymentHash = rHash

	switch {
	case ctx.IsSet("final_cltv_delta"):
		req.FinalCltvDelta = int32(ctx.Int64("final_cltv_delta"))
	case args.Present():
		delta, err := strconv.ParseInt(args.First(), 10, 64)
		if err != nil {
			return err
		}
		req.FinalCltvDelta = int32(delta)
	}

	return sendPaymentRequest(ctx, req)
}

// sendPaymentRequest fills in the remaining common fields of the request
// (routing restrictions, custom records, fee limit), optionally asks for user
// confirmation, then dispatches the payment via routerrpc.SendPaymentV2 and
// streams status updates until the payment reaches a final state.
func sendPaymentRequest(ctx *cli.Context,
	req *routerrpc.SendPaymentRequest) error {

	conn := getClientConn(ctx, false)
	defer conn.Close()

	client := lnrpc.NewLightningClient(conn)
	routerClient := routerrpc.NewRouterClient(conn)

	req.OutgoingChanId = ctx.Uint64("outgoing_chan_id")
	if ctx.IsSet(lastHopFlag.Name) {
		lastHop, err := route.NewVertexFromStr(
			ctx.String(lastHopFlag.Name),
		)
		if err != nil {
			return err
		}
		req.LastHopPubkey = lastHop[:]
	}

	req.CltvLimit = int32(ctx.Int(cltvLimitFlag.Name))
	req.TimeoutSeconds = paymentTimeoutSeconds

	req.AllowSelfPayment = ctx.Bool("allow_self_payment")

	req.MaxParts = uint32(ctx.Uint(maxPartsFlag.Name))

	// Parse custom data records.
	data := ctx.String(dataFlag.Name)
	if data != "" {
		records := strings.Split(data, ",")
		for _, r := range records {
			kv := strings.Split(r, "=")
			if len(kv) != 2 {
				return errors.New("invalid data format: " +
					"multiple equal signs in record")
			}

			recordID, err := strconv.ParseUint(kv[0], 10, 64)
			if err != nil {
				return fmt.Errorf("invalid data format: %v",
					err)
			}

			hexValue, err := hex.DecodeString(kv[1])
			if err != nil {
				return fmt.Errorf("invalid data format: %v",
					err)
			}

			req.DestCustomRecords[recordID] = hexValue
		}
	}

	var feeLimit int64
	if req.PaymentRequest != "" {
		// Decode payment request to find out the amount.
		decodeReq := &lnrpc.PayReqString{PayReq: req.PaymentRequest}
		decodeResp, err := client.DecodePayReq(
			context.Background(), decodeReq,
		)
		if err != nil {
			return err
		}

		// If amount is present in the request, override the request
		// amount.
		amt := req.Amt
		invoiceAmt := decodeResp.GetNumSatoshis()
		if invoiceAmt != 0 {
			amt = invoiceAmt
		}

		// Calculate fee limit based on the determined amount.
		feeLimit, err = retrieveFeeLimit(ctx, amt)
		if err != nil {
			return err
		}

		// Ask for confirmation of amount and fee limit, unless the
		// payment is forced with --force.
		if !ctx.Bool("force") {
			err := confirmPayReq(decodeResp, amt, feeLimit)
			if err != nil {
				return err
			}
		}
	} else {
		var err error
		feeLimit, err = retrieveFeeLimit(ctx, req.Amt)
		if err != nil {
			return err
		}
	}

	req.FeeLimitSat = feeLimit

	// Always print in-flight updates for the table output.
	printJSON := ctx.Bool(jsonFlag.Name)
	req.NoInflightUpdates = !ctx.Bool(inflightUpdatesFlag.Name) && printJSON

	stream, err := routerClient.SendPaymentV2(context.Background(), req)
	if err != nil {
		return err
	}

	finalState, err := printLivePayment(
		stream, client, printJSON,
	)
	if err != nil {
		return err
	}

	// If we get a payment error back, we pass an error up
	// to main which eventually calls fatal() and returns
	// with a non-zero exit code.
	if finalState.Status != lnrpc.Payment_SUCCEEDED {
		return errors.New(finalState.Status.String())
	}

	return nil
}

var trackPaymentCommand = cli.Command{
	Name:     "trackpayment",
	Category: "Payments",
	Usage:    "Track progress of an existing payment.",
	Description: `
	Pick up monitoring the progression of a previously initiated payment
	specified by the hash argument.
	`,
	ArgsUsage: "hash",
	Action:    actionDecorator(trackPayment),
}

// trackPayment is the Action for the trackpayment command. It subscribes to
// updates for the payment identified by the hex-encoded hash argument and
// streams them via printLivePayment.
func trackPayment(ctx *cli.Context) error {
	args := ctx.Args()

	conn := getClientConn(ctx, false)
	defer conn.Close()

	routerClient := routerrpc.NewRouterClient(conn)

	if !args.Present() {
		return fmt.Errorf("hash argument missing")
	}

	hash, err := hex.DecodeString(args.First())
	if err != nil {
		return err
	}

	req := &routerrpc.TrackPaymentRequest{
		PaymentHash: hash,
	}

	stream, err := routerClient.TrackPaymentV2(context.Background(), req)
	if err != nil {
		return err
	}

	client := lnrpc.NewLightningClient(conn)
	_, err = printLivePayment(stream, client, ctx.Bool(jsonFlag.Name))
	return err
}

// printLivePayment receives payment updates from the given stream and either
// outputs them as json or as a more user-friendly formatted table. The table
// option uses terminal control codes to rewrite the output. This call
// terminates when the payment reaches a final state.
+func printLivePayment(stream routerrpc.Router_TrackPaymentV2Client, + client lnrpc.LightningClient, json bool) (*lnrpc.Payment, error) { + + // Terminal escape codes aren't supported on Windows, fall back to json. + if !json && runtime.GOOS == "windows" { + json = true + } + + aliases := newAliasCache(client) + + first := true + var lastLineCount int + for { + payment, err := stream.Recv() + if err != nil { + return nil, err + } + + if json { + // Delimit json messages by newlines (inspired by + // grpc over rest chunking). + if first { + first = false + } else { + fmt.Println() + } + + // Write raw json to stdout. + printRespJSON(payment) + } else { + table := formatPayment(payment, aliases) + + // Clear all previously written lines and print the + // updated table. + clearLines(lastLineCount) + fmt.Print(table) + + // Store the number of lines written for the next update + // pass. + lastLineCount = 0 + for _, b := range table { + if b == '\n' { + lastLineCount++ + } + } + } + + // Terminate loop if payments state is final. + if payment.Status != lnrpc.Payment_IN_FLIGHT { + return payment, nil + } + } +} + +// aliasCache allows cached retrieval of node aliases. +type aliasCache struct { + cache map[string]string + client lnrpc.LightningClient +} + +func newAliasCache(client lnrpc.LightningClient) *aliasCache { + return &aliasCache{ + client: client, + cache: make(map[string]string), + } +} + +// get returns a node alias either from cache or freshly requested from lnd. +func (a *aliasCache) get(pubkey string) string { + alias, ok := a.cache[pubkey] + if ok { + return alias + } + + // Request node info. + resp, err := a.client.GetNodeInfo( + context.Background(), + &lnrpc.NodeInfoRequest{ + PubKey: pubkey, + }, + ) + if err != nil { + // If no info is available, use the + // pubkey as identifier. + alias = pubkey[:6] + } else { + alias = resp.Node.Alias + } + a.cache[pubkey] = alias + + return alias +} + +// formatMsat formats msat amounts as fractional sats. 
+func formatMsat(amt int64) string { + return strconv.FormatFloat(float64(amt)/1000.0, 'f', -1, 64) +} + +// formatPayment formats the payment state as an ascii table. +func formatPayment(payment *lnrpc.Payment, aliases *aliasCache) string { + t := table.NewWriter() + + // Build table header. + t.AppendHeader(table.Row{ + "HTLC_STATE", "ATTEMPT_TIME", "RESOLVE_TIME", "RECEIVER_AMT", + "FEE", "TIMELOCK", "CHAN_OUT", "ROUTE", + }) + t.SetColumnConfigs([]table.ColumnConfig{ + {Name: "ATTEMPT_TIME", Align: text.AlignRight}, + {Name: "RESOLVE_TIME", Align: text.AlignRight}, + {Name: "CHAN_OUT", Align: text.AlignLeft, + AlignHeader: text.AlignLeft}, + }) + + // Add all htlcs as rows. + createTime := time.Unix(0, payment.CreationTimeNs) + var totalPaid, totalFees int64 + for _, htlc := range payment.Htlcs { + formatTime := func(timeNs int64) string { + if timeNs == 0 { + return "-" + } + resolveTime := time.Unix(0, timeNs) + resolveTimeMs := resolveTime.Sub(createTime). + Milliseconds() + return fmt.Sprintf( + "%.3f", float64(resolveTimeMs)/1000.0, + ) + } + + attemptTime := formatTime(htlc.AttemptTimeNs) + resolveTime := formatTime(htlc.ResolveTimeNs) + + route := htlc.Route + lastHop := route.Hops[len(route.Hops)-1] + + hops := []string{} + for _, h := range route.Hops { + alias := aliases.get(h.PubKey) + hops = append(hops, alias) + } + + state := htlc.Status.String() + if htlc.Failure != nil { + state = fmt.Sprintf( + "%v @ %v", + htlc.Failure.Code, + htlc.Failure.FailureSourceIndex, + ) + } + + t.AppendRow([]interface{}{ + state, attemptTime, resolveTime, + formatMsat(lastHop.AmtToForwardMsat), + formatMsat(route.TotalFeesMsat), + route.TotalTimeLock, route.Hops[0].ChanId, + strings.Join(hops, "->")}, + ) + + if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED { + totalPaid += lastHop.AmtToForwardMsat + totalFees += route.TotalFeesMsat + } + } + + // Render table. + b := &bytes.Buffer{} + t.SetOutputMirror(b) + t.Render() + + // Add additional payment-level data. 
+ fmt.Fprintf(b, "Amount + fee: %v + %v sat\n", + formatMsat(totalPaid), formatMsat(totalFees)) + fmt.Fprintf(b, "Payment hash: %v\n", payment.PaymentHash) + fmt.Fprintf(b, "Payment status: %v", payment.Status) + switch payment.Status { + case lnrpc.Payment_SUCCEEDED: + fmt.Fprintf(b, ", preimage: %v", payment.PaymentPreimage) + case lnrpc.Payment_FAILED: + fmt.Fprintf(b, ", reason: %v", payment.FailureReason) + } + fmt.Fprintf(b, "\n") + + return b.String() +} + +var payInvoiceCommand = cli.Command{ + Name: "payinvoice", + Category: "Payments", + Usage: "Pay an invoice over lightning.", + ArgsUsage: "pay_req", + Flags: append(paymentFlags(), + cli.Int64Flag{ + Name: "amt", + Usage: "(optional) number of satoshis to fulfill the " + + "invoice", + }, + ), + Action: actionDecorator(payInvoice), +} + +func payInvoice(ctx *cli.Context) error { + args := ctx.Args() + + var payReq string + switch { + case ctx.IsSet("pay_req"): + payReq = ctx.String("pay_req") + case args.Present(): + payReq = args.First() + default: + return fmt.Errorf("pay_req argument missing") + } + + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: payReq, + Amt: ctx.Int64("amt"), + DestCustomRecords: make(map[uint64][]byte), + } + + return sendPaymentRequest(ctx, req) +} + +var sendToRouteCommand = cli.Command{ + Name: "sendtoroute", + Category: "Payments", + Usage: "Send a payment over a predefined route.", + Description: ` + Send a payment over Lightning using a specific route. One must specify + the route to attempt and the payment hash. This command can even + be chained with the response to queryroutes or buildroute. This command + can be used to implement channel rebalancing by crafting a self-route, + or even atomic swaps using a self-route that crosses multiple chains. 
+ + There are three ways to specify a route: + * using the --routes parameter to manually specify a JSON encoded + route in the format of the return value of queryroutes or + buildroute: + (lncli sendtoroute --payment_hash= --routes=) + + * passing the route as a positional argument: + (lncli sendtoroute --payment_hash=pay_hash ) + + * or reading in the route from stdin, which can allow chaining the + response from queryroutes or buildroute, or even read in a file + with a pre-computed route: + (lncli queryroutes --args.. | lncli sendtoroute --payment_hash= - + + notice the '-' at the end, which signals that lncli should read + the route in from stdin + `, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "payment_hash, pay_hash", + Usage: "the hash to use within the payment's HTLC", + }, + cli.StringFlag{ + Name: "routes, r", + Usage: "a json array string in the format of the response " + + "of queryroutes that denotes which routes to use", + }, + }, + Action: sendToRoute, +} + +func sendToRoute(ctx *cli.Context) error { + // Show command help if no arguments provided. + if ctx.NArg() == 0 && ctx.NumFlags() == 0 { + _ = cli.ShowCommandHelp(ctx, "sendtoroute") + return nil + } + + args := ctx.Args() + + var ( + rHash []byte + err error + ) + switch { + case ctx.IsSet("payment_hash"): + rHash, err = hex.DecodeString(ctx.String("payment_hash")) + case args.Present(): + rHash, err = hex.DecodeString(args.First()) + + args = args.Tail() + default: + return fmt.Errorf("payment hash argument missing") + } + + if err != nil { + return err + } + + if len(rHash) != 32 { + return fmt.Errorf("payment hash must be exactly 32 "+ + "bytes, is instead %d", len(rHash)) + } + + var jsonRoutes string + switch { + // The user is specifying the routes explicitly via the key word + // argument. + case ctx.IsSet("routes"): + jsonRoutes = ctx.String("routes") + + // The user is specifying the routes as a positional argument. 
+ case args.Present() && args.First() != "-": + jsonRoutes = args.First() + + // The user is signalling that we should read stdin in order to parse + // the set of target routes. + case args.Present() && args.First() == "-": + b, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + if len(b) == 0 { + return fmt.Errorf("queryroutes output is empty") + } + + jsonRoutes = string(b) + } + + // Try to parse the provided json both in the legacy QueryRoutes format + // that contains a list of routes and the single route BuildRoute + // format. + var route *lnrpc.Route + routes := &lnrpc.QueryRoutesResponse{} + err = jsonpb.UnmarshalString(jsonRoutes, routes) + if err == nil { + if len(routes.Routes) == 0 { + return fmt.Errorf("no routes provided") + } + + if len(routes.Routes) != 1 { + return fmt.Errorf("expected a single route, but got %v", + len(routes.Routes)) + } + + route = routes.Routes[0] + } else { + routes := &routerrpc.BuildRouteResponse{} + err = jsonpb.UnmarshalString(jsonRoutes, routes) + if err != nil { + return fmt.Errorf("unable to unmarshal json string "+ + "from incoming array of routes: %v", err) + } + + route = routes.Route + } + + req := &lnrpc.SendToRouteRequest{ + PaymentHash: rHash, + Route: route, + } + + return sendToRouteRequest(ctx, req) +} + +func sendToRouteRequest(ctx *cli.Context, req *lnrpc.SendToRouteRequest) error { + client, cleanUp := getClient(ctx) + defer cleanUp() + + paymentStream, err := client.SendToRoute(context.Background()) + if err != nil { + return err + } + + if err := paymentStream.Send(req); err != nil { + return err + } + + resp, err := paymentStream.Recv() + if err != nil { + return err + } + + printRespJSON(resp) + + return nil +} + +// ESC is the ASCII code for escape character +const ESC = 27 + +// clearCode defines a terminal escape code to clear the currently line and move +// the cursor up. 
+var clearCode = fmt.Sprintf("%c[%dA%c[2K", ESC, 1, ESC) + +// clearLines erases the last count lines in the terminal window. +func clearLines(count int) { + _, _ = fmt.Print(strings.Repeat(clearCode, count)) +} diff --git a/cmd/lncli/cmd_query_mission_control.go b/cmd/lncli/cmd_query_mission_control.go index 3c13a09b84..93cf71c9c9 100644 --- a/cmd/lncli/cmd_query_mission_control.go +++ b/cmd/lncli/cmd_query_mission_control.go @@ -1,10 +1,7 @@ -// +build routerrpc - package main import ( "context" - "encoding/hex" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" @@ -31,51 +28,7 @@ func queryMissionControl(ctx *cli.Context) error { return err } - type displayNodeHistory struct { - Pubkey string - LastFailTime int64 - OtherSuccessProb float32 - } - - type displayPairHistory struct { - NodeFrom, NodeTo string - LastAttemptSuccessful bool - Timestamp int64 - SuccessProb float32 - MinPenalizeAmtSat int64 - } - - displayResp := struct { - Nodes []displayNodeHistory - Pairs []displayPairHistory - }{} - - for _, n := range snapshot.Nodes { - displayResp.Nodes = append( - displayResp.Nodes, - displayNodeHistory{ - Pubkey: hex.EncodeToString(n.Pubkey), - LastFailTime: n.LastFailTime, - OtherSuccessProb: n.OtherSuccessProb, - }, - ) - } - - for _, n := range snapshot.Pairs { - displayResp.Pairs = append( - displayResp.Pairs, - displayPairHistory{ - NodeFrom: hex.EncodeToString(n.NodeFrom), - NodeTo: hex.EncodeToString(n.NodeTo), - LastAttemptSuccessful: n.LastAttemptSuccessful, - Timestamp: n.Timestamp, - SuccessProb: n.SuccessProb, - MinPenalizeAmtSat: n.MinPenalizeAmtSat, - }, - ) - } - - printJSON(displayResp) + printRespJSON(snapshot) return nil } diff --git a/cmd/lncli/cmd_query_probability.go b/cmd/lncli/cmd_query_probability.go new file mode 100644 index 0000000000..4eaa075c98 --- /dev/null +++ b/cmd/lncli/cmd_query_probability.go @@ -0,0 +1,68 @@ +package main + +import ( + "context" + "fmt" + "strconv" + + "github.com/btcsuite/btcutil" + 
"github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" + "github.com/urfave/cli" +) + +var queryProbCommand = cli.Command{ + Name: "queryprob", + Category: "Payments", + Usage: "Estimate a success probability.", + ArgsUsage: "from-node to-node amt", + Action: actionDecorator(queryProb), +} + +func queryProb(ctx *cli.Context) error { + args := ctx.Args() + + if len(args) != 3 { + return cli.ShowCommandHelp(ctx, "queryprob") + } + + fromNode, err := route.NewVertexFromStr(args.Get(0)) + if err != nil { + return fmt.Errorf("invalid from node key: %v", err) + } + + toNode, err := route.NewVertexFromStr(args.Get(1)) + if err != nil { + return fmt.Errorf("invalid to node key: %v", err) + } + + amtSat, err := strconv.ParseUint(args.Get(2), 10, 64) + if err != nil { + return fmt.Errorf("invalid amt: %v", err) + } + + amtMsat := lnwire.NewMSatFromSatoshis( + btcutil.Amount(amtSat), + ) + + conn := getClientConn(ctx, false) + defer conn.Close() + + client := routerrpc.NewRouterClient(conn) + + req := &routerrpc.QueryProbabilityRequest{ + FromNode: fromNode[:], + ToNode: toNode[:], + AmtMsat: int64(amtMsat), + } + rpcCtx := context.Background() + response, err := client.QueryProbability(rpcCtx, req) + if err != nil { + return err + } + + printRespJSON(response) + + return nil +} diff --git a/cmd/lncli/cmd_reset_mission_control.go b/cmd/lncli/cmd_reset_mission_control.go index db37fd4b19..b571a9e9a4 100644 --- a/cmd/lncli/cmd_reset_mission_control.go +++ b/cmd/lncli/cmd_reset_mission_control.go @@ -1,5 +1,3 @@ -// +build routerrpc - package main import ( diff --git a/cmd/lncli/cmd_version.go b/cmd/lncli/cmd_version.go new file mode 100644 index 0000000000..578129ed9a --- /dev/null +++ b/cmd/lncli/cmd_version.go @@ -0,0 +1,54 @@ +package main + +import ( + "context" + "fmt" + + "github.com/lightningnetwork/lnd/build" + "github.com/lightningnetwork/lnd/lnrpc/lnclipb" + 
"github.com/lightningnetwork/lnd/lnrpc/verrpc" + "github.com/urfave/cli" +) + +var versionCommand = cli.Command{ + Name: "version", + Usage: "Display lncli and lnd version info.", + Description: ` + Returns version information about both lncli and lnd. If lncli is unable + to connect to lnd, the command fails but still prints the lncli version. + `, + Action: actionDecorator(version), +} + +func version(ctx *cli.Context) error { + conn := getClientConn(ctx, false) + defer conn.Close() + + versions := &lnclipb.VersionResponse{ + Lncli: &verrpc.Version{ + Commit: build.Commit, + CommitHash: build.CommitHash, + Version: build.Version(), + AppMajor: uint32(build.AppMajor), + AppMinor: uint32(build.AppMinor), + AppPatch: uint32(build.AppPatch), + AppPreRelease: build.AppPreRelease, + BuildTags: build.Tags(), + GoVersion: build.GoVersion, + }, + } + + client := verrpc.NewVersionerClient(conn) + + ctxb := context.Background() + lndVersion, err := client.GetVersion(ctxb, &verrpc.VersionRequest{}) + if err != nil { + printRespJSON(versions) + return fmt.Errorf("unable fetch version from lnd: %v", err) + } + versions.Lnd = lndVersion + + printRespJSON(versions) + + return nil +} diff --git a/cmd/lncli/commands.go b/cmd/lncli/commands.go index 208bd11555..91741af3ee 100644 --- a/cmd/lncli/commands.go +++ b/cmd/lncli/commands.go @@ -5,7 +5,6 @@ import ( "bytes" "context" "encoding/hex" - "encoding/json" "errors" "fmt" "io" @@ -20,10 +19,11 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" + "github.com/lightninglabs/protobuf-hex-display/json" + "github.com/lightninglabs/protobuf-hex-display/jsonpb" + "github.com/lightninglabs/protobuf-hex-display/proto" "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/walletunlocker" "github.com/urfave/cli" 
"golang.org/x/crypto/ssh/terminal" @@ -53,6 +53,7 @@ func printJSON(resp interface{}) { func printRespJSON(resp proto.Message) { jsonMarshaler := &jsonpb.Marshaler{ EmitDefaults: true, + OrigName: true, Indent: " ", } @@ -199,7 +200,7 @@ var sendCoinsCommand = cli.Command{ Usage: "Send groestlcoin on-chain to an address.", ArgsUsage: "addr amt", Description: ` - Send amt coins in satoshis to the BASE58 encoded groestlcoin address addr. + Send amt coins in satoshis to the base58 or bech32 encoded groestlcoin address addr. Fees used when sending the transaction can be specified via the --conf_target, or --sat_per_byte optional flags. @@ -208,8 +209,9 @@ var sendCoinsCommand = cli.Command{ `, Flags: []cli.Flag{ cli.StringFlag{ - Name: "addr", - Usage: "the BASE58 encoded groestlcoin address to send coins to on-chain", + Name: "addr", + Usage: "the base58 or bech32 encoded groestlcoin address to send coins " + + "to on-chain", }, cli.BoolFlag{ Name: "sweepall", @@ -571,255 +573,6 @@ func disconnectPeer(ctx *cli.Context) error { return nil } -// TODO(roasbeef): change default number of confirmations -var openChannelCommand = cli.Command{ - Name: "openchannel", - Category: "Channels", - Usage: "Open a channel to a node or an existing peer.", - Description: ` - Attempt to open a new channel to an existing peer with the key node-key - optionally blocking until the channel is 'open'. - - One can also connect to a node before opening a new channel to it by - setting its host:port via the --connect argument. For this to work, - the node_key must be provided, rather than the peer_id. This is optional. - - The channel will be initialized with local-amt satoshis local and push-amt - satoshis for the remote node. Note that specifying push-amt means you give that - amount to the remote node as part of the channel opening. Once the channel is open, - a channelPoint (txid:vout) of the funding output is returned. 
- - One can manually set the fee to be used for the funding transaction via either - the --conf_target or --sat_per_byte arguments. This is optional.`, - ArgsUsage: "node-key local-amt push-amt", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "node_key", - Usage: "the identity public key of the target node/peer " + - "serialized in compressed format", - }, - cli.StringFlag{ - Name: "connect", - Usage: "(optional) the host:port of the target node", - }, - cli.IntFlag{ - Name: "local_amt", - Usage: "the number of satoshis the wallet should commit to the channel", - }, - cli.IntFlag{ - Name: "push_amt", - Usage: "the number of satoshis to give the remote side " + - "as part of the initial commitment state, " + - "this is equivalent to first opening a " + - "channel and sending the remote party funds, " + - "but done all in one step", - }, - cli.BoolFlag{ - Name: "block", - Usage: "block and wait until the channel is fully open", - }, - cli.Int64Flag{ - Name: "conf_target", - Usage: "(optional) the number of blocks that the " + - "transaction *should* confirm in, will be " + - "used for fee estimation", - }, - cli.Int64Flag{ - Name: "sat_per_byte", - Usage: "(optional) a manual fee expressed in " + - "sat/byte that should be used when crafting " + - "the transaction", - }, - cli.BoolFlag{ - Name: "private", - Usage: "make the channel private, such that it won't " + - "be announced to the greater network, and " + - "nodes other than the two channel endpoints " + - "must be explicitly told about it to be able " + - "to route through it", - }, - cli.Int64Flag{ - Name: "min_htlc_msat", - Usage: "(optional) the minimum value we will require " + - "for incoming HTLCs on the channel", - }, - cli.Uint64Flag{ - Name: "remote_csv_delay", - Usage: "(optional) the number of blocks we will require " + - "our channel counterparty to wait before accessing " + - "its funds in case of unilateral close. 
If this is " + - "not set, we will scale the value according to the " + - "channel size", - }, - cli.Uint64Flag{ - Name: "min_confs", - Usage: "(optional) the minimum number of confirmations " + - "each one of your outputs used for the funding " + - "transaction must satisfy", - Value: 1, - }, - }, - Action: actionDecorator(openChannel), -} - -func openChannel(ctx *cli.Context) error { - // TODO(roasbeef): add deadline to context - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - args := ctx.Args() - var err error - - // Show command help if no arguments provided - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - cli.ShowCommandHelp(ctx, "openchannel") - return nil - } - - minConfs := int32(ctx.Uint64("min_confs")) - req := &lnrpc.OpenChannelRequest{ - TargetConf: int32(ctx.Int64("conf_target")), - SatPerByte: ctx.Int64("sat_per_byte"), - MinHtlcMsat: ctx.Int64("min_htlc_msat"), - RemoteCsvDelay: uint32(ctx.Uint64("remote_csv_delay")), - MinConfs: minConfs, - SpendUnconfirmed: minConfs == 0, - } - - switch { - case ctx.IsSet("node_key"): - nodePubHex, err := hex.DecodeString(ctx.String("node_key")) - if err != nil { - return fmt.Errorf("unable to decode node public key: %v", err) - } - req.NodePubkey = nodePubHex - - case args.Present(): - nodePubHex, err := hex.DecodeString(args.First()) - if err != nil { - return fmt.Errorf("unable to decode node public key: %v", err) - } - args = args.Tail() - req.NodePubkey = nodePubHex - default: - return fmt.Errorf("node id argument missing") - } - - // As soon as we can confirm that the node's node_key was set, rather - // than the peer_id, we can check if the host:port was also set to - // connect to it before opening the channel. 
- if req.NodePubkey != nil && ctx.IsSet("connect") { - addr := &lnrpc.LightningAddress{ - Pubkey: hex.EncodeToString(req.NodePubkey), - Host: ctx.String("connect"), - } - - req := &lnrpc.ConnectPeerRequest{ - Addr: addr, - Perm: false, - } - - // Check if connecting to the node was successful. - // We discard the peer id returned as it is not needed. - _, err := client.ConnectPeer(ctxb, req) - if err != nil && - !strings.Contains(err.Error(), "already connected") { - return err - } - } - - switch { - case ctx.IsSet("local_amt"): - req.LocalFundingAmount = int64(ctx.Int("local_amt")) - case args.Present(): - req.LocalFundingAmount, err = strconv.ParseInt(args.First(), 10, 64) - if err != nil { - return fmt.Errorf("unable to decode local amt: %v", err) - } - args = args.Tail() - default: - return fmt.Errorf("local amt argument missing") - } - - if ctx.IsSet("push_amt") { - req.PushSat = int64(ctx.Int("push_amt")) - } else if args.Present() { - req.PushSat, err = strconv.ParseInt(args.First(), 10, 64) - if err != nil { - return fmt.Errorf("unable to decode push amt: %v", err) - } - } - - req.Private = ctx.Bool("private") - - stream, err := client.OpenChannel(ctxb, req) - if err != nil { - return err - } - - for { - resp, err := stream.Recv() - if err == io.EOF { - return nil - } else if err != nil { - return err - } - - switch update := resp.Update.(type) { - case *lnrpc.OpenStatusUpdate_ChanPending: - txid, err := chainhash.NewHash(update.ChanPending.Txid) - if err != nil { - return err - } - - printJSON(struct { - FundingTxid string `json:"funding_txid"` - }{ - FundingTxid: txid.String(), - }, - ) - - if !ctx.Bool("block") { - return nil - } - - case *lnrpc.OpenStatusUpdate_ChanOpen: - channelPoint := update.ChanOpen.ChannelPoint - - // A channel point's funding txid can be get/set as a - // byte slice or a string. In the case it is a string, - // decode it. 
- var txidHash []byte - switch channelPoint.GetFundingTxid().(type) { - case *lnrpc.ChannelPoint_FundingTxidBytes: - txidHash = channelPoint.GetFundingTxidBytes() - case *lnrpc.ChannelPoint_FundingTxidStr: - s := channelPoint.GetFundingTxidStr() - h, err := chainhash.NewHashFromStr(s) - if err != nil { - return err - } - - txidHash = h[:] - } - - txid, err := chainhash.NewHash(txidHash) - if err != nil { - return err - } - - index := channelPoint.OutputIndex - printJSON(struct { - ChannelPoint string `json:"channel_point"` - }{ - ChannelPoint: fmt.Sprintf("%v:%v", txid, index), - }, - ) - } - } -} - // TODO(roasbeef): also allow short relative channel ID. var closeChannelCommand = cli.Command{ @@ -834,11 +587,16 @@ var closeChannelCommand = cli.Command{ transaction will be broadcast to the network. As a result, any settled funds will be time locked for a few blocks before they can be spent. - In the case of a cooperative closure, One can manually set the fee to + In the case of a cooperative closure, one can manually set the fee to be used for the closing transaction via either the --conf_target or --sat_per_byte arguments. This will be the starting value used during fee negotiation. This is optional. + In the case of a cooperative closure, one can manually set the address + to deliver funds to upon closure. This is optional, and may only be used + if an upfront shutdown address has not already been set. If neither are + set the funds will be delivered to a new wallet address. + To view which funding_txids/output_indexes can be used for a channel close, see the channel_point values within the listchannels command output. 
The format for a channel_point is 'funding_txid:output_index'.`, @@ -873,6 +631,13 @@ var closeChannelCommand = cli.Command{ "sat/byte that should be used when crafting " + "the transaction", }, + cli.StringFlag{ + Name: "delivery_addr", + Usage: "(optional) an address to deliver funds " + + "upon cooperative channel closing, may only " + + "be used if an upfront shutdown addresss is not" + + "already set", + }, }, Action: actionDecorator(closeChannel), } @@ -894,10 +659,11 @@ func closeChannel(ctx *cli.Context) error { // TODO(roasbeef): implement time deadline within server req := &lnrpc.CloseChannelRequest{ - ChannelPoint: channelPoint, - Force: ctx.Bool("force"), - TargetConf: int32(ctx.Int64("conf_target")), - SatPerByte: ctx.Int64("sat_per_byte"), + ChannelPoint: channelPoint, + Force: ctx.Bool("force"), + TargetConf: int32(ctx.Int64("conf_target")), + SatPerByte: ctx.Int64("sat_per_byte"), + DeliveryAddress: ctx.String("delivery_addr"), } // After parsing the request, we'll spin up a goroutine that will @@ -1302,7 +1068,13 @@ var listPeersCommand = cli.Command{ Name: "listpeers", Category: "Peers", Usage: "List all active, currently connected peers.", - Action: actionDecorator(listPeers), + Flags: []cli.Flag{ + cli.BoolFlag{ + Name: "list_errors", + Usage: "list a full set of most recent errors for the peer", + }, + }, + Action: actionDecorator(listPeers), } func listPeers(ctx *cli.Context) error { @@ -1310,7 +1082,11 @@ func listPeers(ctx *cli.Context) error { client, cleanUp := getClient(ctx) defer cleanUp() - req := &lnrpc.ListPeersRequest{} + // By default, we display a single error on the cli. If the user + // specifically requests a full error set, then we will provide it. 
+ req := &lnrpc.ListPeersRequest{ + LatestError: !ctx.IsSet("list_errors"), + } resp, err := client.ListPeers(ctxb, req) if err != nil { return err @@ -1400,6 +1176,86 @@ func create(ctx *cli.Context) error { client, cleanUp := getWalletUnlockerClient(ctx) defer cleanUp() + var ( + chanBackups *lnrpc.ChanBackupSnapshot + + // We use var restoreSCB to track if we will be including an SCB + // recovery in the init wallet request. + restoreSCB = false + ) + + backups, err := parseChanBackups(ctx) + + // We'll check to see if the user provided any static channel backups (SCB), + // if so, we will warn the user that SCB recovery closes all open channels + // and ask them to confirm their intention. + // If the user agrees, we'll add the SCB recovery onto the final init wallet + // request. + switch { + // parseChanBackups returns an errMissingBackup error (which we ignore) if + // the user did not request a SCB recovery. + case err == errMissingChanBackup: + + // Passed an invalid channel backup file. + case err != nil: + return fmt.Errorf("unable to parse chan backups: %v", err) + + // We have an SCB recovery option with a valid backup file. + default: + + warningLoop: + for { + + fmt.Println() + fmt.Printf("WARNING: You are attempting to restore from a " + + "static channel backup (SCB) file.\nThis action will CLOSE " + + "all currently open channels, and you will pay on-chain fees." + + "\n\nAre you sure you want to recover funds from a" + + " static channel backup? (Enter y/n): ") + + reader := bufio.NewReader(os.Stdin) + answer, err := reader.ReadString('\n') + if err != nil { + return err + } + + answer = strings.TrimSpace(answer) + answer = strings.ToLower(answer) + + switch answer { + case "y": + restoreSCB = true + break warningLoop + case "n": + fmt.Println("Aborting SCB recovery") + return nil + } + } + } + + // Proceed with SCB recovery. 
+ if restoreSCB { + fmt.Println("Static Channel Backup (SCB) recovery selected!") + if backups != nil { + switch { + case backups.GetChanBackups() != nil: + singleBackup := backups.GetChanBackups() + chanBackups = &lnrpc.ChanBackupSnapshot{ + SingleChanBackups: singleBackup, + } + + case backups.GetMultiChanBackup() != nil: + multiBackup := backups.GetMultiChanBackup() + chanBackups = &lnrpc.ChanBackupSnapshot{ + MultiChanBackup: &lnrpc.MultiChanBackup{ + MultiChanBackup: multiBackup, + }, + } + } + } + + } + walletPassword, err := capturePassword( "Input wallet password: ", false, walletunlocker.ValidatePassword, ) @@ -1568,34 +1424,6 @@ mnemonicCheck: fmt.Println("\n!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO " + "RESTORE THE WALLET!!!") - // We'll also check to see if they provided any static channel backups, - // if so, then we'll also tack these onto the final init wallet request. - // We can ignore the errMissingChanBackup error as it's an optional - // field. - backups, err := parseChanBackups(ctx) - if err != nil && err != errMissingChanBackup { - return fmt.Errorf("unable to parse chan backups: %v", err) - } - - var chanBackups *lnrpc.ChanBackupSnapshot - if backups != nil { - switch { - case backups.GetChanBackups() != nil: - singleBackup := backups.GetChanBackups() - chanBackups = &lnrpc.ChanBackupSnapshot{ - SingleChanBackups: singleBackup, - } - - case backups.GetMultiChanBackup() != nil: - multiBackup := backups.GetMultiChanBackup() - chanBackups = &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: &lnrpc.MultiChanBackup{ - MultiChanBackup: multiBackup, - }, - } - } - } - // With either the user's prior cipher seed, or a newly generated one, // we'll go ahead and initialize the wallet. 
req := &lnrpc.InitWalletRequest{ @@ -1682,6 +1510,16 @@ var unlockCommand = cli.Command{ "maximum number of consecutive, unused " + "addresses ever generated by the wallet.", }, + cli.BoolFlag{ + Name: "stdin", + Usage: "read password from standard input instead of " + + "prompting for it. THIS IS CONSIDERED TO " + + "BE DANGEROUS if the password is located in " + + "a file that can be read by another user. " + + "This flag should only be used in " + + "combination with some sort of password " + + "manager or secrets vault.", + }, }, Action: actionDecorator(unlock), } @@ -1691,12 +1529,37 @@ func unlock(ctx *cli.Context) error { client, cleanUp := getWalletUnlockerClient(ctx) defer cleanUp() - fmt.Printf("Input wallet password: ") - pw, err := terminal.ReadPassword(int(syscall.Stdin)) + var ( + pw []byte + err error + ) + switch { + // Read the password from standard in as if it were a file. This should + // only be used if the password is piped into lncli from some sort of + // password manager. If the user types the password instead, it will be + // echoed in the console. + case ctx.IsSet("stdin"): + reader := bufio.NewReader(os.Stdin) + pw, err = reader.ReadBytes('\n') + + // Remove carriage return and newline characters. + pw = bytes.Trim(pw, "\r\n") + + // Read the password from a terminal by default. This requires the + // terminal to be a real tty and will fail if a string is piped into + // lncli. + default: + fmt.Printf("Input wallet password: ") + + // The variable syscall.Stdin is of a different type in the + // Windows API that's why we need the explicit cast. And of + // course the linter doesn't like it either. 
+ pw, err = terminal.ReadPassword(int(syscall.Stdin)) // nolint:unconvert + fmt.Println() + } if err != nil { return err } - fmt.Println() args := ctx.Args() @@ -1842,11 +1705,6 @@ var getInfoCommand = cli.Command{ Action: actionDecorator(getInfo), } -type chain struct { - Chain string `json:"chain"` - Network string `json:"network"` -} - func getInfo(ctx *cli.Context) error { ctxb := context.Background() client, cleanUp := getClient(ctx) @@ -1858,51 +1716,7 @@ func getInfo(ctx *cli.Context) error { return err } - chains := make([]chain, len(resp.Chains)) - for i, c := range resp.Chains { - chains[i] = chain{ - Chain: c.Chain, - Network: c.Network, - } - } - - // We print a struct that mimics the proto definition of GetInfoResponse - // but has a better ordering for the same list of fields. - printJSON(struct { - Version string `json:"version"` - IdentityPubkey string `json:"identity_pubkey"` - Alias string `json:"alias"` - Color string `json:"color"` - NumPendingChannels uint32 `json:"num_pending_channels"` - NumActiveChannels uint32 `json:"num_active_channels"` - NumInactiveChannels uint32 `json:"num_inactive_channels"` - NumPeers uint32 `json:"num_peers"` - BlockHeight uint32 `json:"block_height"` - BlockHash string `json:"block_hash"` - BestHeaderTimestamp int64 `json:"best_header_timestamp"` - SyncedToChain bool `json:"synced_to_chain"` - SyncedToGraph bool `json:"synced_to_graph"` - Testnet bool `json:"testnet"` - Chains []chain `json:"chains"` - Uris []string `json:"uris"` - }{ - Version: resp.Version, - IdentityPubkey: resp.IdentityPubkey, - Alias: resp.Alias, - Color: resp.Color, - NumPendingChannels: resp.NumPendingChannels, - NumActiveChannels: resp.NumActiveChannels, - NumInactiveChannels: resp.NumInactiveChannels, - NumPeers: resp.NumPeers, - BlockHeight: resp.BlockHeight, - BlockHash: resp.BlockHash, - BestHeaderTimestamp: resp.BestHeaderTimestamp, - SyncedToChain: resp.SyncedToChain, - SyncedToGraph: resp.SyncedToGraph, - Testnet: resp.Testnet, - 
Chains: chains, - Uris: resp.Uris, - }) + printRespJSON(resp) return nil } @@ -1950,6 +1764,12 @@ var listChannelsCommand = cli.Command{ Name: "private_only", Usage: "only list channels which are currently private", }, + cli.StringFlag{ + Name: "peer", + Usage: "(optional) only display channels with a " + + "particular peer, accepts 66-byte, " + + "hex-encoded pubkeys", + }, }, Action: actionDecorator(listChannels), } @@ -1959,11 +1779,26 @@ func listChannels(ctx *cli.Context) error { client, cleanUp := getClient(ctx) defer cleanUp() + peer := ctx.String("peer") + + // If the user requested channels with a particular key, parse the + // provided pubkey. + var peerKey []byte + if len(peer) > 0 { + pk, err := route.NewVertexFromStr(peer) + if err != nil { + return fmt.Errorf("invalid --peer pubkey: %v", err) + } + + peerKey = pk[:] + } + req := &lnrpc.ListChannelsRequest{ ActiveOnly: ctx.Bool("active_only"), InactiveOnly: ctx.Bool("inactive_only"), PublicOnly: ctx.Bool("public_only"), PrivateOnly: ctx.Bool("private_only"), + Peer: peerKey, } resp, err := client.ListChannels(ctxb, req) @@ -1971,8 +1806,6 @@ func listChannels(ctx *cli.Context) error { return err } - // TODO(roasbeef): defer close the client for the all - printRespJSON(resp) return nil @@ -2040,807 +1873,105 @@ func closedChannels(ctx *cli.Context) error { return nil } -var cltvLimitFlag = cli.UintFlag{ - Name: "cltv_limit", - Usage: "the maximum time lock that may be used for " + - "this payment", -} - -// paymentFlags returns common flags for sendpayment and payinvoice. 
-func paymentFlags() []cli.Flag { - return []cli.Flag{ - cli.StringFlag{ - Name: "pay_req", - Usage: "a zpay32 encoded payment request to fulfill", - }, - cli.Int64Flag{ - Name: "fee_limit", - Usage: "maximum fee allowed in satoshis when " + - "sending the payment", - }, - cli.Int64Flag{ - Name: "fee_limit_percent", - Usage: "percentage of the payment's amount used as " + - "the maximum fee allowed when sending the " + - "payment", - }, - cltvLimitFlag, - cli.Uint64Flag{ - Name: "outgoing_chan_id", - Usage: "short channel id of the outgoing channel to " + - "use for the first hop of the payment", - Value: 0, - }, +var describeGraphCommand = cli.Command{ + Name: "describegraph", + Category: "Graph", + Description: "Prints a human readable version of the known channel " + + "graph from the PoV of the node", + Usage: "Describe the network graph.", + Flags: []cli.Flag{ cli.BoolFlag{ - Name: "force, f", - Usage: "will skip payment request confirmation", + Name: "include_unannounced", + Usage: "If set, unannounced channels will be included in the " + + "graph. Unannounced channels are both private channels, and " + + "public channels that are not yet announced to the network.", }, - } + }, + Action: actionDecorator(describeGraph), } -var sendPaymentCommand = cli.Command{ - Name: "sendpayment", - Category: "Payments", - Usage: "Send a payment over lightning.", - Description: ` - Send a payment over Lightning. One can either specify the full - parameters of the payment, or just use a payment request which encodes - all the payment details. - - If payment isn't manually specified, then only a payment request needs - to be passed using the --pay_req argument. - - If the payment *is* manually specified, then all four alternative - arguments need to be specified in order to complete the payment: - * --dest=N - * --amt=A - * --final_cltv_delta=T - * --payment_hash=H - - The --debug_send flag is provided for usage *purely* in test - environments. 
If specified, then the payment hash isn't required, as - it'll use the hash of all zeroes. This mode allows one to quickly test - payment connectivity without having to create an invoice at the - destination. - `, - ArgsUsage: "dest amt payment_hash final_cltv_delta | --pay_req=[payment request]", - Flags: append(paymentFlags(), - cli.StringFlag{ - Name: "dest, d", - Usage: "the compressed identity pubkey of the " + - "payment recipient", - }, - cli.Int64Flag{ - Name: "amt, a", - Usage: "number of satoshis to send", - }, - cli.StringFlag{ - Name: "payment_hash, r", - Usage: "the hash to use within the payment's HTLC", - }, - cli.BoolFlag{ - Name: "debug_send", - Usage: "use the debug rHash when sending the HTLC", - }, - cli.Int64Flag{ - Name: "final_cltv_delta", - Usage: "the number of blocks the last hop has to reveal the preimage", - }, - ), - Action: sendPayment, -} +func describeGraph(ctx *cli.Context) error { + client, cleanUp := getClient(ctx) + defer cleanUp() -// retrieveFeeLimit retrieves the fee limit based on the different fee limit -// flags passed. -func retrieveFeeLimit(ctx *cli.Context) (*lnrpc.FeeLimit, error) { - switch { - case ctx.IsSet("fee_limit") && ctx.IsSet("fee_limit_percent"): - return nil, fmt.Errorf("either fee_limit or fee_limit_percent " + - "can be set, but not both") - case ctx.IsSet("fee_limit"): - return &lnrpc.FeeLimit{ - Limit: &lnrpc.FeeLimit_Fixed{ - Fixed: ctx.Int64("fee_limit"), - }, - }, nil - case ctx.IsSet("fee_limit_percent"): - return &lnrpc.FeeLimit{ - Limit: &lnrpc.FeeLimit_Percent{ - Percent: ctx.Int64("fee_limit_percent"), - }, - }, nil - } - - // Since the fee limit flags aren't required, we don't return an error - // if they're not set. 
- return nil, nil -} - -func confirmPayReq(resp *lnrpc.PayReq, amt int64) error { - fmt.Printf("Description: %v\n", resp.GetDescription()) - fmt.Printf("Amount (in satoshis): %v\n", amt) - fmt.Printf("Destination: %v\n", resp.GetDestination()) - - confirm := promptForConfirmation("Confirm payment (yes/no): ") - if !confirm { - return fmt.Errorf("payment not confirmed") - } - - return nil -} - -func sendPayment(ctx *cli.Context) error { - // Show command help if no arguments provided - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - cli.ShowCommandHelp(ctx, "sendpayment") - return nil - } - - // If a payment request was provided, we can exit early since all of the - // details of the payment are encoded within the request. - if ctx.IsSet("pay_req") { - req := &lnrpc.SendRequest{ - PaymentRequest: ctx.String("pay_req"), - Amt: ctx.Int64("amt"), - } - - return sendPaymentRequest(ctx, req) - } - - var ( - destNode []byte - amount int64 - err error - ) - - args := ctx.Args() - - switch { - case ctx.IsSet("dest"): - destNode, err = hex.DecodeString(ctx.String("dest")) - case args.Present(): - destNode, err = hex.DecodeString(args.First()) - args = args.Tail() - default: - return fmt.Errorf("destination txid argument missing") - } - if err != nil { - return err - } - - if len(destNode) != 33 { - return fmt.Errorf("dest node pubkey must be exactly 33 bytes, is "+ - "instead: %v", len(destNode)) - } - - if ctx.IsSet("amt") { - amount = ctx.Int64("amt") - } else if args.Present() { - amount, err = strconv.ParseInt(args.First(), 10, 64) - args = args.Tail() - if err != nil { - return fmt.Errorf("unable to decode payment amount: %v", err) - } - } - - req := &lnrpc.SendRequest{ - Dest: destNode, - Amt: amount, - } - - if ctx.Bool("debug_send") && (ctx.IsSet("payment_hash") || args.Present()) { - return fmt.Errorf("do not provide a payment hash with debug send") - } else if !ctx.Bool("debug_send") { - var rHash []byte - - switch { - case ctx.IsSet("payment_hash"): - rHash, err = 
hex.DecodeString(ctx.String("payment_hash")) - case args.Present(): - rHash, err = hex.DecodeString(args.First()) - args = args.Tail() - default: - return fmt.Errorf("payment hash argument missing") - } - - if err != nil { - return err - } - if len(rHash) != 32 { - return fmt.Errorf("payment hash must be exactly 32 "+ - "bytes, is instead %v", len(rHash)) - } - req.PaymentHash = rHash - - switch { - case ctx.IsSet("final_cltv_delta"): - req.FinalCltvDelta = int32(ctx.Int64("final_cltv_delta")) - case args.Present(): - delta, err := strconv.ParseInt(args.First(), 10, 64) - if err != nil { - return err - } - req.FinalCltvDelta = int32(delta) - } - } - - return sendPaymentRequest(ctx, req) -} - -func sendPaymentRequest(ctx *cli.Context, req *lnrpc.SendRequest) error { - client, cleanUp := getClient(ctx) - defer cleanUp() - - // First, we'll retrieve the fee limit value passed since it can apply - // to both ways of sending payments (with the payment request or - // providing the details manually). 
- feeLimit, err := retrieveFeeLimit(ctx) - if err != nil { - return err - } - req.FeeLimit = feeLimit - - req.OutgoingChanId = ctx.Uint64("outgoing_chan_id") - req.CltvLimit = uint32(ctx.Int(cltvLimitFlag.Name)) - - amt := req.Amt - - if req.PaymentRequest != "" { - req := &lnrpc.PayReqString{PayReq: req.PaymentRequest} - resp, err := client.DecodePayReq(context.Background(), req) - if err != nil { - return err - } - - invoiceAmt := resp.GetNumSatoshis() - if invoiceAmt != 0 { - amt = invoiceAmt - } - - if !ctx.Bool("force") { - err := confirmPayReq(resp, amt) - if err != nil { - return err - } - } - } - - paymentStream, err := client.SendPayment(context.Background()) - if err != nil { - return err - } - - if err := paymentStream.Send(req); err != nil { - return err - } - - resp, err := paymentStream.Recv() - if err != nil { - return err - } - - paymentStream.CloseSend() - - printJSON(struct { - E string `json:"payment_error"` - P string `json:"payment_preimage"` - R *lnrpc.Route `json:"payment_route"` - }{ - E: resp.PaymentError, - P: hex.EncodeToString(resp.PaymentPreimage), - R: resp.PaymentRoute, - }) - - // If we get a payment error back, we pass an error - // up to main which eventually calls fatal() and returns - // with a non-zero exit code. 
- if resp.PaymentError != "" { - return errors.New(resp.PaymentError) - } - - return nil -} - -var payInvoiceCommand = cli.Command{ - Name: "payinvoice", - Category: "Payments", - Usage: "Pay an invoice over lightning.", - ArgsUsage: "pay_req", - Flags: append(paymentFlags(), - cli.Int64Flag{ - Name: "amt", - Usage: "(optional) number of satoshis to fulfill the " + - "invoice", - }, - ), - Action: actionDecorator(payInvoice), -} - -func payInvoice(ctx *cli.Context) error { - args := ctx.Args() - - var payReq string - switch { - case ctx.IsSet("pay_req"): - payReq = ctx.String("pay_req") - case args.Present(): - payReq = args.First() - default: - return fmt.Errorf("pay_req argument missing") - } - - req := &lnrpc.SendRequest{ - PaymentRequest: payReq, - Amt: ctx.Int64("amt"), - } - - return sendPaymentRequest(ctx, req) -} - -var sendToRouteCommand = cli.Command{ - Name: "sendtoroute", - Category: "Payments", - Usage: "Send a payment over a predefined route.", - Description: ` - Send a payment over Lightning using a specific route. One must specify - the route to attempt and the payment hash. This command can even - be chained with the response to queryroutes or buildroute. This command - can be used to implement channel rebalancing by crafting a self-route, - or even atomic swaps using a self-route that crosses multiple chains. - - There are three ways to specify a route: - * using the --routes parameter to manually specify a JSON encoded - route in the format of the return value of queryroutes or - buildroute: - (lncli sendtoroute --payment_hash= --routes=) - - * passing the route as a positional argument: - (lncli sendtoroute --payment_hash=pay_hash ) - - * or reading in the route from stdin, which can allow chaining the - response from queryroutes or buildroute, or even read in a file - with a pre-computed route: - (lncli queryroutes --args.. 
| lncli sendtoroute --payment_hash= - - - notice the '-' at the end, which signals that lncli should read - the route in from stdin - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "payment_hash, pay_hash", - Usage: "the hash to use within the payment's HTLC", - }, - cli.StringFlag{ - Name: "routes, r", - Usage: "a json array string in the format of the response " + - "of queryroutes that denotes which routes to use", - }, - }, - Action: sendToRoute, -} - -func sendToRoute(ctx *cli.Context) error { - // Show command help if no arguments provided. - if ctx.NArg() == 0 && ctx.NumFlags() == 0 { - cli.ShowCommandHelp(ctx, "sendtoroute") - return nil - } - - args := ctx.Args() - - var ( - rHash []byte - err error - ) - switch { - case ctx.IsSet("payment_hash"): - rHash, err = hex.DecodeString(ctx.String("payment_hash")) - case args.Present(): - rHash, err = hex.DecodeString(args.First()) - - args = args.Tail() - default: - return fmt.Errorf("payment hash argument missing") - } - - if err != nil { - return err - } - - if len(rHash) != 32 { - return fmt.Errorf("payment hash must be exactly 32 "+ - "bytes, is instead %d", len(rHash)) - } - - var jsonRoutes string - switch { - // The user is specifying the routes explicitly via the key word - // argument. - case ctx.IsSet("routes"): - jsonRoutes = ctx.String("routes") - - // The user is specifying the routes as a positional argument. - case args.Present() && args.First() != "-": - jsonRoutes = args.First() - - // The user is signalling that we should read stdin in order to parse - // the set of target routes. - case args.Present() && args.First() == "-": - b, err := ioutil.ReadAll(os.Stdin) - if err != nil { - return err - } - if len(b) == 0 { - return fmt.Errorf("queryroutes output is empty") - } - - jsonRoutes = string(b) - } - - // Try to parse the provided json both in the legacy QueryRoutes format - // that contains a list of routes and the single route BuildRoute - // format. 
- var route *lnrpc.Route - routes := &lnrpc.QueryRoutesResponse{} - err = jsonpb.UnmarshalString(jsonRoutes, routes) - if err == nil { - if len(routes.Routes) == 0 { - return fmt.Errorf("no routes provided") - } - - if len(routes.Routes) != 1 { - return fmt.Errorf("expected a single route, but got %v", - len(routes.Routes)) - } - - route = routes.Routes[0] - } else { - routes := &routerrpc.BuildRouteResponse{} - err = jsonpb.UnmarshalString(jsonRoutes, routes) - if err != nil { - return fmt.Errorf("unable to unmarshal json string "+ - "from incoming array of routes: %v", err) - } - - route = routes.Route - } - - req := &lnrpc.SendToRouteRequest{ - PaymentHash: rHash, - Route: route, - } - - return sendToRouteRequest(ctx, req) -} - -func sendToRouteRequest(ctx *cli.Context, req *lnrpc.SendToRouteRequest) error { - client, cleanUp := getClient(ctx) - defer cleanUp() - - paymentStream, err := client.SendToRoute(context.Background()) - if err != nil { - return err - } - - if err := paymentStream.Send(req); err != nil { - return err - } - - resp, err := paymentStream.Recv() - if err != nil { - return err - } - - printJSON(struct { - E string `json:"payment_error"` - P string `json:"payment_preimage"` - R *lnrpc.Route `json:"payment_route"` - }{ - E: resp.PaymentError, - P: hex.EncodeToString(resp.PaymentPreimage), - R: resp.PaymentRoute, - }) - - return nil -} - -var addInvoiceCommand = cli.Command{ - Name: "addinvoice", - Category: "Payments", - Usage: "Add a new invoice.", - Description: ` - Add a new invoice, expressing intent for a future payment. - - Invoices without an amount can be created by not supplying any - parameters or providing an amount of 0. 
These invoices allow the payee - to specify the amount of satoshis they wish to send.`, - ArgsUsage: "value preimage", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "memo", - Usage: "a description of the payment to attach along " + - "with the invoice (default=\"\")", - }, - cli.StringFlag{ - Name: "receipt", - Usage: "an optional cryptographic receipt of payment", - }, - cli.StringFlag{ - Name: "preimage", - Usage: "the hex-encoded preimage (32 byte) which will " + - "allow settling an incoming HTLC payable to this " + - "preimage. If not set, a random preimage will be " + - "created.", - }, - cli.Int64Flag{ - Name: "amt", - Usage: "the amt of satoshis in this invoice", - }, - cli.StringFlag{ - Name: "description_hash", - Usage: "SHA-256 hash of the description of the payment. " + - "Used if the purpose of payment cannot naturally " + - "fit within the memo. If provided this will be " + - "used instead of the description(memo) field in " + - "the encoded invoice.", - }, - cli.StringFlag{ - Name: "fallback_addr", - Usage: "fallback on-chain address that can be used in " + - "case the lightning payment fails", - }, - cli.Int64Flag{ - Name: "expiry", - Usage: "the invoice's expiry time in seconds. 
If not " + - "specified an expiry of 3600 seconds (1 hour) " + - "is implied.", - }, - cli.BoolTFlag{ - Name: "private", - Usage: "encode routing hints in the invoice with " + - "private channels in order to assist the " + - "payer in reaching you", - }, - }, - Action: actionDecorator(addInvoice), -} - -func addInvoice(ctx *cli.Context) error { - var ( - preimage []byte - descHash []byte - receipt []byte - amt int64 - err error - ) - - client, cleanUp := getClient(ctx) - defer cleanUp() - - args := ctx.Args() - - switch { - case ctx.IsSet("amt"): - amt = ctx.Int64("amt") - case args.Present(): - amt, err = strconv.ParseInt(args.First(), 10, 64) - args = args.Tail() - if err != nil { - return fmt.Errorf("unable to decode amt argument: %v", err) - } - } - - switch { - case ctx.IsSet("preimage"): - preimage, err = hex.DecodeString(ctx.String("preimage")) - case args.Present(): - preimage, err = hex.DecodeString(args.First()) - } - - if err != nil { - return fmt.Errorf("unable to parse preimage: %v", err) - } - - descHash, err = hex.DecodeString(ctx.String("description_hash")) - if err != nil { - return fmt.Errorf("unable to parse description_hash: %v", err) - } - - receipt, err = hex.DecodeString(ctx.String("receipt")) - if err != nil { - return fmt.Errorf("unable to parse receipt: %v", err) - } - - invoice := &lnrpc.Invoice{ - Memo: ctx.String("memo"), - Receipt: receipt, - RPreimage: preimage, - Value: amt, - DescriptionHash: descHash, - FallbackAddr: ctx.String("fallback_addr"), - Expiry: ctx.Int64("expiry"), - Private: ctx.Bool("private"), + req := &lnrpc.ChannelGraphRequest{ + IncludeUnannounced: ctx.Bool("include_unannounced"), } - resp, err := client.AddInvoice(context.Background(), invoice) + graph, err := client.DescribeGraph(context.Background(), req) if err != nil { return err } - printJSON(struct { - RHash string `json:"r_hash"` - PayReq string `json:"pay_req"` - AddIndex uint64 `json:"add_index"` - }{ - RHash: hex.EncodeToString(resp.RHash), - PayReq: 
resp.PaymentRequest, - AddIndex: resp.AddIndex, - }) - + printRespJSON(graph) return nil } -var lookupInvoiceCommand = cli.Command{ - Name: "lookupinvoice", - Category: "Payments", - Usage: "Lookup an existing invoice by its payment hash.", - ArgsUsage: "rhash", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "rhash", - Usage: "the 32 byte payment hash of the invoice to query for, the hash " + - "should be a hex-encoded string", - }, - }, - Action: actionDecorator(lookupInvoice), +var getNodeMetricsCommand = cli.Command{ + Name: "getnodemetrics", + Category: "Graph", + Description: "Prints out node metrics calculated from the current graph", + Usage: "Get node metrics.", + Action: actionDecorator(getNodeMetrics), } -func lookupInvoice(ctx *cli.Context) error { +func getNodeMetrics(ctx *cli.Context) error { client, cleanUp := getClient(ctx) defer cleanUp() - var ( - rHash []byte - err error - ) - - switch { - case ctx.IsSet("rhash"): - rHash, err = hex.DecodeString(ctx.String("rhash")) - case ctx.Args().Present(): - rHash, err = hex.DecodeString(ctx.Args().First()) - default: - return fmt.Errorf("rhash argument missing") - } - - if err != nil { - return fmt.Errorf("unable to decode rhash argument: %v", err) - } - - req := &lnrpc.PaymentHash{ - RHash: rHash, + req := &lnrpc.NodeMetricsRequest{ + Types: []lnrpc.NodeMetricType{lnrpc.NodeMetricType_BETWEENNESS_CENTRALITY}, } - invoice, err := client.LookupInvoice(context.Background(), req) + nodeMetrics, err := client.GetNodeMetrics(context.Background(), req) if err != nil { return err } - printRespJSON(invoice) - + printRespJSON(nodeMetrics) return nil } -var listInvoicesCommand = cli.Command{ - Name: "listinvoices", +var listPaymentsCommand = cli.Command{ + Name: "listpayments", Category: "Payments", - Usage: "List all invoices currently stored within the database. 
Any " + - "active debug invoices are ignored.", - Description: ` - This command enables the retrieval of all invoices currently stored - within the database. It has full support for paginationed responses, - allowing users to query for specific invoices through their add_index. - This can be done by using either the first_index_offset or - last_index_offset fields included in the response as the index_offset of - the next request. The reversed flag is set by default in order to - paginate backwards. If you wish to paginate forwards, you must - explicitly set the flag to false. If none of the parameters are - specified, then the last 100 invoices will be returned. - - For example: if you have 200 invoices, "lncli listinvoices" will return - the last 100 created. If you wish to retrieve the previous 100, the - first_offset_index of the response can be used as the index_offset of - the next listinvoices request.`, + Usage: "List all outgoing payments.", + Description: "This command enables the retrieval of payments stored " + + "in the database. Pagination is supported by the usage of " + + "index_offset in combination with the paginate_forwards flag. " + + "Reversed pagination is enabled by default to receive " + + "current payments first. Pagination can be resumed by using " + + "the returned last_index_offset (for forwards order), or " + + "first_index_offset (for reversed order) as the offset_index. 
", Flags: []cli.Flag{ cli.BoolFlag{ - Name: "pending_only", - Usage: "toggles if all invoices should be returned, " + - "or only those that are currently unsettled", + Name: "include_incomplete", + Usage: "if set to true, payments still in flight (or " + + "failed) will be returned as well, keeping" + + "indices for payments the same as without " + + "the flag", }, - cli.Uint64Flag{ + cli.UintFlag{ Name: "index_offset", - Usage: "the index of an invoice that will be used as " + - "either the start or end of a query to " + - "determine which invoices should be returned " + - "in the response", + Usage: "The index of a payment that will be used as " + + "either the start (in forwards mode) or end " + + "(in reverse mode) of a query to determine " + + "which payments should be returned in the " + + "response, where the index_offset is " + + "excluded. If index_offset is set to zero in " + + "reversed mode, the query will end with the " + + "last payment made.", + }, + cli.UintFlag{ + Name: "max_payments", + Usage: "the max number of payments to return, by " + + "default, all completed payments are returned", }, - cli.Uint64Flag{ - Name: "max_invoices", - Usage: "the max number of invoices to return", - }, - cli.BoolTFlag{ - Name: "reversed", - Usage: "if set, the invoices returned precede the " + - "given index_offset, allowing backwards " + - "pagination", - }, - }, - Action: actionDecorator(listInvoices), -} - -func listInvoices(ctx *cli.Context) error { - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.ListInvoiceRequest{ - PendingOnly: ctx.Bool("pending_only"), - IndexOffset: ctx.Uint64("index_offset"), - NumMaxInvoices: ctx.Uint64("max_invoices"), - Reversed: ctx.Bool("reversed"), - } - - invoices, err := client.ListInvoices(context.Background(), req) - if err != nil { - return err - } - - printRespJSON(invoices) - - return nil -} - -var describeGraphCommand = cli.Command{ - Name: "describegraph", - Category: "Peers", - Description: "Prints 
a human readable version of the known channel " + - "graph from the PoV of the node", - Usage: "Describe the network graph.", - Flags: []cli.Flag{ - cli.BoolFlag{ - Name: "include_unannounced", - Usage: "If set, unannounced channels will be included in the " + - "graph. Unannounced channels are both private channels, and " + - "public channels that are not yet announced to the network.", - }, - }, - Action: actionDecorator(describeGraph), -} - -func describeGraph(ctx *cli.Context) error { - client, cleanUp := getClient(ctx) - defer cleanUp() - - req := &lnrpc.ChannelGraphRequest{ - IncludeUnannounced: ctx.Bool("include_unannounced"), - } - - graph, err := client.DescribeGraph(context.Background(), req) - if err != nil { - return err - } - - printRespJSON(graph) - return nil -} - -var listPaymentsCommand = cli.Command{ - Name: "listpayments", - Category: "Payments", - Usage: "List all outgoing payments.", - Flags: []cli.Flag{ cli.BoolFlag{ - Name: "include_incomplete", - Usage: "if set to true, payments still in flight (or failed) will be returned as well", + Name: "paginate_forwards", + Usage: "if set, payments succeeding the " + + "index_offset will be returned, allowing " + + "forwards pagination", }, }, Action: actionDecorator(listPayments), @@ -2852,6 +1983,9 @@ func listPayments(ctx *cli.Context) error { req := &lnrpc.ListPaymentsRequest{ IncludeIncomplete: ctx.Bool("include_incomplete"), + IndexOffset: uint64(ctx.Uint("index_offset")), + MaxPayments: uint64(ctx.Uint("max_payments")), + Reversed: !ctx.Bool("paginate_forwards"), } payments, err := client.ListPayments(context.Background(), req) @@ -2865,7 +1999,7 @@ func listPayments(ctx *cli.Context) error { var getChanInfoCommand = cli.Command{ Name: "getchaninfo", - Category: "Channels", + Category: "Graph", Usage: "Get the state of a channel.", Description: "Prints out the latest authenticated state for a " + "particular channel", @@ -2916,7 +2050,7 @@ func getChanInfo(ctx *cli.Context) error { var 
getNodeInfoCommand = cli.Command{ Name: "getnodeinfo", - Category: "Peers", + Category: "Graph", Usage: "Get information on a specific node.", Description: "Prints out the latest authenticated node state for an " + "advertised node", @@ -3041,7 +2175,7 @@ func queryRoutes(ctx *cli.Context) error { return fmt.Errorf("amt argument missing") } - feeLimit, err := retrieveFeeLimit(ctx) + feeLimit, err := retrieveFeeLimitLegacy(ctx) if err != nil { return err } @@ -3064,6 +2198,38 @@ func queryRoutes(ctx *cli.Context) error { return nil } +// retrieveFeeLimitLegacy retrieves the fee limit based on the different fee +// limit flags passed. This function will eventually disappear in favor of +// retrieveFeeLimit and the new payment rpc. +func retrieveFeeLimitLegacy(ctx *cli.Context) (*lnrpc.FeeLimit, error) { + switch { + case ctx.IsSet("fee_limit") && ctx.IsSet("fee_limit_percent"): + return nil, fmt.Errorf("either fee_limit or fee_limit_percent " + + "can be set, but not both") + case ctx.IsSet("fee_limit"): + return &lnrpc.FeeLimit{ + Limit: &lnrpc.FeeLimit_Fixed{ + Fixed: ctx.Int64("fee_limit"), + }, + }, nil + case ctx.IsSet("fee_limit_percent"): + feeLimitPercent := ctx.Int64("fee_limit_percent") + if feeLimitPercent < 0 { + return nil, errors.New("negative fee limit percentage " + + "provided") + } + return &lnrpc.FeeLimit{ + Limit: &lnrpc.FeeLimit_Percent{ + Percent: feeLimitPercent, + }, + }, nil + } + + // Since the fee limit flags aren't required, we don't return an error + // if they're not set. 
+ return nil, nil +} + var getNetworkInfoCommand = cli.Command{ Name: "getnetworkinfo", Category: "Channels", @@ -3128,48 +2294,6 @@ func debugLevel(ctx *cli.Context) error { return nil } -var decodePayReqCommand = cli.Command{ - Name: "decodepayreq", - Category: "Payments", - Usage: "Decode a payment request.", - Description: "Decode the passed payment request revealing the destination, payment hash and value of the payment request", - ArgsUsage: "pay_req", - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "pay_req", - Usage: "the bech32 encoded payment request", - }, - }, - Action: actionDecorator(decodePayReq), -} - -func decodePayReq(ctx *cli.Context) error { - ctxb := context.Background() - client, cleanUp := getClient(ctx) - defer cleanUp() - - var payreq string - - switch { - case ctx.IsSet("pay_req"): - payreq = ctx.String("pay_req") - case ctx.Args().Present(): - payreq = ctx.Args().First() - default: - return fmt.Errorf("pay_req argument missing") - } - - resp, err := client.DecodePayReq(ctxb, &lnrpc.PayReqString{ - PayReq: payreq, - }) - if err != nil { - return err - } - - printRespJSON(resp) - return nil -} - var listChainTxnsCommand = cli.Command{ Name: "listchaintxns", Category: "On-chain", @@ -3380,6 +2504,12 @@ var updateChannelPolicyCommand = cli.Command{ Usage: "the CLTV delta that will be applied to all " + "forwarded HTLCs", }, + cli.Uint64Flag{ + Name: "min_htlc_msat", + Usage: "if set, the min HTLC size that will be applied " + + "to all forwarded HTLCs. 
If unset, the min HTLC " + + "is left unchanged.", + }, cli.Uint64Flag{ Name: "max_htlc_msat", Usage: "if set, the max HTLC size that will be applied " + @@ -3502,6 +2632,11 @@ func updateChannelPolicy(ctx *cli.Context) error { MaxHtlcMsat: ctx.Uint64("max_htlc_msat"), } + if ctx.IsSet("min_htlc_msat") { + req.MinHtlcMsat = ctx.Uint64("min_htlc_msat") + req.MinHtlcMsatSpecified = true + } + if chanPoint != nil { req.Scope = &lnrpc.PolicyUpdateRequest_ChanPoint{ ChanPoint: chanPoint, @@ -3750,14 +2885,11 @@ func exportChanBackup(ctx *cli.Context) error { printJSON(struct { ChanPoint string `json:"chan_point"` - ChanBackup string `json:"chan_backup"` + ChanBackup []byte `json:"chan_backup"` }{ - ChanPoint: chanPoint.String(), - ChanBackup: hex.EncodeToString( - chanBackup.ChanBackup, - ), - }, - ) + ChanPoint: chanPoint.String(), + ChanBackup: chanBackup.ChanBackup, + }) return nil } @@ -3795,16 +2927,8 @@ func exportChanBackup(ctx *cli.Context) error { }.String()) } - printJSON(struct { - ChanPoints []string `json:"chan_points"` - MultiChanBackup string `json:"multi_chan_backup"` - }{ - ChanPoints: chanPoints, - MultiChanBackup: hex.EncodeToString( - chanBackup.MultiChanBackup.MultiChanBackup, - ), - }, - ) + printRespJSON(chanBackup) + return nil } diff --git a/cmd/lncli/invoicesrpc_active.go b/cmd/lncli/invoicesrpc_active.go index 1cb5471a8a..c565e7fef6 100644 --- a/cmd/lncli/invoicesrpc_active.go +++ b/cmd/lncli/invoicesrpc_active.go @@ -34,7 +34,7 @@ func getInvoicesClient(ctx *cli.Context) (invoicesrpc.InvoicesClient, func()) { var settleInvoiceCommand = cli.Command{ Name: "settleinvoice", - Category: "Payments", + Category: "Invoices", Usage: "Reveal a preimage and use it to settle the corresponding invoice.", Description: ` Todo.`, @@ -81,14 +81,14 @@ func settleInvoice(ctx *cli.Context) error { return err } - printJSON(resp) + printRespJSON(resp) return nil } var cancelInvoiceCommand = cli.Command{ Name: "cancelinvoice", - Category: "Payments", + Category: 
"Invoices", Usage: "Cancels a (hold) invoice", Description: ` Todo.`, @@ -134,14 +134,14 @@ func cancelInvoice(ctx *cli.Context) error { return err } - printJSON(resp) + printRespJSON(resp) return nil } var addHoldInvoiceCommand = cli.Command{ Name: "addholdinvoice", - Category: "Payments", + Category: "Invoices", Usage: "Add a new hold invoice.", Description: ` Add a new invoice, expressing intent for a future payment. @@ -160,6 +160,10 @@ var addHoldInvoiceCommand = cli.Command{ Name: "amt", Usage: "the amt of satoshis in this invoice", }, + cli.Int64Flag{ + Name: "amt_msat", + Usage: "the amt of millisatoshis in this invoice", + }, cli.StringFlag{ Name: "description_hash", Usage: "SHA-256 hash of the description of the payment. " + @@ -192,7 +196,6 @@ var addHoldInvoiceCommand = cli.Command{ func addHoldInvoice(ctx *cli.Context) error { var ( descHash []byte - amt int64 err error ) @@ -212,12 +215,11 @@ func addHoldInvoice(ctx *cli.Context) error { args = args.Tail() - switch { - case ctx.IsSet("amt"): - amt = ctx.Int64("amt") - case args.Present(): - amt, err = strconv.ParseInt(args.First(), 10, 64) + amt := ctx.Int64("amt") + amtMsat := ctx.Int64("amt_msat") + if !ctx.IsSet("amt") && !ctx.IsSet("amt_msat") && args.Present() { + amt, err = strconv.ParseInt(args.First(), 10, 64) if err != nil { return fmt.Errorf("unable to decode amt argument: %v", err) } @@ -236,6 +238,7 @@ func addHoldInvoice(ctx *cli.Context) error { Memo: ctx.String("memo"), Hash: hash, Value: amt, + ValueMsat: amtMsat, DescriptionHash: descHash, FallbackAddr: ctx.String("fallback_addr"), Expiry: ctx.Int64("expiry"), diff --git a/cmd/lncli/main.go b/cmd/lncli/main.go index 7b1d146fca..c54d7555a8 100644 --- a/cmd/lncli/main.go +++ b/cmd/lncli/main.go @@ -39,8 +39,8 @@ var ( defaultTLSCertPath = filepath.Join(defaultLndDir, defaultTLSCertFilename) // maxMsgRecvSize is the largest message our client will receive. We - // set this to ~50Mb atm. 
- maxMsgRecvSize = grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 50) + // set this to 200MiB atm. + maxMsgRecvSize = grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 200) ) func fatal(err error) { @@ -136,7 +136,7 @@ func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn { // We need to use a custom dialer so we can also connect to unix sockets // and not just TCP addresses. genericDialer := lncfg.ClientAddressDialer(defaultRPCPort) - opts = append(opts, grpc.WithDialer(genericDialer)) + opts = append(opts, grpc.WithContextDialer(genericDialer)) opts = append(opts, grpc.WithDefaultCallOptions(maxMsgRecvSize)) conn, err := grpc.Dial(ctx.GlobalString("rpcserver"), opts...) @@ -205,7 +205,7 @@ func extractPathArgs(ctx *cli.Context) (string, string, error) { func main() { app := cli.NewApp() app.Name = "lncli" - app.Version = build.Version() + app.Version = build.Version() + " commit=" + build.Commit app.Usage = "control plane for your Lightning Network Daemon (lnd)" app.Flags = []cli.Flag{ cli.StringFlag{ @@ -283,6 +283,7 @@ func main() { closedChannelsCommand, listPaymentsCommand, describeGraphCommand, + getNodeMetricsCommand, getChanInfoCommand, getNodeInfoCommand, queryRoutesCommand, @@ -299,6 +300,9 @@ func main() { exportChanBackupCommand, verifyChanBackupCommand, restoreChanBackupCommand, + bakeMacaroonCommand, + trackPaymentCommand, + versionCommand, } // Add any extra commands determined by build flags. diff --git a/cmd/lncli/routerrpc_active.go b/cmd/lncli/routerrpc.go similarity index 70% rename from cmd/lncli/routerrpc_active.go rename to cmd/lncli/routerrpc.go index 4d21726790..819f66d22f 100644 --- a/cmd/lncli/routerrpc_active.go +++ b/cmd/lncli/routerrpc.go @@ -1,13 +1,12 @@ -// +build routerrpc - package main import "github.com/urfave/cli" -// routerCommands will return nil for non-routerrpc builds. +// routerCommands returns a list of routerrpc commands. 
func routerCommands() []cli.Command { return []cli.Command{ queryMissionControlCommand, + queryProbCommand, resetMissionControlCommand, buildRouteCommand, } diff --git a/cmd/lncli/routerrpc_default.go b/cmd/lncli/routerrpc_default.go deleted file mode 100644 index c2a5fe7bed..0000000000 --- a/cmd/lncli/routerrpc_default.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !routerrpc - -package main - -import "github.com/urfave/cli" - -// routerCommands will return nil for non-routerrpc builds. -func routerCommands() []cli.Command { - return nil -} diff --git a/cmd/lncli/types.go b/cmd/lncli/types.go index 30d559a7e5..28f494742e 100644 --- a/cmd/lncli/types.go +++ b/cmd/lncli/types.go @@ -58,7 +58,7 @@ type Utxo struct { // printed in base64. func NewUtxoFromProto(utxo *lnrpc.Utxo) *Utxo { return &Utxo{ - Type: utxo.Type, + Type: utxo.AddressType, Address: utxo.Address, AmountSat: utxo.AmountSat, PkScript: utxo.PkScript, diff --git a/cmd/lncli/walletrpc_active.go b/cmd/lncli/walletrpc_active.go index 77e050fdfa..3fc9781508 100644 --- a/cmd/lncli/walletrpc_active.go +++ b/cmd/lncli/walletrpc_active.go @@ -4,9 +4,12 @@ package main import ( "context" + "errors" "fmt" "sort" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/urfave/cli" ) @@ -23,6 +26,7 @@ func walletCommands() []cli.Command { Subcommands: []cli.Command{ pendingSweepsCommand, bumpFeeCommand, + bumpCloseFeeCommand, }, }, } @@ -113,7 +117,13 @@ var bumpFeeCommand = cli.Command{ Note that this command currently doesn't perform any validation checks on the fee preference being provided. For now, the responsibility of ensuring that the new fee preference is sufficient is delegated to the - user.`, + user. + + The force flag enables sweeping of inputs that are negatively yielding. 
+ Normally it does not make sense to lose money on sweeping, unless a + parent transaction needs to get confirmed and there is only a small + output available to attach the child transaction to. + `, Flags: []cli.Flag{ cli.Uint64Flag{ Name: "conf_target", @@ -125,6 +135,10 @@ var bumpFeeCommand = cli.Command{ Usage: "a manual fee expressed in sat/byte that " + "should be used when sweeping the output", }, + cli.BoolFlag{ + Name: "force", + Usage: "sweep even if the yield is negative", + }, }, Action: actionDecorator(bumpFee), } @@ -132,7 +146,7 @@ var bumpFeeCommand = cli.Command{ func bumpFee(ctx *cli.Context) error { // Display the command's help message if we do not have the expected // number of arguments/flags. - if ctx.NArg() != 1 || ctx.NumFlags() != 1 { + if ctx.NArg() != 1 { return cli.ShowCommandHelp(ctx, "bumpfee") } @@ -142,24 +156,14 @@ func bumpFee(ctx *cli.Context) error { return err } - var confTarget, satPerByte uint32 - switch { - case ctx.IsSet("conf_target") && ctx.IsSet("sat_per_byte"): - return fmt.Errorf("either conf_target or sat_per_byte should " + - "be set, but not both") - case ctx.IsSet("conf_target"): - confTarget = uint32(ctx.Uint64("conf_target")) - case ctx.IsSet("sat_per_byte"): - satPerByte = uint32(ctx.Uint64("sat_per_byte")) - } - client, cleanUp := getWalletClient(ctx) defer cleanUp() resp, err := client.BumpFee(context.Background(), &walletrpc.BumpFeeRequest{ Outpoint: protoOutPoint, - TargetConf: confTarget, - SatPerByte: satPerByte, + TargetConf: uint32(ctx.Uint64("conf_target")), + SatPerByte: uint32(ctx.Uint64("sat_per_byte")), + Force: ctx.Bool("force"), }) if err != nil { return err @@ -169,3 +173,130 @@ func bumpFee(ctx *cli.Context) error { return nil } + +var bumpCloseFeeCommand = cli.Command{ + Name: "bumpclosefee", + Usage: "Bumps the fee of a channel closing transaction.", + ArgsUsage: "channel_point", + Description: ` + This command allows the fee of a channel closing transaction to be + increased by using the 
child-pays-for-parent mechanism. It will instruct + the sweeper to sweep the anchor outputs of transactions in the set + of valid commitments for the specified channel at the requested fee + rate or confirmation target. + `, + Flags: []cli.Flag{ + cli.Uint64Flag{ + Name: "conf_target", + Usage: "the number of blocks that the output should " + + "be swept on-chain within", + }, + cli.Uint64Flag{ + Name: "sat_per_byte", + Usage: "a manual fee expressed in sat/byte that " + + "should be used when sweeping the output", + }, + }, + Action: actionDecorator(bumpCloseFee), +} + +func bumpCloseFee(ctx *cli.Context) error { + // Display the command's help message if we do not have the expected + // number of arguments/flags. + if ctx.NArg() != 1 { + return cli.ShowCommandHelp(ctx, "bumpclosefee") + } + + // Validate the channel point. + channelPoint := ctx.Args().Get(0) + _, err := NewProtoOutPoint(channelPoint) + if err != nil { + return err + } + + // Fetch all waiting close channels. + client, cleanUp := getClient(ctx) + defer cleanUp() + + // Fetch waiting close channel commitments. + commitments, err := getWaitingCloseCommitments(client, channelPoint) + if err != nil { + return err + } + + // Retrieve pending sweeps. + walletClient, cleanUp := getWalletClient(ctx) + defer cleanUp() + + ctxb := context.Background() + sweeps, err := walletClient.PendingSweeps( + ctxb, &walletrpc.PendingSweepsRequest{}, + ) + if err != nil { + return err + } + + // Match pending sweeps with commitments of the channel for which a bump + // is requested and bump their fees. + commitSet := map[string]struct{}{ + commitments.LocalTxid: {}, + commitments.RemoteTxid: {}, + } + if commitments.RemotePendingTxid != "" { + commitSet[commitments.RemotePendingTxid] = struct{}{} + } + + for _, sweep := range sweeps.PendingSweeps { + // Only bump anchor sweeps. + if sweep.WitnessType != walletrpc.WitnessType_COMMITMENT_ANCHOR { + continue + } + + // Skip unrelated sweeps. 
+ sweepTxID, err := chainhash.NewHash(sweep.Outpoint.TxidBytes) + if err != nil { + return err + } + if _, match := commitSet[sweepTxID.String()]; !match { + continue + } + + // Bump fee of the anchor sweep. + fmt.Printf("Bumping fee of %v:%v\n", + sweepTxID, sweep.Outpoint.OutputIndex) + + _, err = walletClient.BumpFee(ctxb, &walletrpc.BumpFeeRequest{ + Outpoint: sweep.Outpoint, + TargetConf: uint32(ctx.Uint64("conf_target")), + SatPerByte: uint32(ctx.Uint64("sat_per_byte")), + Force: true, + }) + if err != nil { + return err + } + } + + return nil +} + +func getWaitingCloseCommitments(client lnrpc.LightningClient, + channelPoint string) (*lnrpc.PendingChannelsResponse_Commitments, + error) { + + ctxb := context.Background() + + req := &lnrpc.PendingChannelsRequest{} + resp, err := client.PendingChannels(ctxb, req) + if err != nil { + return nil, err + } + + // Lookup the channel commit tx hashes. + for _, channel := range resp.WaitingCloseChannels { + if channel.Channel.ChannelPoint == channelPoint { + return channel.Commitments, nil + } + } + + return nil, errors.New("channel not found") +} diff --git a/cmd/lncli/walletrpc_types.go b/cmd/lncli/walletrpc_types.go index f336e02e20..c2b5698191 100644 --- a/cmd/lncli/walletrpc_types.go +++ b/cmd/lncli/walletrpc_types.go @@ -11,6 +11,9 @@ type PendingSweep struct { SatPerByte uint32 `json:"sat_per_byte"` BroadcastAttempts uint32 `json:"broadcast_attempts"` NextBroadcastHeight uint32 `json:"next_broadcast_height"` + RequestedSatPerByte uint32 `json:"requested_sat_per_byte"` + RequestedConfTarget uint32 `json:"requested_conf_target"` + Force bool `json:"force"` } // NewPendingSweepFromProto converts the walletrpc.PendingSweep proto type into @@ -23,5 +26,8 @@ func NewPendingSweepFromProto(pendingSweep *walletrpc.PendingSweep) *PendingSwee SatPerByte: pendingSweep.SatPerByte, BroadcastAttempts: pendingSweep.BroadcastAttempts, NextBroadcastHeight: pendingSweep.NextBroadcastHeight, + RequestedSatPerByte: 
pendingSweep.RequestedSatPerByte, + RequestedConfTarget: pendingSweep.RequestedConfTarget, + Force: pendingSweep.Force, } } diff --git a/cmd/lncli/watchtower_active.go b/cmd/lncli/watchtower_active.go index bfa9af6668..489013f8bc 100644 --- a/cmd/lncli/watchtower_active.go +++ b/cmd/lncli/watchtower_active.go @@ -4,7 +4,6 @@ package main import ( "context" - "encoding/hex" "github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc" "github.com/urfave/cli" @@ -51,15 +50,7 @@ func towerInfo(ctx *cli.Context) error { return err } - printJSON(struct { - Pubkey string `json:"pubkey"` - Listeners []string `json:"listeners"` - URIs []string `json:"uris"` - }{ - Pubkey: hex.EncodeToString(resp.Pubkey), - Listeners: resp.Listeners, - URIs: resp.Uris, - }) + printRespJSON(resp) return nil } diff --git a/cmd/lncli/wtclient.go b/cmd/lncli/wtclient.go index bcc0a4eb7a..5f887fc968 100644 --- a/cmd/lncli/wtclient.go +++ b/cmd/lncli/wtclient.go @@ -170,16 +170,8 @@ func listTowers(ctx *cli.Context) error { return err } - var listTowersResp = struct { - Towers []*Tower `json:"towers"` - }{ - Towers: make([]*Tower, len(resp.Towers)), - } - for i, tower := range resp.Towers { - listTowersResp.Towers[i] = NewTowerFromProto(tower) - } + printRespJSON(resp) - printJSON(listTowersResp) return nil } @@ -224,7 +216,7 @@ func getTower(ctx *cli.Context) error { return err } - printJSON(NewTowerFromProto(resp)) + printRespJSON(resp) return nil } diff --git a/cmd/lncli/wtclient_types.go b/cmd/lncli/wtclient_types.go deleted file mode 100644 index 4192bef020..0000000000 --- a/cmd/lncli/wtclient_types.go +++ /dev/null @@ -1,50 +0,0 @@ -package main - -import ( - "encoding/hex" - - "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc" -) - -// TowerSession encompasses information about a tower session. 
-type TowerSession struct { - NumBackups uint32 `json:"num_backups"` - NumPendingBackups uint32 `json:"num_pending_backups"` - MaxBackups uint32 `json:"max_backups"` - SweepSatPerByte uint32 `json:"sweep_sat_per_byte"` -} - -// NewTowerSessionsFromProto converts a set of tower sessions from their RPC -// type to a CLI-friendly type. -func NewTowerSessionsFromProto(sessions []*wtclientrpc.TowerSession) []*TowerSession { - towerSessions := make([]*TowerSession, 0, len(sessions)) - for _, session := range sessions { - towerSessions = append(towerSessions, &TowerSession{ - NumBackups: session.NumBackups, - NumPendingBackups: session.NumPendingBackups, - MaxBackups: session.MaxBackups, - SweepSatPerByte: session.SweepSatPerByte, - }) - } - return towerSessions -} - -// Tower encompasses information about a registered watchtower. -type Tower struct { - PubKey string `json:"pubkey"` - Addresses []string `json:"addresses"` - ActiveSessionCandidate bool `json:"active_session_candidate"` - NumSessions uint32 `json:"num_sessions"` - Sessions []*TowerSession `json:"sessions"` -} - -// NewTowerFromProto converts a tower from its RPC type to a CLI-friendly type. -func NewTowerFromProto(tower *wtclientrpc.Tower) *Tower { - return &Tower{ - PubKey: hex.EncodeToString(tower.Pubkey), - Addresses: tower.Addresses, - ActiveSessionCandidate: tower.ActiveSessionCandidate, - NumSessions: tower.NumSessions, - Sessions: NewTowerSessionsFromProto(tower.Sessions), - } -} diff --git a/config.go b/config.go index 55a69416c1..eec73ef199 100644 --- a/config.go +++ b/config.go @@ -14,7 +14,6 @@ import ( "path" "path/filepath" "regexp" - "sort" "strconv" "strings" "time" @@ -59,15 +58,16 @@ const ( // pending channels permitted per peer. 
DefaultMaxPendingChannels = 1 - defaultNoSeedBackup = false - defaultTrickleDelay = 90 * 1000 - defaultChanStatusSampleInterval = time.Minute - defaultChanEnableTimeout = 19 * time.Minute - defaultChanDisableTimeout = 20 * time.Minute - defaultMaxLogFiles = 3 - defaultMaxLogFileSize = 10 - defaultMinBackoff = time.Second - defaultMaxBackoff = time.Hour + defaultNoSeedBackup = false + defaultPaymentsExpirationGracePeriod = time.Duration(0) + defaultTrickleDelay = 90 * 1000 + defaultChanStatusSampleInterval = time.Minute + defaultChanEnableTimeout = 19 * time.Minute + defaultChanDisableTimeout = 20 * time.Minute + defaultMaxLogFiles = 3 + defaultMaxLogFileSize = 10 + defaultMinBackoff = time.Second + defaultMaxBackoff = time.Hour defaultTorSOCKSPort = 9050 defaultTorDNSHost = "soa.nodes.lightning.directory" @@ -152,6 +152,11 @@ var ( defaultTorSOCKS = net.JoinHostPort("localhost", strconv.Itoa(defaultTorSOCKSPort)) defaultTorDNS = net.JoinHostPort(defaultTorDNSHost, strconv.Itoa(defaultTorDNSPort)) defaultTorControl = net.JoinHostPort("localhost", strconv.Itoa(defaultTorControlPort)) + + // bitcoindEsimateModes defines all the legal values for bitcoind's + // estimatesmartfee RPC call. + defaultBitcoindEstimateMode = "CONSERVATIVE" + bitcoindEstimateModes = [2]string{"ECONOMICAL", defaultBitcoindEstimateMode} ) type chainConfig struct { @@ -168,7 +173,8 @@ type chainConfig struct { DefaultNumChanConfs int `long:"defaultchanconfs" description:"The default number of confirmations a channel must have before it's considered open. If this is not set, we will scale the value according to the channel size."` DefaultRemoteDelay int `long:"defaultremotedelay" description:"The default number of blocks we will require our channel counterparty to wait before accessing its funds in case of unilateral close. 
If this is not set, we will scale the value according to the channel size."` - MinHTLC lnwire.MilliSatoshi `long:"minhtlc" description:"The smallest HTLC we are willing to forward on our channels, in millisatoshi"` + MinHTLCIn lnwire.MilliSatoshi `long:"minhtlc" description:"The smallest HTLC we are willing to accept on our channels, in millisatoshi"` + MinHTLCOut lnwire.MilliSatoshi `long:"minhtlcout" description:"The smallest HTLC we are willing to send out on our channels, in millisatoshi"` BaseFee lnwire.MilliSatoshi `long:"basefee" description:"The base fee in millisatoshi we will charge for forwarding payments on our channels"` FeeRate lnwire.MilliSatoshi `long:"feerate" description:"The fee rate used when forwarding payments on our channels. The total fee charged is basefee + (amount * feerate / 1000000), where amount is the forwarded amount."` TimeLockDelta uint32 `long:"timelockdelta" description:"The CLTV delta we will subtract from a forwarded HTLC's timelock value"` @@ -200,6 +206,7 @@ type bitcoindConfig struct { RPCPass string `long:"rpcpass" default-mask:"-" description:"Password for RPC connections"` ZMQPubRawBlock string `long:"zmqpubrawblock" description:"The address listening for ZMQ connections to deliver raw block notifications"` ZMQPubRawTx string `long:"zmqpubrawtx" description:"The address listening for ZMQ connections to deliver raw transaction notifications"` + EstimateMode string `long:"estimatemode" description:"The fee estimate mode. 
Must be either ECONOMICAL or CONSERVATIVE."` } type autoPilotConfig struct { @@ -215,14 +222,17 @@ type autoPilotConfig struct { } type torConfig struct { - Active bool `long:"active" description:"Allow outbound and inbound connections to be routed through Tor"` - SOCKS string `long:"socks" description:"The host:port that Tor's exposed SOCKS5 proxy is listening on"` - DNS string `long:"dns" description:"The DNS server as host:port that Tor will use for SRV queries - NOTE must have TCP resolution enabled"` - StreamIsolation bool `long:"streamisolation" description:"Enable Tor stream isolation by randomizing user credentials for each connection."` - Control string `long:"control" description:"The host:port that Tor is listening on for Tor control connections"` - V2 bool `long:"v2" description:"Automatically set up a v2 onion service to listen for inbound connections"` - V3 bool `long:"v3" description:"Automatically set up a v3 onion service to listen for inbound connections"` - PrivateKeyPath string `long:"privatekeypath" description:"The path to the private key of the onion service being created"` + Active bool `long:"active" description:"Allow outbound and inbound connections to be routed through Tor"` + SOCKS string `long:"socks" description:"The host:port that Tor's exposed SOCKS5 proxy is listening on"` + DNS string `long:"dns" description:"The DNS server as host:port that Tor will use for SRV queries - NOTE must have TCP resolution enabled"` + StreamIsolation bool `long:"streamisolation" description:"Enable Tor stream isolation by randomizing user credentials for each connection."` + Control string `long:"control" description:"The host:port that Tor is listening on for Tor control connections"` + TargetIPAddress string `long:"targetipaddress" description:"IP address that Tor should use as the target of the hidden service"` + Password string `long:"password" description:"The password used to arrive at the HashedControlPassword for the control port. 
If provided, the HASHEDPASSWORD authentication method will be used instead of the SAFECOOKIE one."` + V2 bool `long:"v2" description:"Automatically set up a v2 onion service to listen for inbound connections"` + V3 bool `long:"v3" description:"Automatically set up a v3 onion service to listen for inbound connections"` + PrivateKeyPath string `long:"privatekeypath" description:"The path to the private key of the onion service being created"` + WatchtowerKeyPath string `long:"watchtowerkeypath" description:"The path to the private key of the watchtower onion service being created"` } // config defines the configuration options for lnd. @@ -232,21 +242,24 @@ type torConfig struct { type config struct { ShowVersion bool `short:"V" long:"version" description:"Display version information and exit"` - LndDir string `long:"lnddir" description:"The base directory that contains lnd's data, logs, configuration file, etc."` - ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"` - DataDir string `short:"b" long:"datadir" description:"The directory to store lnd's data within"` - SyncFreelist bool `long:"sync-freelist" description:"Whether the databases used within lnd should sync their freelist to disk. This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."` + LndDir string `long:"lnddir" description:"The base directory that contains lnd's data, logs, configuration file, etc."` + ConfigFile string `short:"C" long:"configfile" description:"Path to configuration file"` + DataDir string `short:"b" long:"datadir" description:"The directory to store lnd's data within"` + SyncFreelist bool `long:"sync-freelist" description:"Whether the databases used within lnd should sync their freelist to disk. 
This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."` + TLSCertPath string `long:"tlscertpath" description:"Path to write the TLS certificate for lnd's RPC and REST services"` TLSKeyPath string `long:"tlskeypath" description:"Path to write the TLS private key for lnd's RPC and REST services"` TLSExtraIPs []string `long:"tlsextraip" description:"Adds an extra ip to the generated certificate"` TLSExtraDomains []string `long:"tlsextradomain" description:"Adds an extra domain to the generated certificate"` - NoMacaroons bool `long:"no-macaroons" description:"Disable macaroon authentication"` - AdminMacPath string `long:"adminmacaroonpath" description:"Path to write the admin macaroon for lnd's RPC and REST services if it doesn't exist"` - ReadMacPath string `long:"readonlymacaroonpath" description:"Path to write the read-only macaroon for lnd's RPC and REST services if it doesn't exist"` - InvoiceMacPath string `long:"invoicemacaroonpath" description:"Path to the invoice-only macaroon for lnd's RPC and REST services if it doesn't exist"` - LogDir string `long:"logdir" description:"Directory to log output."` - MaxLogFiles int `long:"maxlogfiles" description:"Maximum logfiles to keep (0 for no rotation)"` - MaxLogFileSize int `long:"maxlogfilesize" description:"Maximum logfile size in MB"` + TLSAutoRefresh bool `long:"tlsautorefresh" description:"Re-generate TLS certificate and key if the IPs or domains are changed"` + + NoMacaroons bool `long:"no-macaroons" description:"Disable macaroon authentication"` + AdminMacPath string `long:"adminmacaroonpath" description:"Path to write the admin macaroon for lnd's RPC and REST services if it doesn't exist"` + ReadMacPath string `long:"readonlymacaroonpath" description:"Path to write the read-only macaroon for lnd's RPC and REST services if it doesn't exist"` + InvoiceMacPath string `long:"invoicemacaroonpath" description:"Path to the invoice-only macaroon 
for lnd's RPC and REST services if it doesn't exist"` + LogDir string `long:"logdir" description:"Directory to log output."` + MaxLogFiles int `long:"maxlogfiles" description:"Maximum logfiles to keep (0 for no rotation)"` + MaxLogFileSize int `long:"maxlogfilesize" description:"Maximum logfile size in MB"` // We'll parse these 'raw' string arguments into real net.Addrs in the // loadConfig function. We need to expose the 'raw' strings so the @@ -272,7 +285,7 @@ type config struct { Profile string `long:"profile" description:"Enable HTTP profiling on given port -- NOTE port must be between 1024 and 65535"` - UnsafeDisconnect bool `long:"unsafe-disconnect" description:"Allows the rpcserver to intentionally disconnect from peers with open channels. USED FOR TESTING ONLY."` + UnsafeDisconnect bool `long:"unsafe-disconnect" description:"DEPRECATED: Allows the rpcserver to intentionally disconnect from peers with open channels. THIS FLAG WILL BE REMOVED IN 0.10.0"` UnsafeReplay bool `long:"unsafe-replay" description:"Causes a link to replay the adds on its commitment txn after starting up, this enables testing of the sphinx replay logic."` MaxPendingChannels int `long:"maxpendingchannels" description:"The maximum number of incoming pending channels permitted per peer."` BackupFilePath string `long:"backupfilepath" description:"The target location of the channel backup file"` @@ -298,10 +311,11 @@ type config struct { NoSeedBackup bool `long:"noseedbackup" description:"If true, NO SEED WILL BE EXPOSED AND THE WALLET WILL BE ENCRYPTED USING THE DEFAULT PASSPHRASE -- EVER. 
THIS FLAG IS ONLY FOR TESTING AND IS BEING DEPRECATED."` - TrickleDelay int `long:"trickledelay" description:"Time in milliseconds between each release of announcements to the network"` - ChanEnableTimeout time.Duration `long:"chan-enable-timeout" description:"The duration that a peer connection must be stable before attempting to send a channel update to reenable or cancel a pending disables of the peer's channels on the network (default: 19m)."` - ChanDisableTimeout time.Duration `long:"chan-disable-timeout" description:"The duration that must elapse after first detecting that an already active channel is actually inactive and sending channel update disabling it to the network. The pending disable can be canceled if the peer reconnects and becomes stable for chan-enable-timeout before the disable update is sent. (default: 20m)"` - ChanStatusSampleInterval time.Duration `long:"chan-status-sample-interval" description:"The polling interval between attempts to detect if an active channel has become inactive due to its peer going offline. (default: 1m)"` + PaymentsExpirationGracePeriod time.Duration `long:"payments-expiration-grace-period" description:"A period to wait before force closing channels with outgoing htlcs that have timed-out and are a result of this node initiated payments."` + TrickleDelay int `long:"trickledelay" description:"Time in milliseconds between each release of announcements to the network"` + ChanEnableTimeout time.Duration `long:"chan-enable-timeout" description:"The duration that a peer connection must be stable before attempting to send a channel update to reenable or cancel a pending disables of the peer's channels on the network."` + ChanDisableTimeout time.Duration `long:"chan-disable-timeout" description:"The duration that must elapse after first detecting that an already active channel is actually inactive and sending channel update disabling it to the network. 
The pending disable can be canceled if the peer reconnects and becomes stable for chan-enable-timeout before the disable update is sent."` + ChanStatusSampleInterval time.Duration `long:"chan-status-sample-interval" description:"The polling interval between attempts to detect if an active channel has become inactive due to its peer going offline."` Alias string `long:"alias" description:"The node alias. Used as a moniker by peers and intelligence services"` Color string `long:"color" description:"The color of the node in hex format (i.e. '#3399FF'). Used to customize node appearance in intelligence services"` @@ -324,6 +338,10 @@ type config struct { net tor.Net + EnableUpfrontShutdown bool `long:"enable-upfront-shutdown" description:"If true, option upfront shutdown script will be enabled. If peers that we open channels with support this feature, we will automatically set the script to which cooperative closes should be paid out to on channel open. This offers the partial protection of a channel peer disconnecting from us if cooperative close is attempted with a different script."` + + AcceptKeySend bool `long:"accept-keysend" description:"If true, spontaneous payments through keysend will be accepted. 
[experimental]"` + Routing *routing.Conf `group:"routing" namespace:"routing"` Workers *lncfg.Workers `group:"workers" namespace:"workers"` @@ -336,7 +354,9 @@ type config struct { Watchtower *lncfg.Watchtower `group:"watchtower" namespace:"watchtower"` - LegacyProtocol *lncfg.LegacyProtocol `group:"legacyprotocol" namespace:"legacyprotocol"` + ProtocolOptions *lncfg.ProtocolOptions `group:"protocol" namespace:"protocol"` + + AllowCircularRoute bool `long:"allow-circular-route" description:"If true, our node will allow htlc forwards that arrive and depart on the same channel."` } // loadConfig initializes and parses the config using a config file and command @@ -360,7 +380,8 @@ func loadConfig() (*config, error) { MaxLogFileSize: defaultMaxLogFileSize, Bitcoin: &chainConfig{ Active: true, // Force groestlcoin chain active - MinHTLC: defaultBitcoinMinHTLCMSat, + MinHTLCIn: defaultBitcoinMinHTLCInMSat, + MinHTLCOut: defaultBitcoinMinHTLCOutMSat, BaseFee: DefaultBitcoinBaseFeeMSat, FeeRate: DefaultBitcoinFeeRate, TimeLockDelta: DefaultBitcoinTimeLockDelta, @@ -372,11 +393,13 @@ func loadConfig() (*config, error) { RPCCert: defaultBtcdRPCCertFile, }, BitcoindMode: &bitcoindConfig{ - Dir: defaultBitcoindDir, - RPCHost: defaultRPCHost, + Dir: defaultBitcoindDir, + RPCHost: defaultRPCHost, + EstimateMode: defaultBitcoindEstimateMode, }, Litecoin: &chainConfig{ - MinHTLC: defaultLitecoinMinHTLCMSat, + MinHTLCIn: defaultLitecoinMinHTLCInMSat, + MinHTLCOut: defaultLitecoinMinHTLCOutMSat, BaseFee: defaultLitecoinBaseFeeMSat, FeeRate: defaultLitecoinFeeRate, TimeLockDelta: defaultLitecoinTimeLockDelta, @@ -388,9 +411,11 @@ func loadConfig() (*config, error) { RPCCert: defaultLtcdRPCCertFile, }, LitecoindMode: &bitcoindConfig{ - Dir: defaultLitecoindDir, - RPCHost: defaultRPCHost, + Dir: defaultLitecoindDir, + RPCHost: defaultRPCHost, + EstimateMode: defaultBitcoindEstimateMode, }, + UnsafeDisconnect: true, MaxPendingChannels: DefaultMaxPendingChannels, NoSeedBackup: 
defaultNoSeedBackup, MinBackoff: defaultMinBackoff, @@ -410,15 +435,16 @@ func loadConfig() (*config, error) { "preferential": 1.0, }, }, - TrickleDelay: defaultTrickleDelay, - ChanStatusSampleInterval: defaultChanStatusSampleInterval, - ChanEnableTimeout: defaultChanEnableTimeout, - ChanDisableTimeout: defaultChanDisableTimeout, - Alias: defaultAlias, - Color: defaultColor, - MinChanSize: int64(minChanFundingSize), - NumGraphSyncPeers: defaultMinPeers, - HistoricalSyncInterval: discovery.DefaultHistoricalSyncInterval, + PaymentsExpirationGracePeriod: defaultPaymentsExpirationGracePeriod, + TrickleDelay: defaultTrickleDelay, + ChanStatusSampleInterval: defaultChanStatusSampleInterval, + ChanEnableTimeout: defaultChanEnableTimeout, + ChanDisableTimeout: defaultChanDisableTimeout, + Alias: defaultAlias, + Color: defaultColor, + MinChanSize: int64(minChanFundingSize), + NumGraphSyncPeers: defaultMinPeers, + HistoricalSyncInterval: discovery.DefaultHistoricalSyncInterval, Tor: &torConfig{ SOCKS: defaultTorSOCKS, DNS: defaultTorDNS, @@ -454,7 +480,8 @@ func loadConfig() (*config, error) { appName = strings.TrimSuffix(appName, filepath.Ext(appName)) usageMessage := fmt.Sprintf("Use %s -h to show usage", appName) if preCfg.ShowVersion { - fmt.Println(appName, "version", build.Version()) + fmt.Println(appName, "version", build.Version(), + "commit="+build.Commit) os.Exit(0) } @@ -544,6 +571,7 @@ func loadConfig() (*config, error) { cfg.BitcoindMode.Dir = cleanAndExpandPath(cfg.BitcoindMode.Dir) cfg.LitecoindMode.Dir = cleanAndExpandPath(cfg.LitecoindMode.Dir) cfg.Tor.PrivateKeyPath = cleanAndExpandPath(cfg.Tor.PrivateKeyPath) + cfg.Tor.WatchtowerKeyPath = cleanAndExpandPath(cfg.Tor.WatchtowerKeyPath) cfg.Watchtower.TowerDir = cleanAndExpandPath(cfg.Watchtower.TowerDir) // Ensure that the user didn't attempt to specify negative values for @@ -659,6 +687,19 @@ func loadConfig() (*config, error) { } } + if cfg.Tor.WatchtowerKeyPath == "" { + switch { + case cfg.Tor.V2: + 
cfg.Tor.WatchtowerKeyPath = filepath.Join( + cfg.Watchtower.TowerDir, defaultTorV2PrivateKeyFilename, + ) + case cfg.Tor.V3: + cfg.Tor.WatchtowerKeyPath = filepath.Join( + cfg.Watchtower.TowerDir, defaultTorV3PrivateKeyFilename, + ) + } + } + // Set up the network-related functions that will be used throughout // the daemon. We use the standard Go "net" package functions by // default. If we should be proxying all traffic through Tor, then @@ -954,19 +995,27 @@ func loadConfig() (*config, error) { // Special show command to list supported subsystems and exit. if cfg.DebugLevel == "show" { - fmt.Println("Supported subsystems", supportedSubsystems()) + fmt.Println("Supported subsystems", + logWriter.SupportedSubsystems()) os.Exit(0) } // Initialize logging at the default logging level. - initLogRotator( + err = logWriter.InitLogRotator( filepath.Join(cfg.LogDir, defaultLogFilename), cfg.MaxLogFileSize, cfg.MaxLogFiles, ) + if err != nil { + str := "%s: log rotation setup failed: %v" + err = fmt.Errorf(str, funcName, err.Error()) + fmt.Fprintln(os.Stderr, err) + return nil, err + } // Parse, validate, and set debug log level(s). - if err := parseAndSetDebugLevels(cfg.DebugLevel); err != nil { - err := fmt.Errorf("%s: %v", funcName, err.Error()) + err = build.ParseAndSetDebugLevels(cfg.DebugLevel, logWriter) + if err != nil { + err = fmt.Errorf("%s: %v", funcName, err.Error()) fmt.Fprintln(os.Stderr, err) fmt.Fprintln(os.Stderr, usageMessage) return nil, err @@ -1141,92 +1190,6 @@ func cleanAndExpandPath(path string) string { return filepath.Clean(os.ExpandEnv(path)) } -// parseAndSetDebugLevels attempts to parse the specified debug level and set -// the levels accordingly. An appropriate error is returned if anything is -// invalid. -func parseAndSetDebugLevels(debugLevel string) error { - // When the specified string doesn't have any delimiters, treat it as - // the log level for all subsystems. 
- if !strings.Contains(debugLevel, ",") && !strings.Contains(debugLevel, "=") { - // Validate debug log level. - if !validLogLevel(debugLevel) { - str := "The specified debug level [%v] is invalid" - return fmt.Errorf(str, debugLevel) - } - - // Change the logging level for all subsystems. - setLogLevels(debugLevel) - - return nil - } - - // Split the specified string into subsystem/level pairs while detecting - // issues and update the log levels accordingly. - for _, logLevelPair := range strings.Split(debugLevel, ",") { - if !strings.Contains(logLevelPair, "=") { - str := "The specified debug level contains an invalid " + - "subsystem/level pair [%v]" - return fmt.Errorf(str, logLevelPair) - } - - // Extract the specified subsystem and log level. - fields := strings.Split(logLevelPair, "=") - subsysID, logLevel := fields[0], fields[1] - - // Validate subsystem. - if _, exists := subsystemLoggers[subsysID]; !exists { - str := "The specified subsystem [%v] is invalid -- " + - "supported subsystems %v" - return fmt.Errorf(str, subsysID, supportedSubsystems()) - } - - // Validate log level. - if !validLogLevel(logLevel) { - str := "The specified debug level [%v] is invalid" - return fmt.Errorf(str, logLevel) - } - - setLogLevel(subsysID, logLevel) - } - - return nil -} - -// validLogLevel returns whether or not logLevel is a valid debug log level. -func validLogLevel(logLevel string) bool { - switch logLevel { - case "trace": - fallthrough - case "debug": - fallthrough - case "info": - fallthrough - case "warn": - fallthrough - case "error": - fallthrough - case "critical": - fallthrough - case "off": - return true - } - return false -} - -// supportedSubsystems returns a sorted slice of the supported subsystems for -// logging purposes. -func supportedSubsystems() []string { - // Convert the subsystemLoggers map keys to a slice. 
- subsystems := make([]string, 0, len(subsystemLoggers)) - for subsysID := range subsystemLoggers { - subsystems = append(subsystems, subsysID) - } - - // Sort the subsystems for stable display. - sort.Strings(subsystems) - return subsystems -} - func parseRPCParams(cConfig *chainConfig, nodeConfig interface{}, net chainCode, funcName string) error { @@ -1273,6 +1236,15 @@ func parseRPCParams(cConfig *chainConfig, nodeConfig interface{}, net chainCode, } } + // Ensure that if the estimate mode is set, that it is a legal + // value. + if conf.EstimateMode != "" { + err := checkEstimateMode(conf.EstimateMode) + if err != nil { + return err + } + } + // If all of RPCUser, RPCPass, ZMQBlockHost, and ZMQTxHost are // set, we assume those parameters are good to use. if conf.RPCUser != "" && conf.RPCPass != "" && @@ -1510,6 +1482,18 @@ func checkZMQOptions(zmqBlockHost, zmqTxHost string) error { return nil } +// checkEstimateMode ensures that the provided estimate mode is legal. +func checkEstimateMode(estimateMode string) error { + for _, mode := range bitcoindEstimateModes { + if estimateMode == mode { + return nil + } + } + + return fmt.Errorf("estimatemode must be one of the following: %v", + bitcoindEstimateModes[:]) +} + // normalizeNetwork returns the common name of a network type used to create // file paths. This allows differently versioned networks to use the same path. func normalizeNetwork(network string) string { diff --git a/contractcourt/anchor_resolver.go b/contractcourt/anchor_resolver.go new file mode 100644 index 0000000000..1cf7a820ad --- /dev/null +++ b/contractcourt/anchor_resolver.go @@ -0,0 +1,205 @@ +package contractcourt + +import ( + "errors" + "io" + "sync" + + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/sweep" +) + +// anchorResolver is a resolver that will attempt to sweep our anchor output. 
+type anchorResolver struct { + // anchorSignDescriptor contains the information that is required to + // sweep the anchor. + anchorSignDescriptor input.SignDescriptor + + // anchor is the outpoint on the commitment transaction. + anchor wire.OutPoint + + // resolved reflects if the contract has been fully resolved or not. + resolved bool + + // broadcastHeight is the height that the original contract was + // broadcast to the main-chain at. We'll use this value to bound any + // historical queries to the chain for spends/confirmations. + broadcastHeight uint32 + + // chanPoint is the channel point of the original contract. + chanPoint wire.OutPoint + + // currentReport stores the current state of the resolver for reporting + // over the rpc interface. + currentReport ContractReport + + // reportLock prevents concurrent access to the resolver report. + reportLock sync.Mutex + + contractResolverKit +} + +// newAnchorResolver instantiates a new anchor resolver. +func newAnchorResolver(anchorSignDescriptor input.SignDescriptor, + anchor wire.OutPoint, broadcastHeight uint32, + chanPoint wire.OutPoint, resCfg ResolverConfig) *anchorResolver { + + amt := btcutil.Amount(anchorSignDescriptor.Output.Value) + + report := ContractReport{ + Outpoint: anchor, + Type: ReportOutputAnchor, + Amount: amt, + LimboBalance: amt, + RecoveredBalance: 0, + } + + r := &anchorResolver{ + contractResolverKit: *newContractResolverKit(resCfg), + anchorSignDescriptor: anchorSignDescriptor, + anchor: anchor, + broadcastHeight: broadcastHeight, + chanPoint: chanPoint, + currentReport: report, + } + + r.initLogger(r) + + return r +} + +// ResolverKey returns an identifier which should be globally unique for this +// particular resolver within the chain the original contract resides within. +func (c *anchorResolver) ResolverKey() []byte { + // The anchor resolver is stateless and doesn't need a database key. 
+ return nil +} + +// Resolve offers the anchor output to the sweeper and waits for it to be swept. +func (c *anchorResolver) Resolve() (ContractResolver, error) { + // Attempt to update the sweep parameters to the post-confirmation + // situation. We don't want to force sweep anymore, because the anchor + // lost its special purpose to get the commitment confirmed. It is just + // an output that we want to sweep only if it is economical to do so. + relayFeeRate := c.Sweeper.RelayFeePerKW() + + resultChan, err := c.Sweeper.UpdateParams( + c.anchor, + sweep.ParamsUpdate{ + Fee: sweep.FeePreference{ + FeeRate: relayFeeRate, + }, + Force: false, + }, + ) + + // After a restart or when the remote force closes, the sweeper is not + // yet aware of the anchor. In that case, offer it as a new input to the + // sweeper. An exclusive group is not necessary anymore, because we know + // that this is the only anchor that can be swept. + if err == lnwallet.ErrNotMine { + anchorInput := input.MakeBaseInput( + &c.anchor, + input.CommitmentAnchor, + &c.anchorSignDescriptor, + c.broadcastHeight, + ) + + resultChan, err = c.Sweeper.SweepInput( + &anchorInput, + sweep.Params{ + Fee: sweep.FeePreference{ + FeeRate: relayFeeRate, + }, + }, + ) + if err != nil { + return nil, err + } + } + + var anchorRecovered bool + select { + case sweepRes := <-resultChan: + switch sweepRes.Err { + + // Anchor was swept successfully. + case nil: + c.log.Debugf("anchor swept by tx %v", + sweepRes.Tx.TxHash()) + + anchorRecovered = true + + // Anchor was swept by someone else. This is possible after the + // 16 block csv lock. + case sweep.ErrRemoteSpend: + c.log.Warnf("our anchor spent by someone else") + + // The sweeper gave up on sweeping the anchor. This happens + // after the maximum number of sweep attempts has been reached. + // See sweep.DefaultMaxSweepAttempts. Sweep attempts are + // interspaced with random delays picked from a range that + // increases exponentially. 
+ // + // We consider the anchor as being lost. + case sweep.ErrTooManyAttempts: + c.log.Warnf("anchor sweep abandoned") + + // An unexpected error occurred. + default: + c.log.Errorf("unable to sweep anchor: %v", sweepRes.Err) + + return nil, sweepRes.Err + } + + case <-c.quit: + return nil, errResolverShuttingDown + } + + // Update report to reflect that funds are no longer in limbo. + c.reportLock.Lock() + if anchorRecovered { + c.currentReport.RecoveredBalance = c.currentReport.LimboBalance + } + c.currentReport.LimboBalance = 0 + c.reportLock.Unlock() + + c.resolved = true + return nil, nil +} + +// Stop signals the resolver to cancel any current resolution processes, and +// suspend. +// +// NOTE: Part of the ContractResolver interface. +func (c *anchorResolver) Stop() { + close(c.quit) +} + +// IsResolved returns true if the stored state in the resolve is fully +// resolved. In this case the target output can be forgotten. +// +// NOTE: Part of the ContractResolver interface. +func (c *anchorResolver) IsResolved() bool { + return c.resolved +} + +// report returns a report on the resolution state of the contract. +func (c *anchorResolver) report() *ContractReport { + c.reportLock.Lock() + defer c.reportLock.Unlock() + + reportCopy := c.currentReport + return &reportCopy +} + +func (c *anchorResolver) Encode(w io.Writer) error { + return errors.New("serialization not supported") +} + +// A compile time assertion to ensure anchorResolver meets the +// ContractResolver interface. 
+var _ ContractResolver = (*anchorResolver)(nil) diff --git a/contractcourt/briefcase.go b/contractcourt/briefcase.go index 989786f1a0..03a878b7b7 100644 --- a/contractcourt/briefcase.go +++ b/contractcourt/briefcase.go @@ -8,8 +8,8 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwallet" ) @@ -29,6 +29,11 @@ type ContractResolutions struct { // HtlcResolutions contains all data required to fully resolve any // incoming+outgoing HTLC's present within the commitment transaction. HtlcResolutions lnwallet.HtlcResolutions + + // AnchorResolution contains the data required to sweep the anchor + // output. If the channel type doesn't include anchors, the value of + // this field will be nil. + AnchorResolution *lnwallet.AnchorResolution } // IsEmpty returns true if the set of resolutions is "empty". A resolution is @@ -37,7 +42,8 @@ type ContractResolutions struct { func (c *ContractResolutions) IsEmpty() bool { return c.CommitResolution == nil && len(c.HtlcResolutions.IncomingHTLCs) == 0 && - len(c.HtlcResolutions.OutgoingHTLCs) == 0 + len(c.HtlcResolutions.OutgoingHTLCs) == 0 && + c.AnchorResolution == nil } // ArbitratorLog is the primary source of persistent storage for the @@ -263,6 +269,10 @@ var ( // the full set of resolutions for a channel. resolutionsKey = []byte("resolutions") + // anchorResolutionKey is the key under the logScope that we'll use to + // store the anchor resolution, if any. + anchorResolutionKey = []byte("anchor-resolution") + // actionsBucketKey is the key under the logScope that we'll use to // store all chain actions once they're determined. 
actionsBucketKey = []byte("chain-actions") @@ -299,7 +309,7 @@ var ( // boltArbitratorLog is an implementation of the ArbitratorLog interface backed // by a bolt DB instance. type boltArbitratorLog struct { - db *bbolt.DB + db kvdb.Backend cfg ChannelArbitratorConfig @@ -308,7 +318,7 @@ type boltArbitratorLog struct { // newBoltArbitratorLog returns a new instance of the boltArbitratorLog given // an arbitrator config, and the items needed to create its log scope. -func newBoltArbitratorLog(db *bbolt.DB, cfg ChannelArbitratorConfig, +func newBoltArbitratorLog(db kvdb.Backend, cfg ChannelArbitratorConfig, chainHash chainhash.Hash, chanPoint wire.OutPoint) (*boltArbitratorLog, error) { scope, err := newLogScope(chainHash, chanPoint) @@ -327,13 +337,13 @@ func newBoltArbitratorLog(db *bbolt.DB, cfg ChannelArbitratorConfig, // interface. var _ ArbitratorLog = (*boltArbitratorLog)(nil) -func fetchContractReadBucket(tx *bbolt.Tx, scopeKey []byte) (*bbolt.Bucket, error) { - scopeBucket := tx.Bucket(scopeKey) +func fetchContractReadBucket(tx kvdb.ReadTx, scopeKey []byte) (kvdb.ReadBucket, error) { + scopeBucket := tx.ReadBucket(scopeKey) if scopeBucket == nil { return nil, errScopeBucketNoExist } - contractBucket := scopeBucket.Bucket(contractsBucketKey) + contractBucket := scopeBucket.NestedReadBucket(contractsBucketKey) if contractBucket == nil { return nil, errNoContracts } @@ -341,8 +351,8 @@ func fetchContractReadBucket(tx *bbolt.Tx, scopeKey []byte) (*bbolt.Bucket, erro return contractBucket, nil } -func fetchContractWriteBucket(tx *bbolt.Tx, scopeKey []byte) (*bbolt.Bucket, error) { - scopeBucket, err := tx.CreateBucketIfNotExists(scopeKey) +func fetchContractWriteBucket(tx kvdb.RwTx, scopeKey []byte) (kvdb.RwBucket, error) { + scopeBucket, err := tx.CreateTopLevelBucket(scopeKey) if err != nil { return nil, err } @@ -359,9 +369,16 @@ func fetchContractWriteBucket(tx *bbolt.Tx, scopeKey []byte) (*bbolt.Bucket, err // writeResolver is a helper method that writes a 
contract resolver and stores // it it within the passed contractBucket using its unique resolutionsKey key. -func (b *boltArbitratorLog) writeResolver(contractBucket *bbolt.Bucket, +func (b *boltArbitratorLog) writeResolver(contractBucket kvdb.RwBucket, res ContractResolver) error { + // Only persist resolvers that are stateful. Stateless resolvers don't + // expose a resolver key. + resKey := res.ResolverKey() + if resKey == nil { + return nil + } + // First, we'll write to the buffer the type of this resolver. Using // this byte, we can later properly deserialize the resolver properly. var ( @@ -390,8 +407,6 @@ func (b *boltArbitratorLog) writeResolver(contractBucket *bbolt.Bucket, return err } - resKey := res.ResolverKey() - return contractBucket.Put(resKey, buf.Bytes()) } @@ -400,8 +415,8 @@ func (b *boltArbitratorLog) writeResolver(contractBucket *bbolt.Bucket, // NOTE: Part of the ContractResolver interface. func (b *boltArbitratorLog) CurrentState() (ArbitratorState, error) { var s ArbitratorState - err := b.db.View(func(tx *bbolt.Tx) error { - scopeBucket := tx.Bucket(b.scopeKey[:]) + err := kvdb.View(b.db, func(tx kvdb.ReadTx) error { + scopeBucket := tx.ReadBucket(b.scopeKey[:]) if scopeBucket == nil { return errScopeBucketNoExist } @@ -425,8 +440,9 @@ func (b *boltArbitratorLog) CurrentState() (ArbitratorState, error) { // // NOTE: Part of the ContractResolver interface. func (b *boltArbitratorLog) CommitState(s ArbitratorState) error { - return b.db.Batch(func(tx *bbolt.Tx) error { - scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:]) + fmt.Printf("yeee: %T\n", b.db) + return kvdb.Batch(b.db, func(tx kvdb.RwTx) error { + scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:]) if err != nil { return err } @@ -440,12 +456,12 @@ func (b *boltArbitratorLog) CommitState(s ArbitratorState) error { // // NOTE: Part of the ContractResolver interface. 
func (b *boltArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver, error) { - resKit := ResolverKit{ + resolverCfg := ResolverConfig{ ChannelArbitratorConfig: b.cfg, Checkpoint: b.checkpointContract, } var contracts []ContractResolver - err := b.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(b.db, func(tx kvdb.ReadTx) error { contractBucket, err := fetchContractReadBucket(tx, b.scopeKey[:]) if err != nil { return err @@ -469,56 +485,38 @@ func (b *boltArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver, erro switch resType { case resolverTimeout: - timeoutRes := &htlcTimeoutResolver{} - if err := timeoutRes.Decode(resReader); err != nil { - return err - } - timeoutRes.AttachResolverKit(resKit) - - res = timeoutRes + res, err = newTimeoutResolverFromReader( + resReader, resolverCfg, + ) case resolverSuccess: - successRes := &htlcSuccessResolver{} - if err := successRes.Decode(resReader); err != nil { - return err - } - - res = successRes + res, err = newSuccessResolverFromReader( + resReader, resolverCfg, + ) case resolverOutgoingContest: - outContestRes := &htlcOutgoingContestResolver{ - htlcTimeoutResolver: htlcTimeoutResolver{}, - } - if err := outContestRes.Decode(resReader); err != nil { - return err - } - - res = outContestRes + res, err = newOutgoingContestResolverFromReader( + resReader, resolverCfg, + ) case resolverIncomingContest: - inContestRes := &htlcIncomingContestResolver{ - htlcSuccessResolver: htlcSuccessResolver{}, - } - if err := inContestRes.Decode(resReader); err != nil { - return err - } - - res = inContestRes + res, err = newIncomingContestResolverFromReader( + resReader, resolverCfg, + ) case resolverUnilateralSweep: - sweepRes := &commitSweepResolver{} - if err := sweepRes.Decode(resReader); err != nil { - return err - } - - res = sweepRes + res, err = newCommitSweepResolverFromReader( + resReader, resolverCfg, + ) default: return fmt.Errorf("unknown resolver type: %v", resType) } - resKit.Quit = make(chan 
struct{}) - res.AttachResolverKit(resKit) + if err != nil { + return err + } + contracts = append(contracts, res) return nil }) @@ -536,7 +534,7 @@ func (b *boltArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver, erro // // NOTE: Part of the ContractResolver interface. func (b *boltArbitratorLog) InsertUnresolvedContracts(resolvers ...ContractResolver) error { - return b.db.Batch(func(tx *bbolt.Tx) error { + return kvdb.Batch(b.db, func(tx kvdb.RwTx) error { contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:]) if err != nil { return err @@ -559,7 +557,7 @@ func (b *boltArbitratorLog) InsertUnresolvedContracts(resolvers ...ContractResol // // NOTE: Part of the ContractResolver interface. func (b *boltArbitratorLog) SwapContract(oldContract, newContract ContractResolver) error { - return b.db.Batch(func(tx *bbolt.Tx) error { + return kvdb.Batch(b.db, func(tx kvdb.RwTx) error { contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:]) if err != nil { return err @@ -579,7 +577,7 @@ func (b *boltArbitratorLog) SwapContract(oldContract, newContract ContractResolv // // NOTE: Part of the ContractResolver interface. func (b *boltArbitratorLog) ResolveContract(res ContractResolver) error { - return b.db.Batch(func(tx *bbolt.Tx) error { + return kvdb.Batch(b.db, func(tx kvdb.RwTx) error { contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:]) if err != nil { return err @@ -597,8 +595,8 @@ func (b *boltArbitratorLog) ResolveContract(res ContractResolver) error { // // NOTE: Part of the ContractResolver interface. 
func (b *boltArbitratorLog) LogContractResolutions(c *ContractResolutions) error { - return b.db.Batch(func(tx *bbolt.Tx) error { - scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:]) + return kvdb.Batch(b.db, func(tx kvdb.RwTx) error { + scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:]) if err != nil { return err } @@ -648,7 +646,26 @@ func (b *boltArbitratorLog) LogContractResolutions(c *ContractResolutions) error } } - return scopeBucket.Put(resolutionsKey, b.Bytes()) + err = scopeBucket.Put(resolutionsKey, b.Bytes()) + if err != nil { + return err + } + + // Write out the anchor resolution if present. + if c.AnchorResolution != nil { + var b bytes.Buffer + err := encodeAnchorResolution(&b, c.AnchorResolution) + if err != nil { + return err + } + + err = scopeBucket.Put(anchorResolutionKey, b.Bytes()) + if err != nil { + return err + } + } + + return nil }) } @@ -658,8 +675,8 @@ func (b *boltArbitratorLog) LogContractResolutions(c *ContractResolutions) error // NOTE: Part of the ContractResolver interface. 
func (b *boltArbitratorLog) FetchContractResolutions() (*ContractResolutions, error) { c := &ContractResolutions{} - err := b.db.View(func(tx *bbolt.Tx) error { - scopeBucket := tx.Bucket(b.scopeKey[:]) + err := kvdb.View(b.db, func(tx kvdb.ReadTx) error { + scopeBucket := tx.ReadBucket(b.scopeKey[:]) if scopeBucket == nil { return errScopeBucketNoExist } @@ -728,6 +745,18 @@ func (b *boltArbitratorLog) FetchContractResolutions() (*ContractResolutions, er } } + anchorResBytes := scopeBucket.Get(anchorResolutionKey) + if anchorResBytes != nil { + c.AnchorResolution = &lnwallet.AnchorResolution{} + resReader := bytes.NewReader(anchorResBytes) + err := decodeAnchorResolution( + resReader, c.AnchorResolution, + ) + if err != nil { + return err + } + } + return nil }) if err != nil { @@ -745,13 +774,13 @@ func (b *boltArbitratorLog) FetchContractResolutions() (*ContractResolutions, er func (b *boltArbitratorLog) FetchChainActions() (ChainActionMap, error) { actionsMap := make(ChainActionMap) - err := b.db.View(func(tx *bbolt.Tx) error { - scopeBucket := tx.Bucket(b.scopeKey[:]) + err := kvdb.View(b.db, func(tx kvdb.ReadTx) error { + scopeBucket := tx.ReadBucket(b.scopeKey[:]) if scopeBucket == nil { return errScopeBucketNoExist } - actionsBucket := scopeBucket.Bucket(actionsBucketKey) + actionsBucket := scopeBucket.NestedReadBucket(actionsBucketKey) if actionsBucket == nil { return errNoActions } @@ -787,8 +816,8 @@ func (b *boltArbitratorLog) FetchChainActions() (ChainActionMap, error) { // // NOTE: Part of the ContractResolver interface. 
func (b *boltArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) error { - return b.db.Update(func(tx *bbolt.Tx) error { - scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:]) + return kvdb.Batch(b.db, func(tx kvdb.RwTx) error { + scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:]) if err != nil { return err } @@ -808,8 +837,8 @@ func (b *boltArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) error { // NOTE: Part of the ContractResolver interface. func (b *boltArbitratorLog) FetchConfirmedCommitSet() (*CommitSet, error) { var c *CommitSet - err := b.db.View(func(tx *bbolt.Tx) error { - scopeBucket := tx.Bucket(b.scopeKey[:]) + err := kvdb.View(b.db, func(tx kvdb.ReadTx) error { + scopeBucket := tx.ReadBucket(b.scopeKey[:]) if scopeBucket == nil { return errScopeBucketNoExist } @@ -840,8 +869,8 @@ func (b *boltArbitratorLog) FetchConfirmedCommitSet() (*CommitSet, error) { // // NOTE: Part of the ContractResolver interface. func (b *boltArbitratorLog) WipeHistory() error { - return b.db.Update(func(tx *bbolt.Tx) error { - scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:]) + return kvdb.Update(b.db, func(tx kvdb.RwTx) error { + scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:]) if err != nil { return err } @@ -854,8 +883,8 @@ func (b *boltArbitratorLog) WipeHistory() error { // Next, we'll delete any lingering contract state within the // contracts bucket by removing the bucket itself. - err = scopeBucket.DeleteBucket(contractsBucketKey) - if err != nil && err != bbolt.ErrBucketNotFound { + err = scopeBucket.DeleteNestedBucket(contractsBucketKey) + if err != nil && err != kvdb.ErrBucketNotFound { return err } @@ -867,13 +896,13 @@ func (b *boltArbitratorLog) WipeHistory() error { // We'll delete any chain actions that are still stored by // removing the enclosing bucket. 
- err = scopeBucket.DeleteBucket(actionsBucketKey) - if err != nil && err != bbolt.ErrBucketNotFound { + err = scopeBucket.DeleteNestedBucket(actionsBucketKey) + if err != nil && err != kvdb.ErrBucketNotFound { return err } // Finally, we'll delete the enclosing bucket itself. - return tx.DeleteBucket(b.scopeKey[:]) + return tx.DeleteTopLevelBucket(b.scopeKey[:]) }) } @@ -881,7 +910,7 @@ func (b *boltArbitratorLog) WipeHistory() error { // ContractResolver instances to checkpoint their state once they reach // milestones during contract resolution. func (b *boltArbitratorLog) checkpointContract(c ContractResolver) error { - return b.db.Batch(func(tx *bbolt.Tx) error { + return kvdb.Update(b.db, func(tx kvdb.RwTx) error { contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:]) if err != nil { return err @@ -1063,6 +1092,35 @@ func decodeCommitResolution(r io.Reader, return binary.Read(r, endian, &c.MaturityDelay) } +func encodeAnchorResolution(w io.Writer, + a *lnwallet.AnchorResolution) error { + + if _, err := w.Write(a.CommitAnchor.Hash[:]); err != nil { + return err + } + err := binary.Write(w, endian, a.CommitAnchor.Index) + if err != nil { + return err + } + + return input.WriteSignDescriptor(w, &a.AnchorSignDescriptor) +} + +func decodeAnchorResolution(r io.Reader, + a *lnwallet.AnchorResolution) error { + + _, err := io.ReadFull(r, a.CommitAnchor.Hash[:]) + if err != nil { + return err + } + err = binary.Read(r, endian, &a.CommitAnchor.Index) + if err != nil { + return err + } + + return input.ReadSignDescriptor(r, &a.AnchorSignDescriptor) +} + func encodeHtlcSetKey(w io.Writer, h *HtlcSetKey) error { err := binary.Write(w, endian, h.IsRemote) if err != nil { diff --git a/contractcourt/briefcase_test.go b/contractcourt/briefcase_test.go index 59f9bd3b97..165016d322 100644 --- a/contractcourt/briefcase_test.go +++ b/contractcourt/briefcase_test.go @@ -14,9 +14,9 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" 
"github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwallet" ) @@ -46,6 +46,15 @@ var ( Index: 2, } + testChanPoint3 = wire.OutPoint{ + Hash: chainhash.Hash{ + 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, + 0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, + 0x2d, 0xe7, 0x93, 0xe4, + }, + Index: 3, + } + testPreimage = [32]byte{ 0x52, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, 0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, @@ -95,7 +104,7 @@ var ( } ) -func makeTestDB() (*bbolt.DB, func(), error) { +func makeTestDB() (kvdb.Backend, func(), error) { // First, create a temporary directory to be used for the duration of // this test. tempDirName, err := ioutil.TempDir("", "arblog") @@ -103,7 +112,7 @@ func makeTestDB() (*bbolt.DB, func(), error) { return nil, nil, err } - db, err := bbolt.Open(tempDirName+"/test.db", 0600, nil) + db, err := kvdb.Create(kvdb.BoltBackendName, tempDirName+"/test.db", true) if err != nil { return nil, nil, err } @@ -161,9 +170,9 @@ func assertResolversEqual(t *testing.T, originalResolver ContractResolver, t.Fatalf("expected %v, got %v", ogRes.broadcastHeight, diskRes.broadcastHeight) } - if ogRes.htlcIndex != diskRes.htlcIndex { - t.Fatalf("expected %v, got %v", ogRes.htlcIndex, - diskRes.htlcIndex) + if ogRes.htlc.HtlcIndex != diskRes.htlc.HtlcIndex { + t.Fatalf("expected %v, got %v", ogRes.htlc.HtlcIndex, + diskRes.htlc.HtlcIndex) } } @@ -184,9 +193,9 @@ func assertResolversEqual(t *testing.T, originalResolver ContractResolver, t.Fatalf("expected %v, got %v", ogRes.broadcastHeight, diskRes.broadcastHeight) } - if ogRes.payHash != diskRes.payHash { - t.Fatalf("expected %v, got %v", ogRes.payHash, - diskRes.payHash) + if ogRes.htlc.RHash != diskRes.htlc.RHash { + t.Fatalf("expected %v, got %v", 
ogRes.htlc.RHash, + diskRes.htlc.RHash) } } @@ -265,7 +274,9 @@ func TestContractInsertionRetrieval(t *testing.T) { outputIncubating: true, resolved: true, broadcastHeight: 102, - htlcIndex: 12, + htlc: channeldb.HTLC{ + HtlcIndex: 12, + }, } successResolver := htlcSuccessResolver{ htlcResolution: lnwallet.IncomingHtlcResolution{ @@ -278,8 +289,10 @@ func TestContractInsertionRetrieval(t *testing.T) { outputIncubating: true, resolved: true, broadcastHeight: 109, - payHash: testPreimage, - sweepTx: nil, + htlc: channeldb.HTLC{ + RHash: testPreimage, + }, + sweepTx: nil, } resolvers := []ContractResolver{ &timeoutResolver, @@ -395,7 +408,9 @@ func TestContractResolution(t *testing.T) { outputIncubating: true, resolved: true, broadcastHeight: 192, - htlcIndex: 9912, + htlc: channeldb.HTLC{ + HtlcIndex: 9912, + }, } // First, we'll insert the resolver into the database and ensure that @@ -454,7 +469,9 @@ func TestContractSwapping(t *testing.T) { outputIncubating: true, resolved: true, broadcastHeight: 102, - htlcIndex: 12, + htlc: channeldb.HTLC{ + HtlcIndex: 12, + }, } contestResolver := &htlcOutgoingContestResolver{ htlcTimeoutResolver: timeoutResolver, @@ -532,6 +549,10 @@ func TestContractResolutionsStorage(t *testing.T) { }, }, }, + AnchorResolution: &lnwallet.AnchorResolution{ + CommitAnchor: testChanPoint3, + AnchorSignDescriptor: testSignDesc, + }, } // First make sure that fetching unlogged contract resolutions will diff --git a/contractcourt/chain_arbitrator.go b/contractcourt/chain_arbitrator.go index 96759f7ec3..9aa691a9b0 100644 --- a/contractcourt/chain_arbitrator.go +++ b/contractcourt/chain_arbitrator.go @@ -5,16 +5,18 @@ import ( "fmt" "sync" "sync/atomic" + "time" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/input" 
"github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" - "github.com/lightningnetwork/lnd/sweep" ) // ErrChainArbExiting signals that the chain arbitrator is shutting down. @@ -112,8 +114,7 @@ type ChainArbitratorConfig struct { // the process of incubation. This is used when a resolver wishes to // pass off the output to the nursery as we're only waiting on an // absolute/relative item block. - IncubateOutputs func(wire.OutPoint, *lnwallet.CommitOutputResolution, - *lnwallet.OutgoingHtlcResolution, + IncubateOutputs func(wire.OutPoint, *lnwallet.OutgoingHtlcResolution, *lnwallet.IncomingHtlcResolution, uint32) error // PreimageDB is a global store of all known pre-images. We'll use this @@ -131,7 +132,7 @@ type ChainArbitratorConfig struct { Signer input.Signer // FeeEstimator will be used to return fee estimates. - FeeEstimator lnwallet.FeeEstimator + FeeEstimator chainfee.Estimator // ChainIO allows us to query the state of the current main chain. ChainIO lnwallet.BlockChainIO @@ -141,7 +142,7 @@ type ChainArbitratorConfig struct { DisableChannel func(wire.OutPoint) error // Sweeper allows resolvers to sweep their final outputs. - Sweeper *sweep.UtxoSweeper + Sweeper UtxoSweeper // Registry is the invoice database that is used by resolvers to lookup // preimages and settle invoices. @@ -150,6 +151,23 @@ type ChainArbitratorConfig struct { // NotifyClosedChannel is a function closure that the ChainArbitrator // will use to notify the ChannelNotifier about a newly closed channel. NotifyClosedChannel func(wire.OutPoint) + + // OnionProcessor is used to decode onion payloads for on-chain + // resolution. + OnionProcessor OnionProcessor + + // PaymentsExpirationGracePeriod indicates is a time window we let the + // other node to cancel an outgoing htlc that our node has initiated and + // has timed out. 
+ PaymentsExpirationGracePeriod time.Duration + + // IsForwardedHTLC checks for a given htlc, identified by channel id and + // htlcIndex, if it is a forwarded one. + IsForwardedHTLC func(chanID lnwire.ShortChannelID, htlcIndex uint64) bool + + // Clock is the clock implementation that ChannelArbitrator uses. + // It is useful for testing. + Clock clock.Clock } // ChainArbitrator is a sub-system that oversees the on-chain resolution of all @@ -202,6 +220,88 @@ func NewChainArbitrator(cfg ChainArbitratorConfig, } } +// arbChannel is a wrapper around an open channel that channel arbitrators +// interact with. +type arbChannel struct { + // channel is the in-memory channel state. + channel *channeldb.OpenChannel + + // c references the chain arbitrator and is used by arbChannel + // internally. + c *ChainArbitrator +} + +// NewAnchorResolutions returns the anchor resolutions for currently valid +// commitment transactions. +// +// NOTE: Part of the ArbChannel interface. +func (a *arbChannel) NewAnchorResolutions() ([]*lnwallet.AnchorResolution, + error) { + + // Get a fresh copy of the database state to base the anchor resolutions + // on. Unfortunately the channel instance that we have here isn't the + // same instance that is used by the link. + chanPoint := a.channel.FundingOutpoint + + channel, err := a.c.chanSource.FetchChannel(chanPoint) + if err != nil { + return nil, err + } + + chanMachine, err := lnwallet.NewLightningChannel( + a.c.cfg.Signer, channel, nil, + ) + if err != nil { + return nil, err + } + + return chanMachine.NewAnchorResolutions() +} + +// ForceCloseChan should force close the contract that this attendant is +// watching over. We'll use this when we decide that we need to go to chain. It +// should in addition tell the switch to remove the corresponding link, such +// that we won't accept any new updates. The returned summary contains all items +// needed to eventually resolve all outputs on chain. 
+// +// NOTE: Part of the ArbChannel interface. +func (a *arbChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, error) { + // First, we mark the channel as borked, this ensure + // that no new state transitions can happen, and also + // that the link won't be loaded into the switch. + if err := a.channel.MarkBorked(); err != nil { + return nil, err + } + + // With the channel marked as borked, we'll now remove + // the link from the switch if its there. If the link + // is active, then this method will block until it + // exits. + chanPoint := a.channel.FundingOutpoint + + if err := a.c.cfg.MarkLinkInactive(chanPoint); err != nil { + log.Errorf("unable to mark link inactive: %v", err) + } + + // Now that we know the link can't mutate the channel + // state, we'll read the channel from disk the target + // channel according to its channel point. + channel, err := a.c.chanSource.FetchChannel(chanPoint) + if err != nil { + return nil, err + } + + // Finally, we'll force close the channel completing + // the force close workflow. + chanMachine, err := lnwallet.NewLightningChannel( + a.c.cfg.Signer, channel, nil, + ) + if err != nil { + return nil, err + } + return chanMachine.ForceClose() +} + // newActiveChannelArbitrator creates a new instance of an active channel // arbitrator given the state of the target channel. func newActiveChannelArbitrator(channel *channeldb.OpenChannel, @@ -229,45 +329,16 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel, // all interfaces and methods the arbitrator needs to do its job. arbCfg := ChannelArbitratorConfig{ ChanPoint: chanPoint, + Channel: c.getArbChannel(channel), ShortChanID: channel.ShortChanID(), BlockEpochs: blockEpoch, - ForceCloseChan: func() (*lnwallet.LocalForceCloseSummary, error) { - // First, we mark the channel as borked, this ensure - // that no new state transitions can happen, and also - // that the link won't be loaded into the switch. 
- if err := channel.MarkBorked(); err != nil { - return nil, err - } - // With the channel marked as borked, we'll now remove - // the link from the switch if its there. If the link - // is active, then this method will block until it - // exits. - if err := c.cfg.MarkLinkInactive(chanPoint); err != nil { - log.Errorf("unable to mark link inactive: %v", err) - } - - // Now that we know the link can't mutate the channel - // state, we'll read the channel from disk the target - // channel according to its channel point. - channel, err := c.chanSource.FetchChannel(chanPoint) - if err != nil { - return nil, err - } + MarkCommitmentBroadcasted: channel.MarkCommitmentBroadcasted, + MarkChannelClosed: func(summary *channeldb.ChannelCloseSummary, + statuses ...channeldb.ChannelStatus) error { - // Finally, we'll force close the channel completing - // the force close workflow. - chanMachine, err := lnwallet.NewLightningChannel( - c.cfg.Signer, channel, nil, - ) + err := channel.CloseChannel(summary, statuses...) if err != nil { - return nil, err - } - return chanMachine.ForceClose() - }, - MarkCommitmentBroadcasted: channel.MarkCommitmentBroadcasted, - MarkChannelClosed: func(summary *channeldb.ChannelCloseSummary) error { - if err := channel.CloseChannel(summary); err != nil { return err } c.cfg.NotifyClosedChannel(summary.ChanPoint) @@ -285,7 +356,7 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel, // TODO(roasbeef); abstraction leak... 
// * rework: adaptor method to set log scope w/ factory func chanLog, err := newBoltArbitratorLog( - c.chanSource.DB, arbCfg, c.cfg.ChainHash, chanPoint, + c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint, ) if err != nil { blockEpoch.Cancel() @@ -293,7 +364,7 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel, } arbCfg.MarkChannelResolved = func() error { - return c.resolveContract(chanPoint, chanLog) + return c.ResolveContract(chanPoint) } // Finally, we'll need to construct a series of htlc Sets based on all @@ -318,11 +389,20 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel, ), nil } -// resolveContract marks a contract as fully resolved within the database. +// getArbChannel returns an open channel wrapper for use by channel arbitrators. +func (c *ChainArbitrator) getArbChannel( + channel *channeldb.OpenChannel) *arbChannel { + + return &arbChannel{ + channel: channel, + c: c, + } +} + +// ResolveContract marks a contract as fully resolved within the database. // This is only to be done once all contracts which were live on the channel // before hitting the chain have been resolved. -func (c *ChainArbitrator) resolveContract(chanPoint wire.OutPoint, - arbLog ArbitratorLog) error { +func (c *ChainArbitrator) ResolveContract(chanPoint wire.OutPoint) error { log.Infof("Marking ChannelPoint(%v) fully resolved", chanPoint) @@ -335,27 +415,44 @@ func (c *ChainArbitrator) resolveContract(chanPoint wire.OutPoint, return err } - if arbLog != nil { - // Once this has been marked as resolved, we'll wipe the log - // that the channel arbitrator was using to store its - // persistent state. We do this after marking the channel - // resolved, as otherwise, the arbitrator would be re-created, - // and think it was starting from the default state. 
- if err := arbLog.WipeHistory(); err != nil { - return err - } - } - + // Now that the channel has been marked as fully closed, we'll stop + // both the channel arbitrator and chain watcher for this channel if + // they're still active. + var arbLog ArbitratorLog c.Lock() + chainArb := c.activeChannels[chanPoint] delete(c.activeChannels, chanPoint) - chainWatcher, ok := c.activeWatchers[chanPoint] - if ok { - chainWatcher.Stop() - } + chainWatcher := c.activeWatchers[chanPoint] delete(c.activeWatchers, chanPoint) c.Unlock() + if chainArb != nil { + arbLog = chainArb.log + + if err := chainArb.Stop(); err != nil { + log.Warnf("unable to stop ChannelArbitrator(%v): %v", + chanPoint, err) + } + } + if chainWatcher != nil { + if err := chainWatcher.Stop(); err != nil { + log.Warnf("unable to stop ChainWatcher(%v): %v", + chanPoint, err) + } + } + + // Once this has been marked as resolved, we'll wipe the log that the + // channel arbitrator was using to store its persistent state. We do + // this after marking the channel resolved, as otherwise, the + // arbitrator would be re-created, and think it was starting from the + // default state. + if arbLog != nil { + if err := arbLog.WipeHistory(); err != nil { + return err + } + } + return nil } @@ -413,35 +510,11 @@ func (c *ChainArbitrator) Start() error { c.activeChannels[chanPoint] = channelArb - // If the channel has had its commitment broadcasted already, - // republish it in case it didn't propagate. - if !channel.HasChanStatus( - channeldb.ChanStatusCommitBroadcasted, - ) { - continue - } - - closeTx, err := channel.BroadcastedCommitment() - switch { - - // This can happen for channels that had their closing tx - // published before we started storing it to disk. - case err == channeldb.ErrNoCloseTx: - log.Warnf("Channel %v is in state CommitBroadcasted, "+ - "but no closing tx to re-publish...", chanPoint) - continue - - case err != nil: + // Republish any closing transactions for this channel. 
+ err = c.publishClosingTxs(channel) + if err != nil { return err } - - log.Infof("Re-publishing closing tx(%v) for channel %v", - closeTx.TxHash(), chanPoint) - err = c.cfg.PublishTx(closeTx) - if err != nil && err != lnwallet.ErrDoubleSpend { - log.Warnf("Unable to broadcast close tx(%v): %v", - closeTx.TxHash(), err) - } } // In addition to the channels that we know to be open, we'll also @@ -481,14 +554,14 @@ func (c *ChainArbitrator) Start() error { CloseType: closeChanInfo.CloseType, } chanLog, err := newBoltArbitratorLog( - c.chanSource.DB, arbCfg, c.cfg.ChainHash, chanPoint, + c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint, ) if err != nil { blockEpoch.Cancel() return err } arbCfg.MarkChannelResolved = func() error { - return c.resolveContract(chanPoint, chanLog) + return c.ResolveContract(chanPoint) } // We can also leave off the set of HTLC's here as since the @@ -551,6 +624,90 @@ func (c *ChainArbitrator) Start() error { return nil } +// publishClosingTxs will load any stored cooperative or unilater closing +// transactions and republish them. This helps ensure propagation of the +// transactions in the event that prior publications failed. +func (c *ChainArbitrator) publishClosingTxs( + channel *channeldb.OpenChannel) error { + + // If the channel has had its unilateral close broadcasted already, + // republish it in case it didn't propagate. + if channel.HasChanStatus(channeldb.ChanStatusCommitBroadcasted) { + err := c.rebroadcast( + channel, channeldb.ChanStatusCommitBroadcasted, + ) + if err != nil { + return err + } + } + + // If the channel has had its cooperative close broadcasted + // already, republish it in case it didn't propagate. 
+	if channel.HasChanStatus(channeldb.ChanStatusCoopBroadcasted) {
+		err := c.rebroadcast(
+			channel, channeldb.ChanStatusCoopBroadcasted,
+		)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// rebroadcast is a helper method which will republish the unilateral or
+// cooperative close transaction of a channel in a particular state.
+//
+// NOTE: There is no risk to calling this method if the channel isn't in
+// either CommitmentBroadcasted or CoopBroadcasted, but the logs will be
+// misleading.
+func (c *ChainArbitrator) rebroadcast(channel *channeldb.OpenChannel,
+	state channeldb.ChannelStatus) error {
+
+	chanPoint := channel.FundingOutpoint
+
+	var (
+		closeTx *wire.MsgTx
+		kind    string
+		err     error
+	)
+	switch state {
+	case channeldb.ChanStatusCommitBroadcasted:
+		kind = "force"
+		closeTx, err = channel.BroadcastedCommitment()
+
+	case channeldb.ChanStatusCoopBroadcasted:
+		kind = "coop"
+		closeTx, err = channel.BroadcastedCooperative()
+
+	default:
+		return fmt.Errorf("unknown closing state: %v", state)
+	}
+
+	switch {
+
+	// This can happen for channels that had their closing tx published
+	// before we started storing it to disk.
+	case err == channeldb.ErrNoCloseTx:
+		log.Warnf("Channel %v is in state %v, but no %s closing tx "+
+			"to re-publish...", chanPoint, state, kind)
+		return nil
+
+	case err != nil:
+		return err
+	}
+
+	log.Infof("Re-publishing %s close tx(%v) for channel %v",
+		kind, closeTx.TxHash(), chanPoint)
+
+	err = c.cfg.PublishTx(closeTx)
+	if err != nil && err != lnwallet.ErrDoubleSpend {
+		log.Warnf("Unable to broadcast %s close tx(%v): %v",
+			kind, closeTx.TxHash(), err)
+	}
+
+	return nil
+}
+
 // Stop signals the ChainArbitrator to trigger a graceful shutdown. Any active
 // channel arbitrators will be signalled to exit, and this method will block
 // until they've all exited.
diff --git a/contractcourt/chain_arbitrator_test.go b/contractcourt/chain_arbitrator_test.go index 38ea2a35bc..2692e29753 100644 --- a/contractcourt/chain_arbitrator_test.go +++ b/contractcourt/chain_arbitrator_test.go @@ -9,13 +9,14 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lnwallet" ) -// TestChainArbitratorRepulishCommitment testst that the chain arbitrator will -// republish closing transactions for channels marked CommitementBroadcast in -// the database at startup. -func TestChainArbitratorRepublishCommitment(t *testing.T) { +// TestChainArbitratorRepulishCloses tests that the chain arbitrator will +// republish closing transactions for channels marked CommitementBroadcast or +// CoopBroadcast in the database at startup. +func TestChainArbitratorRepublishCloses(t *testing.T) { t.Parallel() tempPath, err := ioutil.TempDir("", "testdb") @@ -34,7 +35,9 @@ func TestChainArbitratorRepublishCommitment(t *testing.T) { const numChans = 10 var channels []*channeldb.OpenChannel for i := 0; i < numChans; i++ { - lChannel, _, cleanup, err := lnwallet.CreateTestChannels(true) + lChannel, _, cleanup, err := lnwallet.CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatal(err) } @@ -61,7 +64,12 @@ func TestChainArbitratorRepublishCommitment(t *testing.T) { for i := 0; i < numChans/2; i++ { closeTx := channels[i].FundingTxn.Copy() closeTx.TxIn[0].PreviousOutPoint = channels[i].FundingOutpoint - err := channels[i].MarkCommitmentBroadcasted(closeTx) + err := channels[i].MarkCommitmentBroadcasted(closeTx, true) + if err != nil { + t.Fatal(err) + } + + err = channels[i].MarkCoopBroadcasted(closeTx, true) if err != nil { t.Fatal(err) } @@ -69,15 +77,16 @@ func TestChainArbitratorRepublishCommitment(t *testing.T) { // We keep track of the transactions published by the ChainArbitrator // 
at startup. - published := make(map[chainhash.Hash]struct{}) + published := make(map[chainhash.Hash]int) chainArbCfg := ChainArbitratorConfig{ ChainIO: &mockChainIO{}, Notifier: &mockNotifier{}, PublishTx: func(tx *wire.MsgTx) error { - published[tx.TxHash()] = struct{}{} + published[tx.TxHash()]++ return nil }, + Clock: clock.NewDefaultClock(), } chainArb := NewChainArbitrator( chainArbCfg, db, @@ -103,11 +112,16 @@ func TestChainArbitratorRepublishCommitment(t *testing.T) { closeTx := channels[i].FundingTxn.Copy() closeTx.TxIn[0].PreviousOutPoint = channels[i].FundingOutpoint - _, ok := published[closeTx.TxHash()] + count, ok := published[closeTx.TxHash()] if !ok { t.Fatalf("closing tx not re-published") } + // We expect one coop close and one force close. + if count != 2 { + t.Fatalf("expected 2 closing txns, only got %d", count) + } + delete(published, closeTx.TxHash()) } @@ -115,3 +129,108 @@ func TestChainArbitratorRepublishCommitment(t *testing.T) { t.Fatalf("unexpected tx published") } } + +// TestResolveContract tests that if we have an active channel being watched by +// the chain arb, then a call to ResolveContract will mark the channel as fully +// closed in the database, and also clean up all arbitrator state. +func TestResolveContract(t *testing.T) { + t.Parallel() + + // To start with, we'll create a new temp DB for the duration of this + // test. + tempPath, err := ioutil.TempDir("", "testdb") + if err != nil { + t.Fatalf("unable to make temp dir: %v", err) + } + defer os.RemoveAll(tempPath) + db, err := channeldb.Open(tempPath) + if err != nil { + t.Fatalf("unable to open db: %v", err) + } + defer db.Close() + + // With the DB created, we'll make a new channel, and mark it as + // pending open within the database. 
+	newChannel, _, cleanup, err := lnwallet.CreateTestChannels(
+		channeldb.SingleFunderTweaklessBit,
+	)
+	if err != nil {
+		t.Fatalf("unable to make new test channel: %v", err)
+	}
+	defer cleanup()
+	channel := newChannel.State()
+	channel.Db = db
+	addr := &net.TCPAddr{
+		IP:   net.ParseIP("127.0.0.1"),
+		Port: 18556,
+	}
+	if err := channel.SyncPending(addr, 101); err != nil {
+		t.Fatalf("unable to write channel to db: %v", err)
+	}
+
+	// With the channel inserted into the database, we'll now create a new
+	// chain arbitrator that should pick up these new channels and launch
+	// resolvers for them.
+	chainArbCfg := ChainArbitratorConfig{
+		ChainIO:  &mockChainIO{},
+		Notifier: &mockNotifier{},
+		PublishTx: func(tx *wire.MsgTx) error {
+			return nil
+		},
+		Clock: clock.NewDefaultClock(),
+	}
+	chainArb := NewChainArbitrator(
+		chainArbCfg, db,
+	)
+	if err := chainArb.Start(); err != nil {
+		t.Fatal(err)
+	}
+	defer func() {
+		if err := chainArb.Stop(); err != nil {
+			t.Fatal(err)
+		}
+	}()
+
+	channelArb := chainArb.activeChannels[channel.FundingOutpoint]
+
+	// While the resolvers are active, we'll now remove the channel from
+	// the database (mark it as closed).
+	err = db.AbandonChannel(&channel.FundingOutpoint, 4)
+	if err != nil {
+		t.Fatalf("unable to remove channel: %v", err)
+	}
+
+	// With the channel removed, we'll now manually call ResolveContract.
+	// This simulates needing to remove a channel from the chain arb due
+	// to any possible external consistency issues.
+	err = chainArb.ResolveContract(channel.FundingOutpoint)
+	if err != nil {
+		t.Fatalf("unable to resolve contract: %v", err)
+	}
+
+	// There shouldn't be an active chain watcher or channel arb for this
+	// channel.
+ if len(chainArb.activeChannels) != 0 { + t.Fatalf("expected zero active channels, instead have %v", + len(chainArb.activeChannels)) + } + if len(chainArb.activeWatchers) != 0 { + t.Fatalf("expected zero active watchers, instead have %v", + len(chainArb.activeWatchers)) + } + + // At this point, the channel's arbitrator log should also be empty as + // well. + _, err = channelArb.log.FetchContractResolutions() + if err != errScopeBucketNoExist { + t.Fatalf("channel arb log state should have been "+ + "removed: %v", err) + } + + // If we attempt to call this method again, then we should get a nil + // error, as there is no more state to be cleaned up. + err = chainArb.ResolveContract(channel.FundingOutpoint) + if err != nil { + t.Fatalf("second resolve call shouldn't fail: %v", err) + } +} diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go index 7ea3da1427..9956550593 100644 --- a/contractcourt/chain_watcher.go +++ b/contractcourt/chain_watcher.go @@ -331,7 +331,8 @@ func (c *chainWatcher) SubscribeChannelEvents() *ChainEventSubscription { // based off of only the set of outputs included. func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig, commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64, - revocationProducer shachain.Producer, tweakless bool) (bool, error) { + revocationProducer shachain.Producer, + chanType channeldb.ChannelType) (bool, error) { // First, we'll re-derive our commitment point for this state since // this is what we use to randomize each of the keys for this state. @@ -345,13 +346,13 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig, // and remote keys for this state. We use our point as only we can // revoke our own commitment. 
commitKeyRing := lnwallet.DeriveCommitmentKeys( - commitPoint, true, tweakless, &localChanCfg, &remoteChanCfg, + commitPoint, true, chanType, &localChanCfg, &remoteChanCfg, ) // With the keys derived, we'll construct the remote script that'll be // present if they have a non-dust balance on the commitment. - remotePkScript, err := input.CommitScriptUnencumbered( - commitKeyRing.NoDelayKey, + remoteScript, _, err := lnwallet.CommitScriptToRemote( + chanType, commitKeyRing.ToRemoteKey, ) if err != nil { return false, err @@ -361,7 +362,7 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig, // the remote party allowing them to claim this output before the CSV // delay if we breach. localScript, err := input.CommitScriptToSelf( - uint32(localChanCfg.CsvDelay), commitKeyRing.DelayKey, + uint32(localChanCfg.CsvDelay), commitKeyRing.ToLocalKey, commitKeyRing.RevocationKey, ) if err != nil { @@ -382,7 +383,7 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig, case bytes.Equal(localPkScript, pkScript): return true, nil - case bytes.Equal(remotePkScript, pkScript): + case bytes.Equal(remoteScript.PkScript, pkScript): return true, nil } } @@ -392,6 +393,101 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig, return false, nil } +// chainSet includes all the information we need to dispatch a channel close +// event to any subscribers. +type chainSet struct { + // remoteStateNum is the commitment number of the lowest valid + // commitment the remote party holds from our PoV. This value is used + // to determine if the remote party is playing a state that's behind, + // in line, or ahead of the latest state we know for it. + remoteStateNum uint64 + + // commitSet includes information pertaining to the set of active HTLCs + // on each commitment. + commitSet CommitSet + + // remoteCommit is the current commitment of the remote party. 
+ remoteCommit channeldb.ChannelCommitment + + // localCommit is our current commitment. + localCommit channeldb.ChannelCommitment + + // remotePendingCommit points to the dangling commitment of the remote + // party, if it exists. If there's no dangling commitment, then this + // pointer will be nil. + remotePendingCommit *channeldb.ChannelCommitment +} + +// newChainSet creates a new chainSet given the current up to date channel +// state. +func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) { + // First, we'll grab the current unrevoked commitments for ourselves + // and the remote party. + localCommit, remoteCommit, err := chanState.LatestCommitments() + if err != nil { + return nil, fmt.Errorf("unable to fetch channel state for "+ + "chan_point=%v", chanState.FundingOutpoint) + } + + log.Debugf("ChannelPoint(%v): local_commit_type=%v, local_commit=%v", + chanState.FundingOutpoint, chanState.ChanType, + spew.Sdump(localCommit)) + log.Debugf("ChannelPoint(%v): remote_commit_type=%v, remote_commit=%v", + chanState.FundingOutpoint, chanState.ChanType, + spew.Sdump(remoteCommit)) + + // Fetch the current known commit height for the remote party, and + // their pending commitment chain tip if it exists. + remoteStateNum := remoteCommit.CommitHeight + remoteChainTip, err := chanState.RemoteCommitChainTip() + if err != nil && err != channeldb.ErrNoPendingCommit { + return nil, fmt.Errorf("unable to obtain chain tip for "+ + "ChannelPoint(%v): %v", + chanState.FundingOutpoint, err) + } + + // Now that we have all the possible valid commitments, we'll make the + // CommitSet the ChannelArbitrator will need in order to carry out its + // duty. 
+ commitSet := CommitSet{ + HtlcSets: map[HtlcSetKey][]channeldb.HTLC{ + LocalHtlcSet: localCommit.Htlcs, + RemoteHtlcSet: remoteCommit.Htlcs, + }, + } + + var remotePendingCommit *channeldb.ChannelCommitment + if remoteChainTip != nil { + remotePendingCommit = &remoteChainTip.Commitment + log.Debugf("ChannelPoint(%v): remote_pending_commit_type=%v, "+ + "remote_pending_commit=%v", chanState.FundingOutpoint, + chanState.ChanType, + spew.Sdump(remoteChainTip.Commitment)) + + htlcs := remoteChainTip.Commitment.Htlcs + commitSet.HtlcSets[RemotePendingHtlcSet] = htlcs + } + + // We'll now retrieve the latest state of the revocation store so we + // can populate the revocation information within the channel state + // object that we have. + // + // TODO(roasbeef): mutation is bad mkay + _, err = chanState.RemoteRevocationStore() + if err != nil { + return nil, fmt.Errorf("unable to fetch revocation state for "+ + "chan_point=%v", chanState.FundingOutpoint) + } + + return &chainSet{ + remoteStateNum: remoteStateNum, + commitSet: commitSet, + localCommit: *localCommit, + remoteCommit: *remoteCommit, + remotePendingCommit: remotePendingCommit, + }, nil +} + // closeObserver is a dedicated goroutine that will watch for any closes of the // channel that it's watching on chain. In the event of an on-chain event, the // close observer will assembled the proper materials required to claim the @@ -422,51 +518,12 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { // revoked state...!!! commitTxBroadcast := commitSpend.SpendingTx - // An additional piece of information we need to properly - // dispatch a close event if is this channel was using the - // tweakless remove key format or not. 
- tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless() - - localCommit, remoteCommit, err := c.cfg.chanState.LatestCommitments() + // First, we'll construct the chainset which includes all the + // data we need to dispatch an event to our subscribers about + // this possible channel close event. + chainSet, err := newChainSet(c.cfg.chanState) if err != nil { - log.Errorf("Unable to fetch channel state for "+ - "chan_point=%v", c.cfg.chanState.FundingOutpoint) - return - } - - // Fetch the current known commit height for the remote party, - // and their pending commitment chain tip if it exist. - remoteStateNum := remoteCommit.CommitHeight - remoteChainTip, err := c.cfg.chanState.RemoteCommitChainTip() - if err != nil && err != channeldb.ErrNoPendingCommit { - log.Errorf("unable to obtain chain tip for "+ - "ChannelPoint(%v): %v", - c.cfg.chanState.FundingOutpoint, err) - return - } - - // Now that we have all the possible valid commitments, we'll - // make the CommitSet the ChannelArbitrator will need it in - // order to carry out its duty. - commitSet := CommitSet{ - HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC), - } - commitSet.HtlcSets[LocalHtlcSet] = localCommit.Htlcs - commitSet.HtlcSets[RemoteHtlcSet] = remoteCommit.Htlcs - if remoteChainTip != nil { - htlcs := remoteChainTip.Commitment.Htlcs - commitSet.HtlcSets[RemotePendingHtlcSet] = htlcs - } - - // We'll not retrieve the latest sate of the revocation store - // so we can populate the information within the channel state - // object that we have. 
- // - // TODO(roasbeef): mutation is bad mkay - _, err = c.cfg.chanState.RemoteRevocationStore() - if err != nil { - log.Errorf("Unable to fetch revocation state for "+ - "chan_point=%v", c.cfg.chanState.FundingOutpoint) + log.Errorf("unable to create commit set: %v", err) return } @@ -484,7 +541,7 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { c.cfg.chanState.LocalChanCfg, c.cfg.chanState.RemoteChanCfg, commitSpend, broadcastStateNum, c.cfg.chanState.RevocationProducer, - tweaklessCommit, + c.cfg.chanState.ChanType, ) if err != nil { log.Errorf("unable to determine self commit for "+ @@ -497,10 +554,11 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { // as we don't have any further processing we need to do (we // can't cheat ourselves :p). if isOurCommit { - commitSet.ConfCommitKey = &LocalHtlcSet + chainSet.commitSet.ConfCommitKey = &LocalHtlcSet if err := c.dispatchLocalForceClose( - commitSpend, *localCommit, commitSet, + commitSpend, chainSet.localCommit, + chainSet.commitSet, ); err != nil { log.Errorf("unable to handle local"+ "close for chan_point=%v: %v", @@ -541,11 +599,16 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { // latest state, then they've initiated a unilateral close. So // we'll trigger the unilateral close signal so subscribers can // clean up the state as necessary. 
- case broadcastStateNum == remoteStateNum && !isRecoveredChan: - commitSet.ConfCommitKey = &RemoteHtlcSet + case broadcastStateNum == chainSet.remoteStateNum && + !isRecoveredChan: + log.Infof("Remote party broadcast base set, "+ + "commit_num=%v", chainSet.remoteStateNum) + + chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet err := c.dispatchRemoteForceClose( - commitSpend, *remoteCommit, commitSet, + commitSpend, chainSet.remoteCommit, + chainSet.commitSet, c.cfg.chanState.RemoteCurrentRevocation, ) if err != nil { @@ -559,13 +622,16 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { // This case can arise when we initiate a state transition, but // the remote party has a fail crash _after_ accepting the new // state, but _before_ sending their signature to us. - case broadcastStateNum == remoteStateNum+1 && - remoteChainTip != nil && !isRecoveredChan: + case broadcastStateNum == chainSet.remoteStateNum+1 && + chainSet.remotePendingCommit != nil && !isRecoveredChan: - commitSet.ConfCommitKey = &RemotePendingHtlcSet + log.Infof("Remote party broadcast pending set, "+ + "commit_num=%v", chainSet.remoteStateNum+1) + chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet err := c.dispatchRemoteForceClose( - commitSpend, *remoteCommit, commitSet, + commitSpend, *chainSet.remotePendingCommit, + chainSet.commitSet, c.cfg.chanState.RemoteNextRevocation, ) if err != nil { @@ -583,11 +649,11 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { // current state is, so we assume either the remote party // forced closed or we've been breached. In the latter case, // our tower will take care of us. - case broadcastStateNum > remoteStateNum || isRecoveredChan: + case broadcastStateNum > chainSet.remoteStateNum || isRecoveredChan: log.Warnf("Remote node broadcast state #%v, "+ "which is more than 1 beyond best known "+ "state #%v!!! 
Attempting recovery...", - broadcastStateNum, remoteStateNum) + broadcastStateNum, chainSet.remoteStateNum) // If this isn't a tweakless commitment, then we'll // need to wait for the remote party's latest unrevoked @@ -596,6 +662,7 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { // close and sweep immediately using a fake commitPoint // as it isn't actually needed for recovery anymore. commitPoint := c.cfg.chanState.RemoteCurrentRevocation + tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless() if !tweaklessCommit { commitPoint = c.waitForCommitmentPoint() if commitPoint == nil { @@ -609,8 +676,9 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { c.cfg.chanState.FundingOutpoint) } else { - log.Infof("ChannelPoint(%v) is tweakless, " + - "moving to sweep directly on chain") + log.Infof("ChannelPoint(%v) is tweakless, "+ + "moving to sweep directly on chain", + c.cfg.chanState.FundingOutpoint) } // Since we don't have the commitment stored for this @@ -619,10 +687,10 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { // able to recover any HTLC funds. // // TODO(halseth): can we try to recover some HTLCs? - commitSet.ConfCommitKey = &RemoteHtlcSet + chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet err = c.dispatchRemoteForceClose( commitSpend, channeldb.ChannelCommitment{}, - commitSet, commitPoint, + chainSet.commitSet, commitPoint, ) if err != nil { log.Errorf("unable to handle remote "+ @@ -635,9 +703,9 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { // VIOLATE THE CONTRACT LAID OUT WITHIN THE PAYMENT CHANNEL. // Therefore we close the signal indicating a revoked broadcast // to allow subscribers to swiftly dispatch justice!!! 
- case broadcastStateNum < remoteStateNum: + case broadcastStateNum < chainSet.remoteStateNum: err := c.dispatchContractBreach( - commitSpend, remoteCommit, + commitSpend, &chainSet.remoteCommit, broadcastStateNum, ) if err != nil { @@ -928,8 +996,8 @@ func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail retribution.KeyRing.CommitPoint.Curve = nil retribution.KeyRing.LocalHtlcKey = nil retribution.KeyRing.RemoteHtlcKey = nil - retribution.KeyRing.DelayKey = nil - retribution.KeyRing.NoDelayKey = nil + retribution.KeyRing.ToLocalKey = nil + retribution.KeyRing.ToRemoteKey = nil retribution.KeyRing.RevocationKey = nil return spew.Sdump(retribution) })) @@ -986,7 +1054,9 @@ func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail closeSummary.LastChanSyncMsg = chanSync } - if err := c.cfg.chanState.CloseChannel(&closeSummary); err != nil { + if err := c.cfg.chanState.CloseChannel( + &closeSummary, channeldb.ChanStatusRemoteCloseInitiator, + ); err != nil { return err } diff --git a/contractcourt/chain_watcher_test.go b/contractcourt/chain_watcher_test.go index f0e07628be..6dc47f530f 100644 --- a/contractcourt/chain_watcher_test.go +++ b/contractcourt/chain_watcher_test.go @@ -26,6 +26,7 @@ func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, _ []byte, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) { return &chainntnfs.ConfirmationEvent{ Confirmed: m.confChan, + Cancel: func() {}, }, nil } @@ -62,7 +63,9 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) { // First, we'll create two channels which already have established a // commitment contract between themselves. 
- aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -149,7 +152,9 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) { // First, we'll create two channels which already have established a // commitment contract between themselves. - aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -273,7 +278,7 @@ func TestChainWatcherDataLossProtect(t *testing.T) { // First, we'll create two channels which already have // established a commitment contract between themselves. aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( - false, + channeldb.SingleFunderBit, ) if err != nil { t.Fatalf("unable to create test channels: %v", err) @@ -442,7 +447,7 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) { // First, we'll create two channels which already have // established a commitment contract between themselves. 
aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels( - false, + channeldb.SingleFunderBit, ) if err != nil { t.Fatalf("unable to create test channels: %v", err) diff --git a/contractcourt/channel_arbitrator.go b/contractcourt/channel_arbitrator.go index 3bd669027b..f4e0d96a32 100644 --- a/contractcourt/channel_arbitrator.go +++ b/contractcourt/channel_arbitrator.go @@ -6,15 +6,18 @@ import ( "fmt" "sync" "sync/atomic" + "time" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/sweep" ) var ( @@ -63,6 +66,22 @@ type WitnessBeacon interface { AddPreimages(preimages ...lntypes.Preimage) error } +// ArbChannel is an abstraction that allows the channel arbitrator to interact +// with an open channel. +type ArbChannel interface { + // ForceCloseChan should force close the contract that this attendant + // is watching over. We'll use this when we decide that we need to go + // to chain. It should in addition tell the switch to remove the + // corresponding link, such that we won't accept any new updates. The + // returned summary contains all items needed to eventually resolve all + // outputs on chain. + ForceCloseChan() (*lnwallet.LocalForceCloseSummary, error) + + // NewAnchorResolutions returns the anchor resolutions for currently + // valid commitment transactions. + NewAnchorResolutions() ([]*lnwallet.AnchorResolution, error) +} + // ChannelArbitratorConfig contains all the functionality that the // ChannelArbitrator needs in order to properly arbitrate any contract dispute // on chain. @@ -71,6 +90,10 @@ type ChannelArbitratorConfig struct { // channel. 
ChanPoint wire.OutPoint + // Channel is the full channel data structure. For legacy channels, this + // field may not always be set after a restart. + Channel ArbChannel + // ShortChanID describes the exact location of the channel within the // chain. We'll use this to address any messages that we need to send // to the switch during contract resolution. @@ -87,24 +110,18 @@ type ChannelArbitratorConfig struct { // channel. ChainEvents *ChainEventSubscription - // ForceCloseChan should force close the contract that this attendant - // is watching over. We'll use this when we decide that we need to go - // to chain. It should in addition tell the switch to remove the - // corresponding link, such that we won't accept any new updates. The - // returned summary contains all items needed to eventually resolve all - // outputs on chain. - ForceCloseChan func() (*lnwallet.LocalForceCloseSummary, error) - // MarkCommitmentBroadcasted should mark the channel as the commitment // being broadcast, and we are waiting for the commitment to confirm. - MarkCommitmentBroadcasted func(*wire.MsgTx) error + MarkCommitmentBroadcasted func(*wire.MsgTx, bool) error // MarkChannelClosed marks the channel closed in the database, with the // passed close summary. After this method successfully returns we can // no longer expect to receive chain events for this channel, and must // be able to recover from a failure without getting the close event - // again. - MarkChannelClosed func(*channeldb.ChannelCloseSummary) error + // again. It takes an optional channel status which will update the + // channel status in the record that we keep of historical channels. + MarkChannelClosed func(*channeldb.ChannelCloseSummary, + ...channeldb.ChannelStatus) error // IsPendingClose is a boolean indicating whether the channel is marked // as pending close in the database. 
@@ -129,13 +146,34 @@ type ChannelArbitratorConfig struct { ChainArbitratorConfig } +// ReportOutputType describes the type of output that is being reported +// on. +type ReportOutputType uint8 + +const ( + // ReportOutputIncomingHtlc is an incoming hash time locked contract on + // the commitment tx. + ReportOutputIncomingHtlc ReportOutputType = iota + + // ReportOutputOutgoingHtlc is an outgoing hash time locked contract on + // the commitment tx. + ReportOutputOutgoingHtlc + + // ReportOutputUnencumbered is an uncontested output on the commitment + // transaction paying to us directly. + ReportOutputUnencumbered + + // ReportOutputAnchor is an anchor output on the commitment tx. + ReportOutputAnchor +) + // ContractReport provides a summary of a commitment tx output. type ContractReport struct { // Outpoint is the final output that will be swept back to the wallet. Outpoint wire.OutPoint - // Incoming indicates whether the htlc was incoming to this channel. - Incoming bool + // Type indicates the type of the reported output. + Type ReportOutputType // Amount is the final value that will be swept in back to the wallet. Amount btcutil.Amount @@ -242,6 +280,9 @@ type ChannelArbitrator struct { started int32 // To be used atomically. stopped int32 // To be used atomically. + // startTimestamp is the time when this ChannelArbitrator was started. + startTimestamp time.Time + // log is a persistent log that the attendant will use to checkpoint // its next action, and the state of any unresolved contracts. 
log ArbitratorLog @@ -310,6 +351,7 @@ func (c *ChannelArbitrator) Start() error { if !atomic.CompareAndSwapInt32(&c.started, 0, 1) { return nil } + c.startTimestamp = c.cfg.Clock.Now() var ( err error @@ -329,9 +371,6 @@ func (c *ChannelArbitrator) Start() error { return err } - log.Infof("ChannelArbitrator(%v): starting state=%v", c.cfg.ChanPoint, - c.state) - _, bestHeight, err := c.cfg.ChainIO.GetBestBlock() if err != nil { c.cfg.BlockEpochs.Cancel() @@ -366,14 +405,19 @@ func (c *ChannelArbitrator) Start() error { case channeldb.RemoteForceClose: trigger = remoteCloseTrigger } - triggerHeight = c.cfg.ClosingHeight log.Warnf("ChannelArbitrator(%v): detected stalled "+ - "state=%v for closed channel, using "+ - "trigger=%v", c.cfg.ChanPoint, c.state, trigger) + "state=%v for closed channel", + c.cfg.ChanPoint, c.state) } + + triggerHeight = c.cfg.ClosingHeight } + log.Infof("ChannelArbitrator(%v): starting state=%v, trigger=%v, "+ + "triggerHeight=%v", c.cfg.ChanPoint, c.state, trigger, + triggerHeight) + // Next we'll fetch our confirmed commitment set. This will only exist // if the channel has been closed out on chain for modern nodes. For // older nodes, this won't be found at all, and will rely on the @@ -426,7 +470,7 @@ func (c *ChannelArbitrator) Start() error { // receive a chain event from the chain watcher than the // commitment has been confirmed on chain, and before we // advance our state step, we call InsertConfirmedCommitSet. - if err := c.relaunchResolvers(commitSet); err != nil { + if err := c.relaunchResolvers(commitSet, triggerHeight); err != nil { c.cfg.BlockEpochs.Cancel() return err } @@ -442,7 +486,9 @@ func (c *ChannelArbitrator) Start() error { // starting the ChannelArbitrator. This information should ideally be stored in // the database, so this only serves as a intermediate work-around to prevent a // migration. 
-func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet) error { +func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet, + heightHint uint32) error { + // We'll now query our log to see if there are any active unresolved // contracts. If this is the case, then we'll relaunch all contract // resolvers. @@ -501,96 +547,37 @@ func (c *ChannelArbitrator) relaunchResolvers(commitSet *CommitSet) error { "resolvers", c.cfg.ChanPoint, len(unresolvedContracts)) for _, resolver := range unresolvedContracts { - if err := c.supplementResolver(resolver, htlcMap); err != nil { - return err + htlcResolver, ok := resolver.(htlcContractResolver) + if !ok { + continue } - } - - c.launchResolvers(unresolvedContracts) - - return nil -} -// supplementResolver takes a resolver as it is restored from the log and fills -// in missing data from the htlcMap. -func (c *ChannelArbitrator) supplementResolver(resolver ContractResolver, - htlcMap map[wire.OutPoint]*channeldb.HTLC) error { - - switch r := resolver.(type) { - - case *htlcSuccessResolver: - return c.supplementSuccessResolver(r, htlcMap) - - case *htlcIncomingContestResolver: - return c.supplementIncomingContestResolver(r, htlcMap) - - case *htlcTimeoutResolver: - return c.supplementTimeoutResolver(r, htlcMap) + htlcPoint := htlcResolver.HtlcPoint() + htlc, ok := htlcMap[htlcPoint] + if !ok { + return fmt.Errorf( + "htlc resolver %T unavailable", resolver, + ) + } - case *htlcOutgoingContestResolver: - return c.supplementTimeoutResolver( - &r.htlcTimeoutResolver, htlcMap, - ) + htlcResolver.Supplement(*htlc) } - return nil -} - -// supplementSuccessResolver takes a htlcIncomingContestResolver as it is -// restored from the log and fills in missing data from the htlcMap. 
-func (c *ChannelArbitrator) supplementIncomingContestResolver( - r *htlcIncomingContestResolver, - htlcMap map[wire.OutPoint]*channeldb.HTLC) error { - - res := r.htlcResolution - htlcPoint := res.HtlcPoint() - htlc, ok := htlcMap[htlcPoint] - if !ok { - return errors.New( - "htlc for incoming contest resolver unavailable", + // The anchor resolver is stateless and can always be re-instantiated. + if contractResolutions.AnchorResolution != nil { + anchorResolver := newAnchorResolver( + contractResolutions.AnchorResolution.AnchorSignDescriptor, + contractResolutions.AnchorResolution.CommitAnchor, + heightHint, c.cfg.ChanPoint, + ResolverConfig{ + ChannelArbitratorConfig: c.cfg, + }, ) + unresolvedContracts = append(unresolvedContracts, anchorResolver) } - r.htlcAmt = htlc.Amt - r.circuitKey = channeldb.CircuitKey{ - ChanID: c.cfg.ShortChanID, - HtlcID: htlc.HtlcIndex, - } - - return nil -} - -// supplementSuccessResolver takes a htlcSuccessResolver as it is restored from -// the log and fills in missing data from the htlcMap. -func (c *ChannelArbitrator) supplementSuccessResolver(r *htlcSuccessResolver, - htlcMap map[wire.OutPoint]*channeldb.HTLC) error { - - res := r.htlcResolution - htlcPoint := res.HtlcPoint() - htlc, ok := htlcMap[htlcPoint] - if !ok { - return errors.New( - "htlc for success resolver unavailable", - ) - } - r.htlcAmt = htlc.Amt - return nil -} + c.launchResolvers(unresolvedContracts) -// supplementTimeoutResolver takes a htlcSuccessResolver as it is restored from -// the log and fills in missing data from the htlcMap. 
-func (c *ChannelArbitrator) supplementTimeoutResolver(r *htlcTimeoutResolver, - htlcMap map[wire.OutPoint]*channeldb.HTLC) error { - - res := r.htlcResolution - htlcPoint := res.HtlcPoint() - htlc, ok := htlcMap[htlcPoint] - if !ok { - return errors.New( - "htlc for timeout resolver unavailable", - ) - } - r.htlcAmt = htlc.Amt return nil } @@ -606,10 +593,6 @@ func (c *ChannelArbitrator) Report() []*ContractReport { continue } - if r.IsResolved() { - continue - } - report := r.report() if report == nil { continue @@ -836,7 +819,7 @@ func (c *ChannelArbitrator) stateStep( // We'll tell the switch that it should remove the link for // this channel, in addition to fetching the force close // summary needed to close this channel on chain. - closeSummary, err := c.cfg.ForceCloseChan() + closeSummary, err := c.cfg.Channel.ForceCloseChan() if err != nil { log.Errorf("ChannelArbitrator(%v): unable to "+ "force close: %v", c.cfg.ChanPoint, err) @@ -846,8 +829,10 @@ func (c *ChannelArbitrator) stateStep( // Before publishing the transaction, we store it to the // database, such that we can re-publish later in case it - // didn't propagate. - if err := c.cfg.MarkCommitmentBroadcasted(closeTx); err != nil { + // didn't propagate. We initiated the force close, so we + // mark broadcast with local initiator set to true. + err = c.cfg.MarkCommitmentBroadcasted(closeTx, true) + if err != nil { log.Errorf("ChannelArbitrator(%v): unable to "+ "mark commitment broadcasted: %v", c.cfg.ChanPoint, err) @@ -857,8 +842,9 @@ func (c *ChannelArbitrator) stateStep( // With the close transaction in hand, broadcast the // transaction to the network, thereby entering the post // channel resolution state. 
- log.Infof("Broadcasting force close transaction, "+ - "ChannelPoint(%v): %v", c.cfg.ChanPoint, + log.Infof("Broadcasting force close transaction %v, "+ + "ChannelPoint(%v): %v", closeTx.TxHash(), + c.cfg.ChanPoint, newLogClosure(func() string { return spew.Sdump(closeTx) })) @@ -882,31 +868,49 @@ func (c *ChannelArbitrator) stateStep( // to be confirmed. case StateCommitmentBroadcasted: switch trigger { - // We are waiting for a commitment to be confirmed, so any - // other trigger will be ignored. + + // We are waiting for a commitment to be confirmed. case chainTrigger, userTrigger: - log.Infof("ChannelArbitrator(%v): noop trigger %v", - c.cfg.ChanPoint, trigger) + // The commitment transaction has been broadcast, but it + // doesn't necessarily need to be the commitment + // transaction version that is going to be confirmed. To + // be sure that any of those versions can be anchored + // down, we now submit all anchor resolutions to the + // sweeper. The sweeper will keep trying to sweep all of + // them. + // + // Note that the sweeper is idempotent. If we ever + // happen to end up at this point in the code again, no + // harm is done by re-offering the anchors to the + // sweeper. + anchors, err := c.cfg.Channel.NewAnchorResolutions() + if err != nil { + return StateError, closeTx, err + } + + err = c.sweepAnchors(anchors, triggerHeight) + if err != nil { + return StateError, closeTx, err + } + nextState = StateCommitmentBroadcasted // If this state advance was triggered by any of the // commitments being confirmed, then we'll jump to the state // where the contract has been closed. case localCloseTrigger, remoteCloseTrigger: - log.Infof("ChannelArbitrator(%v): trigger %v, "+ - " going to StateContractClosed", - c.cfg.ChanPoint, trigger) nextState = StateContractClosed // If a coop close or breach was confirmed, jump straight to // the fully resolved state. 
case coopCloseTrigger, breachCloseTrigger: - log.Infof("ChannelArbitrator(%v): trigger %v, "+ - " going to StateFullyResolved", - c.cfg.ChanPoint, trigger) nextState = StateFullyResolved } + log.Infof("ChannelArbitrator(%v): trigger %v moving from "+ + "state %v to %v", c.cfg.ChanPoint, trigger, c.state, + nextState) + // If we're in this state, then the contract has been fully closed to // outside sub-systems, so we'll process the prior set of on-chain // contract actions and launch a set of resolvers. @@ -931,27 +935,6 @@ func (c *ChannelArbitrator) stateStep( break } - // If we've have broadcast the commitment transaction, we send - // our commitment output for incubation, but only if it wasn't - // trimmed. We'll need to wait for a CSV timeout before we can - // reclaim the funds. - commitRes := contractResolutions.CommitResolution - if commitRes != nil && commitRes.MaturityDelay > 0 { - log.Infof("ChannelArbitrator(%v): sending commit "+ - "output for incubation", c.cfg.ChanPoint) - - err = c.cfg.IncubateOutputs( - c.cfg.ChanPoint, commitRes, - nil, nil, triggerHeight, - ) - if err != nil { - // TODO(roasbeef): check for AlreadyExists errors - log.Errorf("unable to incubate commitment "+ - "output: %v", err) - return StateError, closeTx, err - } - } - // Now that we know we'll need to act, we'll process the htlc // actions, wen create the structures we need to resolve all // outstanding contracts. @@ -1039,6 +1022,54 @@ func (c *ChannelArbitrator) stateStep( return nextState, closeTx, nil } +// sweepAnchors offers all given anchor resolutions to the sweeper. It requests +// sweeping at the minimum fee rate. This fee rate can be upped manually by the +// user via the BumpFee rpc. +func (c *ChannelArbitrator) sweepAnchors(anchors []*lnwallet.AnchorResolution, + heightHint uint32) error { + + // Use the chan id as the exclusive group. This prevents any of the + // anchors from being batched together. 
+ exclusiveGroup := c.cfg.ShortChanID.ToUint64() + + // Retrieve the current minimum fee rate from the sweeper. + minFeeRate := c.cfg.Sweeper.RelayFeePerKW() + + for _, anchor := range anchors { + log.Debugf("ChannelArbitrator(%v): pre-confirmation sweep of "+ + "anchor of tx %v", c.cfg.ChanPoint, anchor.CommitAnchor) + + // Prepare anchor output for sweeping. + anchorInput := input.MakeBaseInput( + &anchor.CommitAnchor, + input.CommitmentAnchor, + &anchor.AnchorSignDescriptor, + heightHint, + ) + + // Sweep anchor output with the minimum fee rate. This usually + // (up to a min relay fee of 3 sat/b) means that the anchor + // sweep will be economical. Also signal that this is a force + // sweep. If the user decides to bump the fee on the anchor + // sweep, it will be swept even if it isn't economical. + _, err := c.cfg.Sweeper.SweepInput( + &anchorInput, + sweep.Params{ + Fee: sweep.FeePreference{ + FeeRate: minFeeRate, + }, + Force: true, + ExclusiveGroup: &exclusiveGroup, + }, + ) + if err != nil { + return err + } + } + + return nil +} + // launchResolvers updates the activeResolvers list and starts the resolvers. func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver) { c.activeResolversLock.Lock() @@ -1188,17 +1219,17 @@ func (c ChainActionMap) Merge(actions ChainActionMap) { // we should go on chain to claim. We do this rather than waiting up until the // last minute as we want to ensure that when we *need* (HTLC is timed out) to // sweep, the commitment is already confirmed. -func (c *ChannelArbitrator) shouldGoOnChain(htlcExpiry, broadcastDelta, - currentHeight uint32) bool { +func (c *ChannelArbitrator) shouldGoOnChain(htlc channeldb.HTLC, + broadcastDelta, currentHeight uint32) bool { // We'll calculate the broadcast cut off for this HTLC. This is the // height that (based on our current fee estimation) we should // broadcast in order to ensure the commitment transaction is confirmed // before the HTLC fully expires. 
- broadcastCutOff := htlcExpiry - broadcastDelta + broadcastCutOff := htlc.RefundTimeout - broadcastDelta log.Tracef("ChannelArbitrator(%v): examining outgoing contract: "+ - "expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlcExpiry, + "expiry=%v, cutoff=%v, height=%v", c.cfg.ChanPoint, htlc.RefundTimeout, broadcastCutOff, currentHeight) // TODO(roasbeef): take into account default HTLC delta, don't need to @@ -1207,7 +1238,29 @@ func (c *ChannelArbitrator) shouldGoOnChain(htlcExpiry, broadcastDelta, // We should on-chain for this HTLC, iff we're within out broadcast // cutoff window. - return currentHeight >= broadcastCutOff + if currentHeight < broadcastCutOff { + return false + } + + // In case of incoming htlc we should go to chain. + if htlc.Incoming { + return true + } + + // For htlcs that are result of our initiated payments we give some grace + // period before force closing the channel. During this time we expect + // both nodes to connect and give a chance to the other node to send its + // updates and cancel the htlc. + // This shouldn't add any security risk as there is no incoming htlc to + // fulfill at this case and the expectation is that when the channel is + // active the other node will send update_fail_htlc to remove the htlc + // without closing the channel. It is up to the user to force close the + // channel if the peer misbehaves and doesn't send the update_fail_htlc. + // It is useful when this node is most of the time not online and is + // likely to miss the time slot where the htlc may be cancelled. 
+ isForwarded := c.cfg.IsForwardedHTLC(c.cfg.ShortChanID, htlc.HtlcIndex) + upTime := c.cfg.Clock.Now().Sub(c.startTimestamp) + return isForwarded || upTime > c.cfg.PaymentsExpirationGracePeriod } // checkCommitChainActions is called for each new block connected to the end of @@ -1224,8 +1277,10 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32, // * race condition if adding and we broadcast, etc // * or would make each instance sync? - log.Debugf("ChannelArbitrator(%v): checking chain actions at "+ - "height=%v", c.cfg.ChanPoint, height) + log.Debugf("ChannelArbitrator(%v): checking commit chain actions at "+ + "height=%v, in_htlc_count=%v, out_htlc_count=%v", + c.cfg.ChanPoint, height, + len(htlcs.incomingHTLCs), len(htlcs.outgoingHTLCs)) actionMap := make(ChainActionMap) @@ -1235,8 +1290,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32, for _, htlc := range htlcs.outgoingHTLCs { // We'll need to go on-chain for an outgoing HTLC if it was // never resolved downstream, and it's "close" to timing out. - toChain := c.shouldGoOnChain( - htlc.RefundTimeout, c.cfg.OutgoingBroadcastDelta, + toChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta, height, ) @@ -1267,8 +1321,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32, continue } - toChain := c.shouldGoOnChain( - htlc.RefundTimeout, c.cfg.IncomingBroadcastDelta, + toChain := c.shouldGoOnChain(htlc, c.cfg.IncomingBroadcastDelta, height, ) @@ -1318,8 +1371,7 @@ func (c *ChannelArbitrator) checkCommitChainActions(height uint32, // mark it still "live". After we broadcast, we'll monitor it // until the HTLC times out to see if we can also redeem it // on-chain. 
- case !c.shouldGoOnChain( - htlc.RefundTimeout, c.cfg.OutgoingBroadcastDelta, + case !c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta, height, ): // TODO(roasbeef): also need to be able to query @@ -1488,8 +1540,7 @@ func (c *ChannelArbitrator) checkRemoteDanglingActions( for _, htlc := range pendingRemoteHTLCs { // We'll now check if we need to go to chain in order to cancel // the incoming HTLC. - goToChain := c.shouldGoOnChain( - htlc.RefundTimeout, c.cfg.OutgoingBroadcastDelta, + goToChain := c.shouldGoOnChain(htlc, c.cfg.OutgoingBroadcastDelta, height, ) @@ -1685,7 +1736,7 @@ func (c *ChannelArbitrator) prepContractResolutions( // We'll create the resolver kit that we'll be cloning for each // resolver so they each can do their duty. - resKit := ResolverKit{ + resolverCfg := ResolverConfig{ ChannelArbitratorConfig: c.cfg, Checkpoint: func(res ContractResolver) error { return c.log.InsertUnresolvedContracts(res) @@ -1719,6 +1770,8 @@ func (c *ChannelArbitrator) prepContractResolutions( // claim the HTLC (second-level or directly), then add the pre case HtlcClaimAction: for _, htlc := range htlcs { + htlc := htlc + htlcOp := wire.OutPoint{ Hash: commitHash, Index: uint32(htlc.OutputIndex), @@ -1733,14 +1786,9 @@ func (c *ChannelArbitrator) prepContractResolutions( continue } - resKit.Quit = make(chan struct{}) - resolver := &htlcSuccessResolver{ - htlcResolution: resolution, - broadcastHeight: height, - payHash: htlc.RHash, - htlcAmt: htlc.Amt, - ResolverKit: resKit, - } + resolver := newSuccessResolver( + resolution, height, htlc, resolverCfg, + ) htlcResolvers = append(htlcResolvers, resolver) } @@ -1749,6 +1797,8 @@ func (c *ChannelArbitrator) prepContractResolutions( // backwards. 
case HtlcTimeoutAction: for _, htlc := range htlcs { + htlc := htlc + htlcOp := wire.OutPoint{ Hash: commitHash, Index: uint32(htlc.OutputIndex), @@ -1761,14 +1811,9 @@ func (c *ChannelArbitrator) prepContractResolutions( continue } - resKit.Quit = make(chan struct{}) - resolver := &htlcTimeoutResolver{ - htlcResolution: resolution, - broadcastHeight: height, - htlcIndex: htlc.HtlcIndex, - htlcAmt: htlc.Amt, - ResolverKit: resKit, - } + resolver := newTimeoutResolver( + resolution, height, htlc, resolverCfg, + ) htlcResolvers = append(htlcResolvers, resolver) } @@ -1777,6 +1822,8 @@ func (c *ChannelArbitrator) prepContractResolutions( // learn of the pre-image, or let the remote party time out. case HtlcIncomingWatchAction: for _, htlc := range htlcs { + htlc := htlc + htlcOp := wire.OutPoint{ Hash: commitHash, Index: uint32(htlc.OutputIndex), @@ -1793,23 +1840,10 @@ func (c *ChannelArbitrator) prepContractResolutions( continue } - circuitKey := channeldb.CircuitKey{ - HtlcID: htlc.HtlcIndex, - ChanID: c.cfg.ShortChanID, - } - - resKit.Quit = make(chan struct{}) - resolver := &htlcIncomingContestResolver{ - htlcExpiry: htlc.RefundTimeout, - circuitKey: circuitKey, - htlcSuccessResolver: htlcSuccessResolver{ - htlcResolution: resolution, - broadcastHeight: height, - payHash: htlc.RHash, - htlcAmt: htlc.Amt, - ResolverKit: resKit, - }, - } + resolver := newIncomingContestResolver( + resolution, height, htlc, + resolverCfg, + ) htlcResolvers = append(htlcResolvers, resolver) } @@ -1818,6 +1852,8 @@ func (c *ChannelArbitrator) prepContractResolutions( // backwards), or just timeout. 
case HtlcOutgoingWatchAction: for _, htlc := range htlcs { + htlc := htlc + htlcOp := wire.OutPoint{ Hash: commitHash, Index: uint32(htlc.OutputIndex), @@ -1831,36 +1867,36 @@ func (c *ChannelArbitrator) prepContractResolutions( continue } - resKit.Quit = make(chan struct{}) - resolver := &htlcOutgoingContestResolver{ - htlcTimeoutResolver: htlcTimeoutResolver{ - htlcResolution: resolution, - broadcastHeight: height, - htlcIndex: htlc.HtlcIndex, - htlcAmt: htlc.Amt, - ResolverKit: resKit, - }, - } + resolver := newOutgoingContestResolver( + resolution, height, htlc, resolverCfg, + ) htlcResolvers = append(htlcResolvers, resolver) } } } - // Finally, if this is was a unilateral closure, then we'll also create - // a resolver to sweep our commitment output (but only if it wasn't + // If this is was an unilateral closure, then we'll also create a + // resolver to sweep our commitment output (but only if it wasn't // trimmed). if contractResolutions.CommitResolution != nil { - resKit.Quit = make(chan struct{}) - resolver := &commitSweepResolver{ - commitResolution: *contractResolutions.CommitResolution, - broadcastHeight: height, - chanPoint: c.cfg.ChanPoint, - ResolverKit: resKit, - } - + resolver := newCommitSweepResolver( + *contractResolutions.CommitResolution, + height, c.cfg.ChanPoint, resolverCfg, + ) htlcResolvers = append(htlcResolvers, resolver) } + // We instantiate an anchor resolver if the commitmentment tx has an + // anchor. 
+ if contractResolutions.AnchorResolution != nil { + anchorResolver := newAnchorResolver( + contractResolutions.AnchorResolution.AnchorSignDescriptor, + contractResolutions.AnchorResolution.CommitAnchor, + height, c.cfg.ChanPoint, resolverCfg, + ) + htlcResolvers = append(htlcResolvers, anchorResolver) + } + return htlcResolvers, msgsToSend, nil } @@ -2152,6 +2188,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) { CommitHash: closeTx.TxHash(), CommitResolution: closeInfo.CommitResolution, HtlcResolutions: *closeInfo.HtlcResolutions, + AnchorResolution: closeInfo.AnchorResolution, } // When processing a unilateral close event, we'll @@ -2218,6 +2255,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) { CommitHash: *uniClosure.SpenderTxHash, CommitResolution: uniClosure.CommitResolution, HtlcResolutions: *uniClosure.HtlcResolutions, + AnchorResolution: uniClosure.AnchorResolution, } // When processing a unilateral close event, we'll @@ -2252,7 +2290,10 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) { // transition into StateContractClosed based on the // close status of the channel. 
closeSummary := &uniClosure.ChannelCloseSummary - err = c.cfg.MarkChannelClosed(closeSummary) + err = c.cfg.MarkChannelClosed( + closeSummary, + channeldb.ChanStatusRemoteCloseInitiator, + ) if err != nil { log.Errorf("Unable to mark channel closed: %v", err) diff --git a/contractcourt/channel_arbitrator_test.go b/contractcourt/channel_arbitrator_test.go index 85c76479d0..3dedb4200f 100644 --- a/contractcourt/channel_arbitrator_test.go +++ b/contractcourt/channel_arbitrator_test.go @@ -12,14 +12,24 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" + "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" ) +const ( + defaultTimeout = time.Second * 5 + + // stateTimeout is the timeout we allow when waiting for state + // transitions. 
+ stateTimeout = time.Second * 15 +) + type mockArbitratorLog struct { state ArbitratorState newStates chan ArbitratorState @@ -73,6 +83,11 @@ func (b *mockArbitratorLog) InsertUnresolvedContracts( b.Lock() for _, resolver := range resolvers { + resKey := resolver.ResolverKey() + if resKey == nil { + continue + } + b.resolvers[resolver] = struct{}{} } b.Unlock() @@ -187,6 +202,8 @@ type chanArbTestCtx struct { resolutions chan []ResolutionMsg log ArbitratorLog + + sweeper *mockSweeper } func (c *chanArbTestCtx) CleanUp() { @@ -220,7 +237,7 @@ func (c *chanArbTestCtx) AssertStateTransitions(expectedStates ...ArbitratorStat var state ArbitratorState select { case state = <-newStatesChan: - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): c.t.Fatalf("new state not received") } @@ -265,7 +282,26 @@ func (c *chanArbTestCtx) Restart(restartClosure func(*chanArbTestCtx)) (*chanArb return newCtx, nil } -func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestCtx, error) { +// testChanArbOption applies custom settings to a channel arbitrator config for +// testing purposes. +type testChanArbOption func(cfg *ChannelArbitratorConfig) + +// remoteInitiatorOption sets the MarkChannelClosed function in the +// Channel Arbitrator's config. +func withMarkClosed(markClosed func(*channeldb.ChannelCloseSummary, + ...channeldb.ChannelStatus) error) testChanArbOption { + + return func(cfg *ChannelArbitratorConfig) { + cfg.MarkChannelClosed = markClosed + } +} + +// createTestChannelArbitrator returns a channel arbitrator test context which +// contains a channel arbitrator with default values. These values can be +// changed by providing options which overwrite the default config. 
+func createTestChannelArbitrator(t *testing.T, log ArbitratorLog, + opts ...testChanArbOption) (*chanArbTestCtx, error) { + blockEpochs := make(chan *chainntnfs.BlockEpoch) blockEpoch := &chainntnfs.BlockEpochEvent{ Epochs: blockEpochs, @@ -285,6 +321,7 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestC incubateChan := make(chan struct{}) chainIO := &mockChainIO{} + mockSweeper := newMockSweeper() chainArbCfg := ChainArbitratorConfig{ ChainIO: chainIO, PublishTx: func(*wire.MsgTx) error { @@ -301,13 +338,21 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestC spendChan: make(chan *chainntnfs.SpendDetail), confChan: make(chan *chainntnfs.TxConfirmation), }, - IncubateOutputs: func(wire.OutPoint, *lnwallet.CommitOutputResolution, + IncubateOutputs: func(wire.OutPoint, *lnwallet.OutgoingHtlcResolution, *lnwallet.IncomingHtlcResolution, uint32) error { incubateChan <- struct{}{} return nil }, + OnionProcessor: &mockOnionProcessor{}, + IsForwardedHTLC: func(chanID lnwire.ShortChannelID, + htlcIndex uint64) bool { + + return true + }, + Clock: clock.NewDefaultClock(), + Sweeper: mockSweeper, } // We'll use the resolvedChan to synchronize on call to @@ -316,7 +361,7 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestC // Next we'll create the matching configuration struct that contains // all interfaces and methods the arbitrator needs to do its job. 
- arbCfg := ChannelArbitratorConfig{ + arbCfg := &ChannelArbitratorConfig{ ChanPoint: chanPoint, ShortChanID: shortChanID, BlockEpochs: blockEpoch, @@ -324,17 +369,12 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestC resolvedChan <- struct{}{} return nil }, - ForceCloseChan: func() (*lnwallet.LocalForceCloseSummary, error) { - summary := &lnwallet.LocalForceCloseSummary{ - CloseTx: &wire.MsgTx{}, - HtlcResolutions: &lnwallet.HtlcResolutions{}, - } - return summary, nil - }, - MarkCommitmentBroadcasted: func(_ *wire.MsgTx) error { + Channel: &mockChannel{}, + MarkCommitmentBroadcasted: func(_ *wire.MsgTx, _ bool) error { return nil }, - MarkChannelClosed: func(*channeldb.ChannelCloseSummary) error { + MarkChannelClosed: func(*channeldb.ChannelCloseSummary, + ...channeldb.ChannelStatus) error { return nil }, IsPendingClose: false, @@ -342,6 +382,11 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestC ChainEvents: chanEvents, } + // Apply all custom options to the config struct. 
+ for _, option := range opts { + option(arbCfg) + } + var cleanUp func() if log == nil { dbDir, err := ioutil.TempDir("", "chanArb") @@ -349,13 +394,13 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestC return nil, err } dbPath := filepath.Join(dbDir, "testdb") - db, err := bbolt.Open(dbPath, 0600, nil) + db, err := kvdb.Create(kvdb.BoltBackendName, dbPath, true) if err != nil { return nil, err } backingLog, err := newBoltArbitratorLog( - db, arbCfg, chainhash.Hash{}, chanPoint, + db, *arbCfg, chainhash.Hash{}, chanPoint, ) if err != nil { return nil, err @@ -373,7 +418,7 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestC htlcSets := make(map[HtlcSetKey]htlcSet) - chanArb := NewChannelArbitrator(arbCfg, htlcSets, log) + chanArb := NewChannelArbitrator(*arbCfg, htlcSets, log) return &chanArbTestCtx{ t: t, @@ -384,6 +429,7 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestC blockEpochs: blockEpochs, log: log, incubationRequests: incubateChan, + sweeper: mockSweeper, }, nil } @@ -416,7 +462,9 @@ func TestChannelArbitratorCooperativeClose(t *testing.T) { // We set up a channel to detect when MarkChannelClosed is called. closeInfos := make(chan *channeldb.ChannelCloseSummary) chanArbCtx.chanArb.cfg.MarkChannelClosed = func( - closeInfo *channeldb.ChannelCloseSummary) error { + closeInfo *channeldb.ChannelCloseSummary, + statuses ...channeldb.ChannelStatus) error { + closeInfos <- closeInfo return nil } @@ -433,7 +481,7 @@ func TestChannelArbitratorCooperativeClose(t *testing.T) { if c.CloseType != channeldb.CooperativeClose { t.Fatalf("expected cooperative close, got %v", c.CloseType) } - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("timeout waiting for channel close") } @@ -441,7 +489,7 @@ func TestChannelArbitratorCooperativeClose(t *testing.T) { select { case <-chanArbCtx.resolvedChan: // Expected. 
- case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("contract was not resolved") } } @@ -496,7 +544,7 @@ func TestChannelArbitratorRemoteForceClose(t *testing.T) { select { case <-chanArbCtx.resolvedChan: // Expected. - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("contract was not resolved") } } @@ -558,7 +606,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) { if state != StateBroadcastCommit { t.Fatalf("state during PublishTx was %v", state) } - case <-time.After(15 * time.Second): + case <-time.After(stateTimeout): t.Fatalf("did not get state update") } @@ -568,7 +616,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) { select { case <-respChan: - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -577,7 +625,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) { if err != nil { t.Fatalf("error force closing channel: %v", err) } - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -602,7 +650,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) { select { case <-chanArbCtx.resolvedChan: // Expected. - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("contract was not resolved") } } @@ -646,7 +694,7 @@ func TestChannelArbitratorBreachClose(t *testing.T) { select { case <-chanArbCtx.resolvedChan: // Expected. 
- case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("contract was not resolved") } } @@ -730,7 +778,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { ) select { case <-respChan: - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -739,7 +787,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { if err != nil { t.Fatalf("error force closing channel: %v", err) } - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -817,7 +865,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { t.Fatalf("wrong htlc index: expected %v, got %v", outgoingDustHtlc.HtlcIndex, msgs[0].HtlcIndex) } - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("resolution msgs not sent") } @@ -858,10 +906,10 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { resolver) } - // The resolver should have its htlcAmt field populated as it. - if int64(outgoingResolver.htlcAmt) != int64(htlcAmt) { + // The resolver should have its htlc amt field populated as it. + if int64(outgoingResolver.htlc.Amt) != int64(htlcAmt) { t.Fatalf("wrong htlc amount: expected %v, got %v,", - htlcAmt, int64(outgoingResolver.htlcAmt)) + htlcAmt, int64(outgoingResolver.htlc.Amt)) } // htlcOutgoingContestResolver is now active and waiting for the HTLC to @@ -879,7 +927,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { // htlcTimeoutResolver and should send the contract off for incubation. 
select { case <-chanArbCtx.incubationRequests: - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -899,7 +947,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { t.Fatalf("wrong htlc index: expected %v, got %v", htlc.HtlcIndex, msgs[0].HtlcIndex) } - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("resolution msgs not sent") } @@ -919,7 +967,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { chanArbCtxNew.AssertStateTransitions(StateFullyResolved) select { case <-chanArbCtxNew.resolvedChan: - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("contract was not resolved") } } @@ -981,7 +1029,7 @@ func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) { if state != StateBroadcastCommit { t.Fatalf("state during PublishTx was %v", state) } - case <-time.After(15 * time.Second): + case <-time.After(stateTimeout): t.Fatalf("no state update received") } @@ -992,7 +1040,7 @@ func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) { // Wait for a response to the force close. select { case <-respChan: - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -1001,7 +1049,7 @@ func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) { if err != nil { t.Fatalf("error force closing channel: %v", err) } - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -1027,7 +1075,7 @@ func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) { select { case <-chanArbCtx.resolvedChan: // Expected. 
- case <-time.After(15 * time.Second): + case <-time.After(stateTimeout): t.Fatalf("contract was not resolved") } } @@ -1089,7 +1137,7 @@ func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) { if state != StateBroadcastCommit { t.Fatalf("state during PublishTx was %v", state) } - case <-time.After(15 * time.Second): + case <-time.After(stateTimeout): t.Fatalf("no state update received") } @@ -1100,7 +1148,7 @@ func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) { // Wait for a response to the force close. select { case <-respChan: - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -1109,7 +1157,7 @@ func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) { if err != nil { t.Fatalf("error force closing channel: %v", err) } - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -1135,7 +1183,7 @@ func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) { select { case <-chanArbCtx.resolvedChan: // Expected. - case <-time.After(15 * time.Second): + case <-time.After(stateTimeout): t.Fatalf("contract was not resolved") } } @@ -1197,7 +1245,9 @@ func TestChannelArbitratorPersistence(t *testing.T) { // Now we make the log succeed writing the resolutions, but fail when // attempting to close the channel. log.failLog = false - chanArb.cfg.MarkChannelClosed = func(*channeldb.ChannelCloseSummary) error { + chanArb.cfg.MarkChannelClosed = func(*channeldb.ChannelCloseSummary, + ...channeldb.ChannelStatus) error { + return fmt.Errorf("intentional close error") } @@ -1256,7 +1306,7 @@ func TestChannelArbitratorPersistence(t *testing.T) { select { case <-chanArbCtx.resolvedChan: // Expected. 
- case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("contract was not resolved") } } @@ -1320,7 +1370,7 @@ func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) { if state != StateBroadcastCommit { t.Fatalf("state during PublishTx was %v", state) } - case <-time.After(15 * time.Second): + case <-time.After(stateTimeout): t.Fatalf("no state update received") } @@ -1331,7 +1381,7 @@ func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) { t.Fatalf("unexpected error force closing channel: %v", err) } - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("no response received") } @@ -1355,7 +1405,7 @@ func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) { select { case <-chanArbCtx.resolvedChan: // Expected. - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("contract was not resolved") } } @@ -1449,7 +1499,8 @@ func TestChannelArbitratorCommitFailure(t *testing.T) { closed := make(chan struct{}) chanArb.cfg.MarkChannelClosed = func( - *channeldb.ChannelCloseSummary) error { + *channeldb.ChannelCloseSummary, + ...channeldb.ChannelStatus) error { close(closed) return nil } @@ -1459,7 +1510,7 @@ func TestChannelArbitratorCommitFailure(t *testing.T) { select { case <-closed: - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("channel was not marked closed") } @@ -1493,7 +1544,7 @@ func TestChannelArbitratorCommitFailure(t *testing.T) { select { case <-chanArbCtx.resolvedChan: // Expected. 
- case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("contract was not resolved") } } @@ -1797,7 +1848,7 @@ func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) { t.Fatalf("wrong htlc index: expected %v, got %v", htlcIndex, msgs[0].HtlcIndex) } - case <-time.After(5 * time.Second): + case <-time.After(defaultTimeout): t.Fatalf("resolution msgs not sent") } @@ -1810,3 +1861,392 @@ func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) { }) } } + +// TestChannelArbitratorPendingExpiredHTLC tests that if we have pending htlc +// that is expired we will only go to chain if we are running at least the +// time defined in PaymentsExpirationGracePeriod. +// During this time the remote party is expected to send his updates and cancel +// The htlc. +func TestChannelArbitratorPendingExpiredHTLC(t *testing.T) { + t.Parallel() + + // We'll create the arbitrator and its backing log in a default state. + log := &mockArbitratorLog{ + state: StateDefault, + newStates: make(chan ArbitratorState, 5), + resolvers: make(map[ContractResolver]struct{}), + } + chanArbCtx, err := createTestChannelArbitrator(t, log) + if err != nil { + t.Fatalf("unable to create ChannelArbitrator: %v", err) + } + chanArb := chanArbCtx.chanArb + + // We'll inject a test clock implementation so we can control the uptime. + startTime := time.Date(2020, time.February, 3, 13, 0, 0, 0, time.UTC) + testClock := clock.NewTestClock(startTime) + chanArb.cfg.Clock = testClock + + // We also configure the grace period and the IsForwardedHTLC to identify + // the htlc as our initiated payment. 
+ chanArb.cfg.PaymentsExpirationGracePeriod = time.Second * 15 + chanArb.cfg.IsForwardedHTLC = func(chanID lnwire.ShortChannelID, + htlcIndex uint64) bool { + + return false + } + + if err := chanArb.Start(); err != nil { + t.Fatalf("unable to start ChannelArbitrator: %v", err) + } + defer func() { + if err := chanArb.Stop(); err != nil { + t.Fatalf("unable to stop chan arb: %v", err) + } + }() + + // Now that our channel arb has started, we'll set up + // its contract signals channel so we can send it + // various HTLC updates for this test. + htlcUpdates := make(chan *ContractUpdate) + signals := &ContractSignals{ + HtlcUpdates: htlcUpdates, + ShortChanID: lnwire.ShortChannelID{}, + } + chanArb.UpdateContractSignals(signals) + + // Next, we'll send it a new HTLC that is set to expire + // in 10 blocks. + htlcIndex := uint64(99) + htlcExpiry := uint32(10) + pendingHTLC := channeldb.HTLC{ + Incoming: false, + Amt: 10000, + HtlcIndex: htlcIndex, + RefundTimeout: htlcExpiry, + } + htlcUpdates <- &ContractUpdate{ + HtlcKey: RemoteHtlcSet, + Htlcs: []channeldb.HTLC{pendingHTLC}, + } + + // We will advance the uptime to 10 seconds which should be still within + // the grace period and should not trigger going to chain. + testClock.SetTime(startTime.Add(time.Second * 10)) + chanArbCtx.blockEpochs <- &chainntnfs.BlockEpoch{Height: 5} + chanArbCtx.AssertState(StateDefault) + + // We will advance the uptime to 16 seconds which should trigger going + // to chain. + testClock.SetTime(startTime.Add(time.Second * 16)) + chanArbCtx.blockEpochs <- &chainntnfs.BlockEpoch{Height: 6} + chanArbCtx.AssertStateTransitions( + StateBroadcastCommit, + StateCommitmentBroadcasted, + ) +} + +// TestRemoteCloseInitiator tests the setting of close initiator statuses +// for remote force closes and breaches. +func TestRemoteCloseInitiator(t *testing.T) { + // getCloseSummary returns a unilateral close summary for the channel + // provided. 
+ getCloseSummary := func(channel *channeldb.OpenChannel) *RemoteUnilateralCloseInfo { + return &RemoteUnilateralCloseInfo{ + UnilateralCloseSummary: &lnwallet.UnilateralCloseSummary{ + SpendDetail: &chainntnfs.SpendDetail{ + SpenderTxHash: &chainhash.Hash{}, + SpendingTx: &wire.MsgTx{ + TxIn: []*wire.TxIn{}, + TxOut: []*wire.TxOut{}, + }, + }, + ChannelCloseSummary: channeldb.ChannelCloseSummary{ + ChanPoint: channel.FundingOutpoint, + RemotePub: channel.IdentityPub, + SettledBalance: btcutil.Amount(500), + TimeLockedBalance: btcutil.Amount(10000), + IsPending: false, + }, + HtlcResolutions: &lnwallet.HtlcResolutions{}, + }, + } + } + + tests := []struct { + name string + + // notifyClose sends the appropriate chain event to indicate + // that the channel has closed. The event subscription channel + // is expected to be buffered, as is the default for test + // channel arbitrators. + notifyClose func(sub *ChainEventSubscription, + channel *channeldb.OpenChannel) + + // expectedStates is the set of states we expect the arbitrator + // to progress through. + expectedStates []ArbitratorState + }{ + { + name: "force close", + notifyClose: func(sub *ChainEventSubscription, + channel *channeldb.OpenChannel) { + + s := getCloseSummary(channel) + sub.RemoteUnilateralClosure <- s + }, + expectedStates: []ArbitratorState{ + StateContractClosed, StateFullyResolved, + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + // First, create alice's channel. + alice, _, cleanUp, err := lnwallet.CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + t.Fatalf("unable to create test channels: %v", + err) + } + defer cleanUp() + + // Create a mock log which will not block the test's + // expected number of transitions transitions, and has + // no commit resolutions so that the channel will + // resolve immediately. 
+ log := &mockArbitratorLog{ + state: StateDefault, + newStates: make(chan ArbitratorState, + len(test.expectedStates)), + resolutions: &ContractResolutions{ + CommitHash: chainhash.Hash{}, + CommitResolution: nil, + }, + } + + // Mock marking the channel as closed, we only care + // about setting of channel status. + mockMarkClosed := func(_ *channeldb.ChannelCloseSummary, + statuses ...channeldb.ChannelStatus) error { + for _, status := range statuses { + err := alice.State().ApplyChanStatus(status) + if err != nil { + return err + } + } + return nil + } + + chanArbCtx, err := createTestChannelArbitrator( + t, log, withMarkClosed(mockMarkClosed), + ) + if err != nil { + t.Fatalf("unable to create "+ + "ChannelArbitrator: %v", err) + } + chanArb := chanArbCtx.chanArb + + if err := chanArb.Start(); err != nil { + t.Fatalf("unable to start "+ + "ChannelArbitrator: %v", err) + } + defer func() { + if err := chanArb.Stop(); err != nil { + t.Fatal(err) + } + }() + + // It should start out in the default state. + chanArbCtx.AssertState(StateDefault) + + // Notify the close event. + test.notifyClose(chanArb.cfg.ChainEvents, alice.State()) + + // Check that the channel transitions as expected. + chanArbCtx.AssertStateTransitions( + test.expectedStates..., + ) + + // It should also mark the channel as resolved. + select { + case <-chanArbCtx.resolvedChan: + // Expected. + case <-time.After(defaultTimeout): + t.Fatalf("contract was not resolved") + } + + // Check that alice has the status we expect. + if !alice.State().HasChanStatus( + channeldb.ChanStatusRemoteCloseInitiator, + ) { + t.Fatalf("expected remote close initiator, "+ + "got: %v", alice.State().ChanStatus()) + } + }) + } +} + +// TestChannelArbitratorAnchors asserts that the commitment tx anchor is swept. 
+func TestChannelArbitratorAnchors(t *testing.T) { + log := &mockArbitratorLog{ + state: StateDefault, + newStates: make(chan ArbitratorState, 5), + } + + chanArbCtx, err := createTestChannelArbitrator(t, log) + if err != nil { + t.Fatalf("unable to create ChannelArbitrator: %v", err) + } + chanArb := chanArbCtx.chanArb + chanArb.cfg.PreimageDB = newMockWitnessBeacon() + chanArb.cfg.Registry = &mockRegistry{} + + // Setup two pre-confirmation anchor resolutions on the mock channel. + chanArb.cfg.Channel.(*mockChannel).anchorResolutions = + []*lnwallet.AnchorResolution{ + {}, {}, + } + + if err := chanArb.Start(); err != nil { + t.Fatalf("unable to start ChannelArbitrator: %v", err) + } + defer func() { + if err := chanArb.Stop(); err != nil { + t.Fatal(err) + } + }() + + // Create htlcUpdates channel. + htlcUpdates := make(chan *ContractUpdate) + + signals := &ContractSignals{ + HtlcUpdates: htlcUpdates, + ShortChanID: lnwire.ShortChannelID{}, + } + chanArb.UpdateContractSignals(signals) + + errChan := make(chan error, 1) + respChan := make(chan *wire.MsgTx, 1) + + // With the channel found, and the request crafted, we'll send over a + // force close request to the arbitrator that watches this channel. + chanArb.forceCloseReqs <- &forceCloseReq{ + errResp: errChan, + closeTx: respChan, + } + + // The force close request should trigger broadcast of the commitment + // transaction. + chanArbCtx.AssertStateTransitions( + StateBroadcastCommit, + StateCommitmentBroadcasted, + ) + + // With the commitment tx still unconfirmed, we expect sweep attempts + // for all three versions of the commitment transaction. 
+ <-chanArbCtx.sweeper.sweptInputs + <-chanArbCtx.sweeper.sweptInputs + + select { + case <-respChan: + case <-time.After(5 * time.Second): + t.Fatalf("no response received") + } + + select { + case err := <-errChan: + if err != nil { + t.Fatalf("error force closing channel: %v", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("no response received") + } + + // Now notify about the local force close getting confirmed. + closeTx := &wire.MsgTx{ + TxIn: []*wire.TxIn{ + { + PreviousOutPoint: wire.OutPoint{}, + Witness: [][]byte{ + {0x1}, + {0x2}, + }, + }, + }, + } + + chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{ + SpendDetail: &chainntnfs.SpendDetail{}, + LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{ + CloseTx: closeTx, + HtlcResolutions: &lnwallet.HtlcResolutions{}, + AnchorResolution: &lnwallet.AnchorResolution{ + AnchorSignDescriptor: input.SignDescriptor{ + Output: &wire.TxOut{ + Value: 1, + }, + }, + }, + }, + ChannelCloseSummary: &channeldb.ChannelCloseSummary{}, + CommitSet: CommitSet{ + ConfCommitKey: &LocalHtlcSet, + HtlcSets: map[HtlcSetKey][]channeldb.HTLC{}, + }, + } + + chanArbCtx.AssertStateTransitions( + StateContractClosed, + StateWaitingFullResolution, + ) + + // We expect to only have the anchor resolver active. + if len(chanArb.activeResolvers) != 1 { + t.Fatalf("expected single resolver, instead got: %v", + len(chanArb.activeResolvers)) + } + + resolver := chanArb.activeResolvers[0] + _, ok := resolver.(*anchorResolver) + if !ok { + t.Fatalf("expected anchor resolver, got %T", resolver) + } + + // The anchor resolver is expected to offer the anchor input to the + // sweeper. + <-chanArbCtx.sweeper.updatedInputs + + // The mock sweeper immediately signals success for that input. This + // should transition the channel to the resolved state. 
+ chanArbCtx.AssertStateTransitions(StateFullyResolved) + select { + case <-chanArbCtx.resolvedChan: + case <-time.After(5 * time.Second): + t.Fatalf("contract was not resolved") + } +} + +type mockChannel struct { + anchorResolutions []*lnwallet.AnchorResolution +} + +func (m *mockChannel) NewAnchorResolutions() ([]*lnwallet.AnchorResolution, + error) { + + return m.anchorResolutions, nil +} + +func (m *mockChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, error) { + summary := &lnwallet.LocalForceCloseSummary{ + CloseTx: &wire.MsgTx{}, + HtlcResolutions: &lnwallet.HtlcResolutions{}, + } + return summary, nil +} diff --git a/contractcourt/commit_sweep_resolver.go b/contractcourt/commit_sweep_resolver.go index 4e57127288..fe09dc09ff 100644 --- a/contractcourt/commit_sweep_resolver.go +++ b/contractcourt/commit_sweep_resolver.go @@ -2,9 +2,13 @@ package contractcourt import ( "encoding/binary" + "fmt" "io" + "sync" + "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/sweep" @@ -36,7 +40,32 @@ type commitSweepResolver struct { // chanPoint is the channel point of the original contract. chanPoint wire.OutPoint - ResolverKit + // currentReport stores the current state of the resolver for reporting + // over the rpc interface. + currentReport ContractReport + + // reportLock prevents concurrent access to the resolver report. + reportLock sync.Mutex + + contractResolverKit +} + +// newCommitSweepResolver instantiates a new direct commit output resolver. 
+func newCommitSweepResolver(res lnwallet.CommitOutputResolution, + broadcastHeight uint32, + chanPoint wire.OutPoint, resCfg ResolverConfig) *commitSweepResolver { + + r := &commitSweepResolver{ + contractResolverKit: *newContractResolverKit(resCfg), + commitResolution: res, + broadcastHeight: broadcastHeight, + chanPoint: chanPoint, + } + + r.initLogger(r) + r.initReport() + + return r } // ResolverKey returns an identifier which should be globally unique for this @@ -46,6 +75,63 @@ func (c *commitSweepResolver) ResolverKey() []byte { return key[:] } +// waitForHeight registers for block notifications and waits for the provided +// block height to be reached. +func (c *commitSweepResolver) waitForHeight(waitHeight uint32) error { + // Register for block epochs. After registration, the current height + // will be sent on the channel immediately. + blockEpochs, err := c.Notifier.RegisterBlockEpochNtfn(nil) + if err != nil { + return err + } + defer blockEpochs.Cancel() + + for { + select { + case newBlock, ok := <-blockEpochs.Epochs: + if !ok { + return errResolverShuttingDown + } + height := newBlock.Height + if height >= int32(waitHeight) { + return nil + } + + case <-c.quit: + return errResolverShuttingDown + } + } +} + +// getCommitTxConfHeight waits for confirmation of the commitment tx and returns +// the confirmation height. 
+func (c *commitSweepResolver) getCommitTxConfHeight() (uint32, error) { + txID := c.commitResolution.SelfOutPoint.Hash + signDesc := c.commitResolution.SelfOutputSignDesc + pkScript := signDesc.Output.PkScript + const confDepth = 1 + confChan, err := c.Notifier.RegisterConfirmationsNtfn( + &txID, pkScript, confDepth, c.broadcastHeight, + ) + if err != nil { + return 0, err + } + defer confChan.Cancel() + + select { + case txConfirmation, ok := <-confChan.Confirmed: + if !ok { + return 0, fmt.Errorf("cannot get confirmation "+ + "for commit tx %v", txID) + } + + return txConfirmation.BlockHeight, nil + + case <-c.quit: + return 0, errResolverShuttingDown + } +} + // Resolve instructs the contract resolver to resolve the output on-chain. Once // the output has been *fully* resolved, the function should return immediately // with a nil ContractResolver value for the first return value. In the case @@ -59,159 +145,121 @@ func (c *commitSweepResolver) Resolve() (ContractResolver, error) { return nil, nil } - // First, we'll register for a notification once the commitment output - // itself has been confirmed. - // - // TODO(roasbeef): instead sweep asap if remote commit? yeh - commitTXID := c.commitResolution.SelfOutPoint.Hash - sweepScript := c.commitResolution.SelfOutputSignDesc.Output.PkScript - confNtfn, err := c.Notifier.RegisterConfirmationsNtfn( - &commitTXID, sweepScript, 1, c.broadcastHeight, - ) + confHeight, err := c.getCommitTxConfHeight() if err != nil { return nil, err } - log.Debugf("%T(%v): waiting for commit tx to confirm", c, c.chanPoint) + unlockHeight := confHeight + c.commitResolution.MaturityDelay - select { - case _, ok := <-confNtfn.Confirmed: - if !ok { - return nil, errResolverShuttingDown - } + c.log.Debugf("commit conf_height=%v, unlock_height=%v", + confHeight, unlockHeight) - case <-c.Quit: - return nil, errResolverShuttingDown - } + // Update report now that we learned the confirmation height. 
+ c.reportLock.Lock() + c.currentReport.MaturityHeight = unlockHeight + c.reportLock.Unlock() - // We're dealing with our commitment transaction if the delay on the - // resolution isn't zero. - isLocalCommitTx := c.commitResolution.MaturityDelay != 0 - - if !isLocalCommitTx { - // There're two types of commitments, those that have tweaks - // for the remote key (us in this case), and those that don't. - // We'll rely on the presence of the commitment tweak to to - // discern which type of commitment this is. - var witnessType input.WitnessType - if c.commitResolution.SelfOutputSignDesc.SingleTweak == nil { - witnessType = input.CommitSpendNoDelayTweakless - } else { - witnessType = input.CommitmentNoDelay - } + // If there is a csv delay, we'll wait for that. + if c.commitResolution.MaturityDelay > 0 { + c.log.Debugf("waiting for csv lock to expire at height %v", + unlockHeight) - // We'll craft an input with all the information required for - // the sweeper to create a fully valid sweeping transaction to - // recover these coins. - inp := input.MakeBaseInput( - &c.commitResolution.SelfOutPoint, - witnessType, - &c.commitResolution.SelfOutputSignDesc, - c.broadcastHeight, - ) - - // With our input constructed, we'll now offer it to the - // sweeper. - log.Infof("%T(%v): sweeping commit output", c, c.chanPoint) - - feePref := sweep.FeePreference{ConfTarget: commitOutputConfTarget} - resultChan, err := c.Sweeper.SweepInput(&inp, feePref) + // We only need to wait for the block before the block that + // unlocks the spend path. + err := c.waitForHeight(unlockHeight - 1) if err != nil { - log.Errorf("%T(%v): unable to sweep input: %v", - c, c.chanPoint, err) - return nil, err } + } - // Sweeper is going to join this input with other inputs if - // possible and publish the sweep tx. When the sweep tx - // confirms, it signals us through the result channel with the - // outcome. Wait for this to happen. 
- select { - case sweepResult := <-resultChan: - if sweepResult.Err != nil { - log.Errorf("%T(%v): unable to sweep input: %v", - c, c.chanPoint, sweepResult.Err) - - return nil, sweepResult.Err - } - - log.Infof("ChannelPoint(%v) commit tx is fully resolved by "+ - "sweep tx: %v", c.chanPoint, sweepResult.Tx.TxHash()) - case <-c.Quit: - return nil, errResolverShuttingDown - } - - c.resolved = true - return nil, c.Checkpoint(c) + // The output is on our local commitment if the script starts with + // OP_IF for the revocation clause. On the remote commitment it will + // either be a regular P2WKH or a simple sig spend with a CSV delay. + isLocalCommitTx := c.commitResolution.SelfOutputSignDesc.WitnessScript[0] == txscript.OP_IF + isDelayedOutput := c.commitResolution.MaturityDelay != 0 + + c.log.Debugf("isDelayedOutput=%v, isLocalCommitTx=%v", isDelayedOutput, + isLocalCommitTx) + + // There're three types of commitments, those that have tweaks + // for the remote key (us in this case), those that don't, and a third + // where there is no tweak and the output is delayed. On the local + // commitment our output will always be delayed. We'll rely on the + // presence of the commitment tweak to to discern which type of + // commitment this is. + var witnessType input.WitnessType + switch { + + // Delayed output to us on our local commitment. + case isLocalCommitTx: + witnessType = input.CommitmentTimeLock + + // A confirmed output to us on the remote commitment. + case isDelayedOutput: + witnessType = input.CommitmentToRemoteConfirmed + + // A non-delayed output on the remote commitment where the key is + // tweakless. + case c.commitResolution.SelfOutputSignDesc.SingleTweak == nil: + witnessType = input.CommitSpendNoDelayTweakless + + // A non-delayed output on the remote commitment where the key is + // tweaked. 
+ default: + witnessType = input.CommitmentNoDelay } - // Otherwise we are dealing with a local commitment transaction and the - // output we need to sweep has been sent to the nursery for incubation. - // In this case, we'll wait until the commitment output has been spent. - spendNtfn, err := c.Notifier.RegisterSpendNtfn( + c.log.Infof("Sweeping with witness type: %v", witnessType) + + // We'll craft an input with all the information required for + // the sweeper to create a fully valid sweeping transaction to + // recover these coins. + inp := input.NewCsvInput( &c.commitResolution.SelfOutPoint, - c.commitResolution.SelfOutputSignDesc.Output.PkScript, + witnessType, + &c.commitResolution.SelfOutputSignDesc, c.broadcastHeight, + c.commitResolution.MaturityDelay, ) - if err != nil { - return nil, err - } - - log.Infof("%T(%v): waiting for commit output to be swept", c, - c.chanPoint) - - var sweepTx *wire.MsgTx - select { - case commitSpend, ok := <-spendNtfn.Spend: - if !ok { - return nil, errResolverShuttingDown - } - - // Once we detect the commitment output has been spent, - // we'll extract the spending transaction itself, as we - // now consider this to be our sweep transaction. - sweepTx = commitSpend.SpendingTx - - log.Infof("%T(%v): commit output swept by txid=%v", - c, c.chanPoint, sweepTx.TxHash()) - - if err := c.Checkpoint(c); err != nil { - log.Errorf("unable to Checkpoint: %v", err) - return nil, err - } - case <-c.Quit: - return nil, errResolverShuttingDown - } - log.Infof("%T(%v): waiting for commit sweep txid=%v conf", c, c.chanPoint, - sweepTx.TxHash()) + // With our input constructed, we'll now offer it to the + // sweeper. + c.log.Infof("sweeping commit output") - // Now we'll wait until the sweeping transaction has been fully - // confirmed. Once it's confirmed, we can mark this contract resolved. 
- sweepTXID := sweepTx.TxHash() - sweepingScript := sweepTx.TxOut[0].PkScript - confNtfn, err = c.Notifier.RegisterConfirmationsNtfn( - &sweepTXID, sweepingScript, 1, c.broadcastHeight, - ) + feePref := sweep.FeePreference{ConfTarget: commitOutputConfTarget} + resultChan, err := c.Sweeper.SweepInput(inp, sweep.Params{Fee: feePref}) if err != nil { + c.log.Errorf("unable to sweep input: %v", err) + return nil, err } + + // Sweeper is going to join this input with other inputs if + // possible and publish the sweep tx. When the sweep tx + // confirms, it signals us through the result channel with the + // outcome. Wait for this to happen. select { - case confInfo, ok := <-confNtfn.Confirmed: - if !ok { - return nil, errResolverShuttingDown - } + case sweepResult := <-resultChan: + if sweepResult.Err != nil { + c.log.Errorf("unable to sweep input: %v", + sweepResult.Err) - log.Infof("ChannelPoint(%v) commit tx is fully resolved, at height: %v", - c.chanPoint, confInfo.BlockHeight) + return nil, sweepResult.Err + } - case <-c.Quit: + c.log.Infof("commit tx fully resolved by sweep tx: %v", + sweepResult.Tx.TxHash()) + case <-c.quit: return nil, errResolverShuttingDown } - // Once the transaction has received a sufficient number of - // confirmations, we'll mark ourselves as fully resolved and exit. + // Funds have been swept and balance is no longer in limbo. + c.reportLock.Lock() + c.currentReport.RecoveredBalance = c.currentReport.LimboBalance + c.currentReport.LimboBalance = 0 + c.reportLock.Unlock() + c.resolved = true return nil, c.Checkpoint(c) } @@ -221,7 +269,7 @@ func (c *commitSweepResolver) Resolve() (ContractResolver, error) { // // NOTE: Part of the ContractResolver interface. 
func (c *commitSweepResolver) Stop() { - close(c.Quit) + close(c.quit) } // IsResolved returns true if the stored state in the resolve is fully @@ -262,46 +310,76 @@ func (c *commitSweepResolver) Encode(w io.Writer) error { return nil } -// Decode attempts to decode an encoded ContractResolver from the passed Reader -// instance, returning an active ContractResolver instance. -// -// NOTE: Part of the ContractResolver interface. -func (c *commitSweepResolver) Decode(r io.Reader) error { +// newCommitSweepResolverFromReader attempts to decode an encoded +// ContractResolver from the passed Reader instance, returning an active +// ContractResolver instance. +func newCommitSweepResolverFromReader(r io.Reader, resCfg ResolverConfig) ( + *commitSweepResolver, error) { + + c := &commitSweepResolver{ + contractResolverKit: *newContractResolverKit(resCfg), + } + if err := decodeCommitResolution(r, &c.commitResolution); err != nil { - return err + return nil, err } if err := binary.Read(r, endian, &c.resolved); err != nil { - return err + return nil, err } if err := binary.Read(r, endian, &c.broadcastHeight); err != nil { - return err + return nil, err } _, err := io.ReadFull(r, c.chanPoint.Hash[:]) if err != nil { - return err + return nil, err } err = binary.Read(r, endian, &c.chanPoint.Index) if err != nil { - return err + return nil, err } // Previously a sweep tx was deserialized at this point. Refactoring // removed this, but keep in mind that this data may still be present in // the database. - return nil + c.initLogger(c) + c.initReport() + + return c, nil } -// AttachResolverKit should be called once a resolved is successfully decoded -// from its stored format. This struct delivers a generic tool kit that -// resolvers need to complete their duty. -// -// NOTE: Part of the ContractResolver interface. -func (c *commitSweepResolver) AttachResolverKit(r ResolverKit) { - c.ResolverKit = r +// report returns a report on the resolution state of the contract. 
+func (c *commitSweepResolver) report() *ContractReport {
+	c.reportLock.Lock()
+	defer c.reportLock.Unlock()
+
+	copy := c.currentReport
+	return &copy
+}
+
+// initReport initializes the pending channels report for this resolver.
+func (c *commitSweepResolver) initReport() {
+	amt := btcutil.Amount(
+		c.commitResolution.SelfOutputSignDesc.Output.Value,
+	)
+
+	// Set the initial report. All fields are filled in, except for the
+	// maturity height which remains 0 until Resolve() is executed.
+	//
+	// TODO(joostjager): Resolvers only activate after the commit tx
+	// confirms. With more refactoring in channel arbitrator, it would be
+	// possible to make the confirmation height part of ResolverConfig and
+	// populate MaturityHeight here.
+	c.currentReport = ContractReport{
+		Outpoint:         c.commitResolution.SelfOutPoint,
+		Type:             ReportOutputUnencumbered,
+		Amount:           amt,
+		LimboBalance:     amt,
+		RecoveredBalance: 0,
+	}
+}
 
 // A compile time assertion to ensure commitSweepResolver meets the
 // ContractResolver interface.
-var _ ContractResolver = (*commitSweepResolver)(nil) +var _ reportingContractResolver = (*commitSweepResolver)(nil) diff --git a/contractcourt/commit_sweep_resolver_test.go b/contractcourt/commit_sweep_resolver_test.go new file mode 100644 index 0000000000..279cc6e846 --- /dev/null +++ b/contractcourt/commit_sweep_resolver_test.go @@ -0,0 +1,246 @@ +package contractcourt + +import ( + "reflect" + "testing" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/sweep" +) + +type commitSweepResolverTestContext struct { + resolver *commitSweepResolver + notifier *mockNotifier + sweeper *mockSweeper + resolverResultChan chan resolveResult + t *testing.T +} + +func newCommitSweepResolverTestContext(t *testing.T, + resolution *lnwallet.CommitOutputResolution) *commitSweepResolverTestContext { + + notifier := &mockNotifier{ + epochChan: make(chan *chainntnfs.BlockEpoch), + spendChan: make(chan *chainntnfs.SpendDetail), + confChan: make(chan *chainntnfs.TxConfirmation), + } + + sweeper := newMockSweeper() + + checkPointChan := make(chan struct{}, 1) + + chainCfg := ChannelArbitratorConfig{ + ChainArbitratorConfig: ChainArbitratorConfig{ + Notifier: notifier, + Sweeper: sweeper, + }, + } + + cfg := ResolverConfig{ + ChannelArbitratorConfig: chainCfg, + Checkpoint: func(_ ContractResolver) error { + checkPointChan <- struct{}{} + return nil + }, + } + + resolver := newCommitSweepResolver( + *resolution, 0, wire.OutPoint{}, cfg, + ) + + return &commitSweepResolverTestContext{ + resolver: resolver, + notifier: notifier, + sweeper: sweeper, + t: t, + } +} + +func (i *commitSweepResolverTestContext) resolve() { + // Start resolver. 
+ i.resolverResultChan = make(chan resolveResult, 1) + go func() { + nextResolver, err := i.resolver.Resolve() + i.resolverResultChan <- resolveResult{ + nextResolver: nextResolver, + err: err, + } + }() +} + +func (i *commitSweepResolverTestContext) notifyEpoch(height int32) { + i.notifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: height, + } +} + +func (i *commitSweepResolverTestContext) waitForResult() { + i.t.Helper() + + result := <-i.resolverResultChan + if result.err != nil { + i.t.Fatal(result.err) + } + + if result.nextResolver != nil { + i.t.Fatal("expected no next resolver") + } +} + +type mockSweeper struct { + sweptInputs chan input.Input + updatedInputs chan wire.OutPoint +} + +func newMockSweeper() *mockSweeper { + return &mockSweeper{ + sweptInputs: make(chan input.Input), + updatedInputs: make(chan wire.OutPoint), + } +} + +func (s *mockSweeper) SweepInput(input input.Input, params sweep.Params) ( + chan sweep.Result, error) { + + s.sweptInputs <- input + + result := make(chan sweep.Result, 1) + result <- sweep.Result{ + Tx: &wire.MsgTx{}, + } + return result, nil +} + +func (s *mockSweeper) CreateSweepTx(inputs []input.Input, feePref sweep.FeePreference, + currentBlockHeight uint32) (*wire.MsgTx, error) { + + return nil, nil +} + +func (s *mockSweeper) RelayFeePerKW() chainfee.SatPerKWeight { + return 253 +} + +func (s *mockSweeper) UpdateParams(input wire.OutPoint, + params sweep.ParamsUpdate) (chan sweep.Result, error) { + + s.updatedInputs <- input + + result := make(chan sweep.Result, 1) + result <- sweep.Result{ + Tx: &wire.MsgTx{}, + } + return result, nil +} + +var _ UtxoSweeper = &mockSweeper{} + +// TestCommitSweepResolverNoDelay tests resolution of a direct commitment output +// unencumbered by a time lock. 
+func TestCommitSweepResolverNoDelay(t *testing.T) { + t.Parallel() + defer timeout(t)() + + res := lnwallet.CommitOutputResolution{ + SelfOutputSignDesc: input.SignDescriptor{ + Output: &wire.TxOut{ + Value: 100, + }, + WitnessScript: []byte{0}, + }, + } + + ctx := newCommitSweepResolverTestContext(t, &res) + ctx.resolve() + + ctx.notifier.confChan <- &chainntnfs.TxConfirmation{} + + // No csv delay, so the input should be swept immediately. + <-ctx.sweeper.sweptInputs + + ctx.waitForResult() +} + +// TestCommitSweepResolverDelay tests resolution of a direct commitment output +// that is encumbered by a time lock. +func TestCommitSweepResolverDelay(t *testing.T) { + t.Parallel() + defer timeout(t)() + + amt := int64(100) + outpoint := wire.OutPoint{ + Index: 5, + } + res := lnwallet.CommitOutputResolution{ + SelfOutputSignDesc: input.SignDescriptor{ + Output: &wire.TxOut{ + Value: amt, + }, + WitnessScript: []byte{0}, + }, + MaturityDelay: 3, + SelfOutPoint: outpoint, + } + + ctx := newCommitSweepResolverTestContext(t, &res) + + report := ctx.resolver.report() + if !reflect.DeepEqual(report, &ContractReport{ + Outpoint: outpoint, + Type: ReportOutputUnencumbered, + Amount: btcutil.Amount(amt), + LimboBalance: btcutil.Amount(amt), + }) { + t.Fatal("unexpected resolver report") + } + + ctx.resolve() + + ctx.notifier.confChan <- &chainntnfs.TxConfirmation{ + BlockHeight: testInitialBlockHeight - 1, + } + + // Allow resolver to process confirmation. + time.Sleep(100 * time.Millisecond) + + // Expect report to be updated. + report = ctx.resolver.report() + if report.MaturityHeight != testInitialBlockHeight+2 { + t.Fatal("report maturity height incorrect") + } + + // Notify initial block height. The csv lock is still in effect, so we + // don't expect any sweep to happen yet. + ctx.notifyEpoch(testInitialBlockHeight) + + select { + case <-ctx.sweeper.sweptInputs: + t.Fatal("no sweep expected") + case <-time.After(100 * time.Millisecond): + } + + // A new block arrives. 
The commit tx confirmed at height -1 and the csv + // is 3, so a spend will be valid in the first block after height +1. + ctx.notifyEpoch(testInitialBlockHeight + 1) + + <-ctx.sweeper.sweptInputs + + ctx.waitForResult() + + report = ctx.resolver.report() + if !reflect.DeepEqual(report, &ContractReport{ + Outpoint: outpoint, + Type: ReportOutputUnencumbered, + Amount: btcutil.Amount(amt), + RecoveredBalance: btcutil.Amount(amt), + MaturityHeight: testInitialBlockHeight + 2, + }) { + t.Fatal("unexpected resolver report") + } +} diff --git a/contractcourt/contract_resolvers.go b/contractcourt/contract_resolvers.go index b39a04d4a2..a5fe119ad7 100644 --- a/contractcourt/contract_resolvers.go +++ b/contractcourt/contract_resolvers.go @@ -3,7 +3,13 @@ package contractcourt import ( "encoding/binary" "errors" + "fmt" "io" + + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btclog" + "github.com/lightningnetwork/lnd/build" + "github.com/lightningnetwork/lnd/channeldb" ) var ( @@ -46,21 +52,23 @@ type ContractResolver interface { // passed Writer. Encode(w io.Writer) error - // Decode attempts to decode an encoded ContractResolver from the - // passed Reader instance, returning an active ContractResolver - // instance. - Decode(r io.Reader) error - - // AttachResolverKit should be called once a resolved is successfully - // decoded from its stored format. This struct delivers a generic tool - // kit that resolvers need to complete their duty. - AttachResolverKit(ResolverKit) - // Stop signals the resolver to cancel any current resolution // processes, and suspend. Stop() } +// htlcContractResolver is the required interface for htlc resolvers. +type htlcContractResolver interface { + ContractResolver + + // HtlcPoint returns the htlc's outpoint on the commitment tx. + HtlcPoint() wire.OutPoint + + // Supplement adds additional information to the resolver that is + // required before Resolve() is called. 
+ Supplement(htlc channeldb.HTLC) +} + // reportingContractResolver is a ContractResolver that also exposes a report on // the resolution state of the contract. type reportingContractResolver interface { @@ -69,10 +77,9 @@ type reportingContractResolver interface { report() *ContractReport } -// ResolverKit is meant to be used as a mix-in struct to be embedded within a -// given ContractResolver implementation. It contains all the items that a -// resolver requires to carry out its duties. -type ResolverKit struct { +// ResolverConfig contains the externally supplied configuration items that are +// required by a ContractResolver implementation. +type ResolverConfig struct { // ChannelArbitratorConfig contains all the interfaces and closures // required for the resolver to interact with outside sub-systems. ChannelArbitratorConfig @@ -81,8 +88,31 @@ type ResolverKit struct { // should write the state of the resolver to persistent storage, and // return a non-nil error upon success. Checkpoint func(ContractResolver) error +} + +// contractResolverKit is meant to be used as a mix-in struct to be embedded within a +// given ContractResolver implementation. It contains all the common items that +// a resolver requires to carry out its duties. +type contractResolverKit struct { + ResolverConfig + + log btclog.Logger + + quit chan struct{} +} + +// newContractResolverKit instantiates the mix-in struct. +func newContractResolverKit(cfg ResolverConfig) *contractResolverKit { + return &contractResolverKit{ + ResolverConfig: cfg, + quit: make(chan struct{}), + } +} - Quit chan struct{} +// initLogger initializes the resolver-specific logger. 
+func (r *contractResolverKit) initLogger(resolver ContractResolver) { + logPrefix := fmt.Sprintf("%T(%v):", resolver, r.ChanPoint) + r.log = build.NewPrefixLog(logPrefix, log) } var ( diff --git a/contractcourt/htlc_incoming_contest_resolver.go b/contractcourt/htlc_incoming_contest_resolver.go index 2f3348e6e9..cc4c0f2b20 100644 --- a/contractcourt/htlc_incoming_contest_resolver.go +++ b/contractcourt/htlc_incoming_contest_resolver.go @@ -1,15 +1,18 @@ package contractcourt import ( + "bytes" "encoding/binary" "errors" + "fmt" "io" + "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/invoices" - - "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwallet" ) // htlcIncomingContestResolver is a ContractResolver that's able to resolve an @@ -26,14 +29,26 @@ type htlcIncomingContestResolver struct { // successfully. htlcExpiry uint32 - // circuitKey describes the incoming htlc that is being resolved. - circuitKey channeldb.CircuitKey - // htlcSuccessResolver is the inner resolver that may be utilized if we // learn of the preimage. htlcSuccessResolver } +// newIncomingContestResolver instantiates a new incoming htlc contest resolver. +func newIncomingContestResolver( + res lnwallet.IncomingHtlcResolution, broadcastHeight uint32, + htlc channeldb.HTLC, resCfg ResolverConfig) *htlcIncomingContestResolver { + + success := newSuccessResolver( + res, broadcastHeight, htlc, resCfg, + ) + + return &htlcIncomingContestResolver{ + htlcExpiry: htlc.RefundTimeout, + htlcSuccessResolver: *success, + } +} + // Resolve attempts to resolve this contract. As we don't yet know of the // preimage for the contract, we'll wait for one of two things to happen: // @@ -53,6 +68,22 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { return nil, nil } + // First try to parse the payload. 
If that fails, we can stop resolution + // now. + payload, err := h.decodePayload() + if err != nil { + log.Debugf("ChannelArbitrator(%v): cannot decode payload of "+ + "htlc %v", h.ChanPoint, h.HtlcPoint()) + + // If we've locked in an htlc with an invalid payload on our + // commitment tx, we don't need to resolve it. The other party + // will time it out and get their funds back. This situation can + // present itself when we crash before processRemoteAdds in the + // link has ran. + h.resolved = true + return nil, nil + } + // Register for block epochs. After registration, the current height // will be sent on the channel immediately. blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn(nil) @@ -68,7 +99,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { return nil, errResolverShuttingDown } currentHeight = newBlock.Height - case <-h.Quit: + case <-h.quit: return nil, errResolverShuttingDown } @@ -100,7 +131,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { applyPreimage := func(preimage lntypes.Preimage) error { // Sanity check to see if this preimage matches our htlc. At // this point it should never happen that it does not match. - if !preimage.Matches(h.payHash) { + if !preimage.Matches(h.htlc.RHash) { return errors.New("preimage does not match hash") } @@ -137,12 +168,28 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { preimageSubscription := h.PreimageDB.SubscribeUpdates() defer preimageSubscription.CancelSubscription() - // Define closure to process hodl events either direct or triggered by - // later notifcation. - processHodlEvent := func(e invoices.HodlEvent) (ContractResolver, - error) { + // Define closure to process htlc resolutions either direct or triggered by + // later notification. + processHtlcResolution := func(e invoices.HtlcResolution) ( + ContractResolver, error) { + + // Take action based on the type of resolution we have + // received. 
+ switch resolution := e.(type) { + + // If the htlc resolution was a settle, apply the + // preimage and return a success resolver. + case *invoices.HtlcSettleResolution: + err := applyPreimage(resolution.Preimage) + if err != nil { + return nil, err + } + + return &h.htlcSuccessResolver, nil - if e.Preimage == nil { + // If the htlc was failed, mark the htlc as + // resolved. + case *invoices.HtlcFailResolution: log.Infof("%T(%v): Exit hop HTLC canceled "+ "(expiry=%v, height=%v), abandoning", h, h.htlcResolution.ClaimOutpoint, @@ -150,13 +197,13 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { h.resolved = true return nil, h.Checkpoint(h) - } - if err := applyPreimage(*e.Preimage); err != nil { - return nil, err + // Error if the resolution type is unknown, we are only + // expecting settles and fails. + default: + return nil, fmt.Errorf("unknown resolution"+ + " type: %v", e) } - - return &h.htlcSuccessResolver, nil } // Create a buffered hodl chan to prevent deadlock. @@ -166,26 +213,49 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { // on-chain. If this HTLC indeed pays to an existing invoice, the // invoice registry will tell us what to do with the HTLC. This is // identical to HTLC resolution in the link. - event, err := h.Registry.NotifyExitHopHtlc( - h.payHash, h.htlcAmt, h.htlcExpiry, currentHeight, - h.circuitKey, hodlChan, nil, + circuitKey := channeldb.CircuitKey{ + ChanID: h.ShortChanID, + HtlcID: h.htlc.HtlcIndex, + } + + resolution, err := h.Registry.NotifyExitHopHtlc( + h.htlc.RHash, h.htlc.Amt, h.htlcExpiry, currentHeight, + circuitKey, hodlChan, payload, ) - switch err { - case channeldb.ErrInvoiceNotFound: - case nil: - defer h.Registry.HodlUnsubscribeAll(hodlChan) + if err != nil { + return nil, err + } - // Resolve the htlc directly if possible. 
- if event != nil { - return processHodlEvent(*event) + defer h.Registry.HodlUnsubscribeAll(hodlChan) + + // Take action based on the resolution we received. If the htlc was + // settled, or a htlc for a known invoice failed we can resolve it + // directly. If the resolution is nil, the htlc was neither accepted + // nor failed, so we cannot take action yet. + switch res := resolution.(type) { + case *invoices.HtlcFailResolution: + // In the case where the htlc failed, but the invoice was known + // to the registry, we can directly resolve the htlc. + if res.Outcome != invoices.ResultInvoiceNotFound { + return processHtlcResolution(resolution) } + + // If we settled the htlc, we can resolve it. + case *invoices.HtlcSettleResolution: + return processHtlcResolution(resolution) + + // If the resolution is nil, the htlc was neither settled nor failed so + // we cannot take action at present. + case nil: + default: - return nil, err + return nil, fmt.Errorf("unknown htlc resolution type: %T", + resolution) } // With the epochs and preimage subscriptions initialized, we'll query // to see if we already know the preimage. - preimage, ok := h.PreimageDB.LookupPreimage(h.payHash) + preimage, ok := h.PreimageDB.LookupPreimage(h.htlc.RHash) if ok { // If we do, then this means we can claim the HTLC! However, // we don't know how to ourselves, so we'll return our inner @@ -203,7 +273,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { case preimage := <-preimageSubscription.WitnessUpdates: // We receive all new preimages, so we need to ignore // all except the preimage we are waiting for. 
- if !preimage.Matches(h.payHash) { + if !preimage.Matches(h.htlc.RHash) { continue } @@ -217,9 +287,8 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { return &h.htlcSuccessResolver, nil case hodlItem := <-hodlChan: - hodlEvent := hodlItem.(invoices.HodlEvent) - - return processHodlEvent(hodlEvent) + htlcResolution := hodlItem.(invoices.HtlcResolution) + return processHtlcResolution(htlcResolution) case newBlock, ok := <-blockEpochs.Epochs: if !ok { @@ -239,7 +308,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { return nil, h.Checkpoint(h) } - case <-h.Quit: + case <-h.quit: return nil, errResolverShuttingDown } } @@ -249,7 +318,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { func (h *htlcIncomingContestResolver) report() *ContractReport { // No locking needed as these values are read-only. - finalAmt := h.htlcAmt.ToSatoshis() + finalAmt := h.htlc.Amt.ToSatoshis() if h.htlcResolution.SignedSuccessTx != nil { finalAmt = btcutil.Amount( h.htlcResolution.SignedSuccessTx.TxOut[0].Value, @@ -258,7 +327,7 @@ func (h *htlcIncomingContestResolver) report() *ContractReport { return &ContractReport{ Outpoint: h.htlcResolution.ClaimOutpoint, - Incoming: true, + Type: ReportOutputIncomingHtlc, Amount: finalAmt, MaturityHeight: h.htlcExpiry, LimboBalance: finalAmt, @@ -271,7 +340,7 @@ func (h *htlcIncomingContestResolver) report() *ContractReport { // // NOTE: Part of the ContractResolver interface. func (h *htlcIncomingContestResolver) Stop() { - close(h.Quit) + close(h.quit) } // IsResolved returns true if the stored state in the resolve is fully @@ -296,29 +365,51 @@ func (h *htlcIncomingContestResolver) Encode(w io.Writer) error { return h.htlcSuccessResolver.Encode(w) } -// Decode attempts to decode an encoded ContractResolver from the passed Reader -// instance, returning an active ContractResolver instance. -// -// NOTE: Part of the ContractResolver interface. 
-func (h *htlcIncomingContestResolver) Decode(r io.Reader) error { +// newIncomingContestResolverFromReader attempts to decode an encoded ContractResolver +// from the passed Reader instance, returning an active ContractResolver +// instance. +func newIncomingContestResolverFromReader(r io.Reader, resCfg ResolverConfig) ( + *htlcIncomingContestResolver, error) { + + h := &htlcIncomingContestResolver{} + // We'll first read the one field unique to this resolver. if err := binary.Read(r, endian, &h.htlcExpiry); err != nil { - return err + return nil, err } // Then we'll decode our internal resolver. - return h.htlcSuccessResolver.Decode(r) + successResolver, err := newSuccessResolverFromReader(r, resCfg) + if err != nil { + return nil, err + } + h.htlcSuccessResolver = *successResolver + + return h, nil } -// AttachResolverKit should be called once a resolved is successfully decoded -// from its stored format. This struct delivers a generic tool kit that -// resolvers need to complete their duty. +// Supplement adds additional information to the resolver that is required +// before Resolve() is called. // -// NOTE: Part of the ContractResolver interface. -func (h *htlcIncomingContestResolver) AttachResolverKit(r ResolverKit) { - h.ResolverKit = r +// NOTE: Part of the htlcContractResolver interface. +func (h *htlcIncomingContestResolver) Supplement(htlc channeldb.HTLC) { + h.htlc = htlc +} + +// decodePayload (re)decodes the hop payload of a received htlc. +func (h *htlcIncomingContestResolver) decodePayload() (*hop.Payload, error) { + + onionReader := bytes.NewReader(h.htlc.OnionBlob) + iterator, err := h.OnionProcessor.ReconstructHopIterator( + onionReader, h.htlc.RHash[:], + ) + if err != nil { + return nil, err + } + + return iterator.HopPayload() } // A compile time assertion to ensure htlcIncomingContestResolver meets the // ContractResolver interface. 
-var _ ContractResolver = (*htlcIncomingContestResolver)(nil) +var _ htlcContractResolver = (*htlcIncomingContestResolver)(nil) diff --git a/contractcourt/htlc_incoming_resolver_test.go b/contractcourt/htlc_incoming_resolver_test.go index 43d01817b4..400662d739 100644 --- a/contractcourt/htlc_incoming_resolver_test.go +++ b/contractcourt/htlc_incoming_resolver_test.go @@ -2,9 +2,12 @@ package contractcourt import ( "bytes" + "io" + "io/ioutil" "testing" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/invoices" "github.com/lightningnetwork/lnd/lnwallet" @@ -18,9 +21,11 @@ const ( ) var ( - testResPreimage = lntypes.Preimage{1, 2, 3} - testResHash = testResPreimage.Hash() - testResCircuitKey = channeldb.CircuitKey{} + testResPreimage = lntypes.Preimage{1, 2, 3} + testResHash = testResPreimage.Hash() + testResCircuitKey = channeldb.CircuitKey{} + testOnionBlob = []byte{4, 5, 6} + testAcceptHeight int32 = 1234 ) // TestHtlcIncomingResolverFwdPreimageKnown tests resolution of a forwarded htlc @@ -30,7 +35,10 @@ func TestHtlcIncomingResolverFwdPreimageKnown(t *testing.T) { defer timeout(t)() ctx := newIncomingResolverTestContext(t) - ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound + ctx.registry.notifyResolution = invoices.NewFailResolution( + testResCircuitKey, testHtlcExpiry, + invoices.ResultInvoiceNotFound, + ) ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage ctx.resolve() ctx.waitForResult(true) @@ -44,7 +52,10 @@ func TestHtlcIncomingResolverFwdContestedSuccess(t *testing.T) { defer timeout(t)() ctx := newIncomingResolverTestContext(t) - ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound + ctx.registry.notifyResolution = invoices.NewFailResolution( + testResCircuitKey, testHtlcExpiry, + invoices.ResultInvoiceNotFound, + ) ctx.resolve() // Simulate a new block coming in. HTLC is not yet expired. 
@@ -61,7 +72,10 @@ func TestHtlcIncomingResolverFwdContestedTimeout(t *testing.T) { defer timeout(t)() ctx := newIncomingResolverTestContext(t) - ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound + ctx.registry.notifyResolution = invoices.NewFailResolution( + testResCircuitKey, testHtlcExpiry, + invoices.ResultInvoiceNotFound, + ) ctx.resolve() // Simulate a new block coming in. HTLC expires. @@ -77,8 +91,10 @@ func TestHtlcIncomingResolverFwdTimeout(t *testing.T) { defer timeout(t)() ctx := newIncomingResolverTestContext(t) - - ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound + ctx.registry.notifyResolution = invoices.NewFailResolution( + testResCircuitKey, testHtlcExpiry, + invoices.ResultInvoiceNotFound, + ) ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage ctx.resolver.htlcExpiry = 90 ctx.resolve() @@ -92,10 +108,11 @@ func TestHtlcIncomingResolverExitSettle(t *testing.T) { defer timeout(t)() ctx := newIncomingResolverTestContext(t) - ctx.registry.notifyEvent = &invoices.HodlEvent{ - CircuitKey: testResCircuitKey, - Preimage: &testResPreimage, - } + ctx.registry.notifyResolution = invoices.NewSettleResolution( + testResPreimage, testResCircuitKey, testAcceptHeight, + invoices.ResultReplayToSettled, + ) + ctx.resolve() data := <-ctx.registry.notifyChan @@ -107,6 +124,12 @@ func TestHtlcIncomingResolverExitSettle(t *testing.T) { } ctx.waitForResult(true) + + if !bytes.Equal( + ctx.onionProcessor.offeredOnionBlob, testOnionBlob, + ) { + t.Fatal("unexpected onion blob") + } } // TestHtlcIncomingResolverExitCancel tests resolution of an exit hop htlc for @@ -116,9 +139,11 @@ func TestHtlcIncomingResolverExitCancel(t *testing.T) { defer timeout(t)() ctx := newIncomingResolverTestContext(t) - ctx.registry.notifyEvent = &invoices.HodlEvent{ - CircuitKey: testResCircuitKey, - } + ctx.registry.notifyResolution = invoices.NewFailResolution( + testResCircuitKey, testAcceptHeight, + invoices.ResultInvoiceAlreadyCanceled, + ) + ctx.resolve() 
ctx.waitForResult(false) } @@ -133,10 +158,10 @@ func TestHtlcIncomingResolverExitSettleHodl(t *testing.T) { ctx.resolve() notifyData := <-ctx.registry.notifyChan - notifyData.hodlChan <- invoices.HodlEvent{ - CircuitKey: testResCircuitKey, - Preimage: &testResPreimage, - } + notifyData.hodlChan <- invoices.NewSettleResolution( + testResPreimage, testResCircuitKey, testAcceptHeight, + invoices.ResultSettled, + ) ctx.waitForResult(true) } @@ -162,20 +187,46 @@ func TestHtlcIncomingResolverExitCancelHodl(t *testing.T) { ctx := newIncomingResolverTestContext(t) ctx.resolve() notifyData := <-ctx.registry.notifyChan - notifyData.hodlChan <- invoices.HodlEvent{ - CircuitKey: testResCircuitKey, - } + notifyData.hodlChan <- invoices.NewFailResolution( + testResCircuitKey, testAcceptHeight, invoices.ResultCanceled, + ) + ctx.waitForResult(false) } +type mockHopIterator struct { + hop.Iterator +} + +func (h *mockHopIterator) HopPayload() (*hop.Payload, error) { + return nil, nil +} + +type mockOnionProcessor struct { + offeredOnionBlob []byte +} + +func (o *mockOnionProcessor) ReconstructHopIterator(r io.Reader, rHash []byte) ( + hop.Iterator, error) { + + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + o.offeredOnionBlob = data + + return &mockHopIterator{}, nil +} + type incomingResolverTestContext struct { - registry *mockRegistry - witnessBeacon *mockWitnessBeacon - resolver *htlcIncomingContestResolver - notifier *mockNotifier - resolveErr chan error - nextResolver ContractResolver - t *testing.T + registry *mockRegistry + witnessBeacon *mockWitnessBeacon + resolver *htlcIncomingContestResolver + notifier *mockNotifier + onionProcessor *mockOnionProcessor + resolveErr chan error + nextResolver ContractResolver + t *testing.T } func newIncomingResolverTestContext(t *testing.T) *incomingResolverTestContext { @@ -189,37 +240,45 @@ func newIncomingResolverTestContext(t *testing.T) *incomingResolverTestContext { notifyChan: make(chan 
notifyExitHopData, 1), } + onionProcessor := &mockOnionProcessor{} + checkPointChan := make(chan struct{}, 1) chainCfg := ChannelArbitratorConfig{ ChainArbitratorConfig: ChainArbitratorConfig{ - Notifier: notifier, - PreimageDB: witnessBeacon, - Registry: registry, + Notifier: notifier, + PreimageDB: witnessBeacon, + Registry: registry, + OnionProcessor: onionProcessor, }, } + cfg := ResolverConfig{ + ChannelArbitratorConfig: chainCfg, + Checkpoint: func(_ ContractResolver) error { + checkPointChan <- struct{}{} + return nil + }, + } resolver := &htlcIncomingContestResolver{ htlcSuccessResolver: htlcSuccessResolver{ - ResolverKit: ResolverKit{ - ChannelArbitratorConfig: chainCfg, - Checkpoint: func(_ ContractResolver) error { - checkPointChan <- struct{}{} - return nil - }, + contractResolverKit: *newContractResolverKit(cfg), + htlcResolution: lnwallet.IncomingHtlcResolution{}, + htlc: channeldb.HTLC{ + RHash: testResHash, + OnionBlob: testOnionBlob, }, - htlcResolution: lnwallet.IncomingHtlcResolution{}, - payHash: testResHash, }, htlcExpiry: testHtlcExpiry, } return &incomingResolverTestContext{ - registry: registry, - witnessBeacon: witnessBeacon, - resolver: resolver, - notifier: notifier, - t: t, + registry: registry, + witnessBeacon: witnessBeacon, + resolver: resolver, + notifier: notifier, + onionProcessor: onionProcessor, + t: t, } } @@ -251,7 +310,7 @@ func (i *incomingResolverTestContext) waitForResult(expectSuccessRes bool) { } if !expectSuccessRes { - if err != nil { + if i.nextResolver != nil { i.t.Fatal("expected no next resolver") } return diff --git a/contractcourt/htlc_outgoing_contest_resolver.go b/contractcourt/htlc_outgoing_contest_resolver.go index 43549a261d..28d95247af 100644 --- a/contractcourt/htlc_outgoing_contest_resolver.go +++ b/contractcourt/htlc_outgoing_contest_resolver.go @@ -5,6 +5,8 @@ import ( "io" "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/lnwallet" ) // 
htlcOutgoingContestResolver is a ContractResolver that's able to resolve an @@ -18,6 +20,21 @@ type htlcOutgoingContestResolver struct { htlcTimeoutResolver } +// newOutgoingContestResolver instantiates a new outgoing contested htlc +// resolver. +func newOutgoingContestResolver(res lnwallet.OutgoingHtlcResolution, + broadcastHeight uint32, htlc channeldb.HTLC, + resCfg ResolverConfig) *htlcOutgoingContestResolver { + + timeout := newTimeoutResolver( + res, broadcastHeight, htlc, resCfg, + ) + + return &htlcOutgoingContestResolver{ + htlcTimeoutResolver: *timeout, + } +} + // Resolve commences the resolution of this contract. As this contract hasn't // yet timed out, we'll wait for one of two things to happen // @@ -76,36 +93,6 @@ func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, error) { default: } - // We'll check the current height, if the HTLC has already expired, - // then we'll morph immediately into a resolver that can sweep the - // HTLC. - // - // TODO(roasbeef): use grace period instead? - _, currentHeight, err := h.ChainIO.GetBestBlock() - if err != nil { - return nil, err - } - - // If the current height is >= expiry-1, then a spend will be valid to - // be included in the next block, and we can immediately return the - // resolver. - // - // TODO(joostjager): Statement above may not be valid. For CLTV locks, - // the expiry value is the last _invalid_ block. The likely reason that - // this does not create a problem, is that utxonursery is checking the - // expiry again (in the proper way). Same holds for minus one operation - // below. 
- // - // Source: - // https://github.com/btcsuite/btcd/blob/991d32e72fe84d5fbf9c47cd604d793a0cd3a072/blockchain/validate.go#L154 - if uint32(currentHeight) >= h.htlcResolution.Expiry-1 { - log.Infof("%T(%v): HTLC has expired (height=%v, expiry=%v), "+ - "transforming into timeout resolver", h, - h.htlcResolution.ClaimOutpoint, currentHeight, - h.htlcResolution.Expiry) - return &h.htlcTimeoutResolver, nil - } - // If we reach this point, then we can't fully act yet, so we'll await // either of our signals triggering: the HTLC expires, or we learn of // the preimage. @@ -125,9 +112,18 @@ func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, error) { return nil, errResolverShuttingDown } - // If this new height expires the HTLC, then we can - // exit early and create a resolver that's capable of - // handling the time locked output. + // If the current height is >= expiry-1, then a timeout + // path spend will be valid to be included in the next + // block, and we can immediately return the resolver. + // + // TODO(joostjager): Statement above may not be valid. + // For CLTV locks, the expiry value is the last + // _invalid_ block. The likely reason that this does not + // create a problem, is that utxonursery is checking the + // expiry again (in the proper way). + // + // Source: + // https://github.com/btcsuite/btcd/blob/991d32e72fe84d5fbf9c47cd604d793a0cd3a072/blockchain/validate.go#L154 newHeight := uint32(newBlock.Height) if newHeight >= h.htlcResolution.Expiry-1 { log.Infof("%T(%v): HTLC has expired "+ @@ -151,7 +147,7 @@ func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, error) { // claimed. return h.claimCleanUp(commitSpend) - case <-h.Quit: + case <-h.quit: return nil, fmt.Errorf("resolver canceled") } } @@ -161,7 +157,7 @@ func (h *htlcOutgoingContestResolver) Resolve() (ContractResolver, error) { func (h *htlcOutgoingContestResolver) report() *ContractReport { // No locking needed as these values are read-only. 
- finalAmt := h.htlcAmt.ToSatoshis() + finalAmt := h.htlc.Amt.ToSatoshis() if h.htlcResolution.SignedTimeoutTx != nil { finalAmt = btcutil.Amount( h.htlcResolution.SignedTimeoutTx.TxOut[0].Value, @@ -170,7 +166,7 @@ func (h *htlcOutgoingContestResolver) report() *ContractReport { return &ContractReport{ Outpoint: h.htlcResolution.ClaimOutpoint, - Incoming: false, + Type: ReportOutputOutgoingHtlc, Amount: finalAmt, MaturityHeight: h.htlcResolution.Expiry, LimboBalance: finalAmt, @@ -183,7 +179,7 @@ func (h *htlcOutgoingContestResolver) report() *ContractReport { // // NOTE: Part of the ContractResolver interface. func (h *htlcOutgoingContestResolver) Stop() { - close(h.Quit) + close(h.quit) } // IsResolved returns true if the stored state in the resolve is fully @@ -202,23 +198,21 @@ func (h *htlcOutgoingContestResolver) Encode(w io.Writer) error { return h.htlcTimeoutResolver.Encode(w) } -// Decode attempts to decode an encoded ContractResolver from the passed Reader -// instance, returning an active ContractResolver instance. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcOutgoingContestResolver) Decode(r io.Reader) error { - return h.htlcTimeoutResolver.Decode(r) -} +// newOutgoingContestResolverFromReader attempts to decode an encoded ContractResolver +// from the passed Reader instance, returning an active ContractResolver +// instance. +func newOutgoingContestResolverFromReader(r io.Reader, resCfg ResolverConfig) ( + *htlcOutgoingContestResolver, error) { -// AttachResolverKit should be called once a resolved is successfully decoded -// from its stored format. This struct delivers a generic tool kit that -// resolvers need to complete their duty. -// -// NOTE: Part of the ContractResolver interface. 
-func (h *htlcOutgoingContestResolver) AttachResolverKit(r ResolverKit) { - h.ResolverKit = r + h := &htlcOutgoingContestResolver{} + timeoutResolver, err := newTimeoutResolverFromReader(r, resCfg) + if err != nil { + return nil, err + } + h.htlcTimeoutResolver = *timeoutResolver + return h, nil } // A compile time assertion to ensure htlcOutgoingContestResolver meets the // ContractResolver interface. -var _ ContractResolver = (*htlcOutgoingContestResolver)(nil) +var _ htlcContractResolver = (*htlcOutgoingContestResolver)(nil) diff --git a/contractcourt/htlc_outgoing_contest_resolver_test.go b/contractcourt/htlc_outgoing_contest_resolver_test.go new file mode 100644 index 0000000000..3a6c1ea587 --- /dev/null +++ b/contractcourt/htlc_outgoing_contest_resolver_test.go @@ -0,0 +1,197 @@ +package contractcourt + +import ( + "fmt" + "testing" + + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwallet" +) + +const ( + outgoingContestHtlcExpiry = 110 +) + +// TestHtlcOutgoingResolverTimeout tests resolution of an offered htlc that +// timed out. +func TestHtlcOutgoingResolverTimeout(t *testing.T) { + t.Parallel() + defer timeout(t)() + + // Setup the resolver with our test resolution. + ctx := newOutgoingResolverTestContext(t) + + // Start the resolution process in a goroutine. + ctx.resolve() + + // Notify arrival of the block after which the timeout path of the htlc + // unlocks. + ctx.notifyEpoch(outgoingContestHtlcExpiry - 1) + + // Assert that the resolver finishes without error and transforms in a + // timeout resolver. + ctx.waitForResult(true) +} + +// TestHtlcOutgoingResolverRemoteClaim tests resolution of an offered htlc that +// is claimed by the remote party. 
+func TestHtlcOutgoingResolverRemoteClaim(t *testing.T) { + t.Parallel() + defer timeout(t)() + + // Setup the resolver with our test resolution and start the resolution + // process. + ctx := newOutgoingResolverTestContext(t) + ctx.resolve() + + // The remote party sweeps the htlc. Notify our resolver of this event. + preimage := lntypes.Preimage{} + ctx.notifier.spendChan <- &chainntnfs.SpendDetail{ + SpendingTx: &wire.MsgTx{ + TxIn: []*wire.TxIn{ + { + Witness: [][]byte{ + {0}, {1}, {2}, preimage[:], + }, + }, + }, + }, + } + + // We expect the extracted preimage to be added to the witness beacon. + <-ctx.preimageDB.newPreimages + + // We also expect a resolution message to the incoming side of the + // circuit. + <-ctx.resolutionChan + + // Assert that the resolver finishes without error. + ctx.waitForResult(false) +} + +type resolveResult struct { + err error + nextResolver ContractResolver +} + +type outgoingResolverTestContext struct { + resolver *htlcOutgoingContestResolver + notifier *mockNotifier + preimageDB *mockWitnessBeacon + resolverResultChan chan resolveResult + resolutionChan chan ResolutionMsg + t *testing.T +} + +func newOutgoingResolverTestContext(t *testing.T) *outgoingResolverTestContext { + notifier := &mockNotifier{ + epochChan: make(chan *chainntnfs.BlockEpoch), + spendChan: make(chan *chainntnfs.SpendDetail), + confChan: make(chan *chainntnfs.TxConfirmation), + } + + checkPointChan := make(chan struct{}, 1) + resolutionChan := make(chan ResolutionMsg, 1) + + preimageDB := newMockWitnessBeacon() + + onionProcessor := &mockOnionProcessor{} + + chainCfg := ChannelArbitratorConfig{ + ChainArbitratorConfig: ChainArbitratorConfig{ + Notifier: notifier, + PreimageDB: preimageDB, + DeliverResolutionMsg: func(msgs ...ResolutionMsg) error { + if len(msgs) != 1 { + return fmt.Errorf("expected 1 "+ + "resolution msg, instead got %v", + len(msgs)) + } + + resolutionChan <- msgs[0] + return nil + }, + OnionProcessor: onionProcessor, + }, + } + + 
outgoingRes := lnwallet.OutgoingHtlcResolution{ + Expiry: outgoingContestHtlcExpiry, + SweepSignDesc: input.SignDescriptor{ + Output: &wire.TxOut{}, + }, + } + + cfg := ResolverConfig{ + ChannelArbitratorConfig: chainCfg, + Checkpoint: func(_ ContractResolver) error { + checkPointChan <- struct{}{} + return nil + }, + } + + resolver := &htlcOutgoingContestResolver{ + htlcTimeoutResolver: htlcTimeoutResolver{ + contractResolverKit: *newContractResolverKit(cfg), + htlcResolution: outgoingRes, + htlc: channeldb.HTLC{ + RHash: testResHash, + OnionBlob: testOnionBlob, + }, + }, + } + + return &outgoingResolverTestContext{ + resolver: resolver, + notifier: notifier, + preimageDB: preimageDB, + resolutionChan: resolutionChan, + t: t, + } +} + +func (i *outgoingResolverTestContext) resolve() { + // Start resolver. + i.resolverResultChan = make(chan resolveResult, 1) + go func() { + nextResolver, err := i.resolver.Resolve() + i.resolverResultChan <- resolveResult{ + nextResolver: nextResolver, + err: err, + } + }() + + // Notify initial block height. 
+ i.notifyEpoch(testInitialBlockHeight) +} + +func (i *outgoingResolverTestContext) notifyEpoch(height int32) { + i.notifier.epochChan <- &chainntnfs.BlockEpoch{ + Height: height, + } +} + +func (i *outgoingResolverTestContext) waitForResult(expectTimeoutRes bool) { + i.t.Helper() + + result := <-i.resolverResultChan + if result.err != nil { + i.t.Fatal(result.err) + } + + if !expectTimeoutRes { + if result.nextResolver != nil { + i.t.Fatal("expected no next resolver") + } + return + } + + _, ok := result.nextResolver.(*htlcTimeoutResolver) + if !ok { + i.t.Fatal("expected htlcTimeoutResolver") + } +} diff --git a/contractcourt/htlc_success_resolver.go b/contractcourt/htlc_success_resolver.go index d1c8716cba..38fa7fd051 100644 --- a/contractcourt/htlc_success_resolver.go +++ b/contractcourt/htlc_success_resolver.go @@ -6,10 +6,9 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" - "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" - "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/sweep" ) @@ -38,9 +37,6 @@ type htlcSuccessResolver struct { // historical queries to the chain for spends/confirmations. broadcastHeight uint32 - // payHash is the payment hash of the original HTLC extended to us. - payHash lntypes.Hash - // sweepTx will be non-nil if we've already crafted a transaction to // sweep a direct HTLC output. This is only a concern if we're sweeping // from the commitment transaction of the remote party. @@ -48,11 +44,23 @@ type htlcSuccessResolver struct { // TODO(roasbeef): send off to utxobundler sweepTx *wire.MsgTx - // htlcAmt is the original amount of the htlc, not taking into - // account any fees that may have to be paid if it goes on chain. - htlcAmt lnwire.MilliSatoshi + // htlc contains information on the htlc that we are resolving on-chain. 
+ htlc channeldb.HTLC - ResolverKit + contractResolverKit +} + +// newSuccessResolver instanties a new htlc success resolver. +func newSuccessResolver(res lnwallet.IncomingHtlcResolution, + broadcastHeight uint32, htlc channeldb.HTLC, + resCfg ResolverConfig) *htlcSuccessResolver { + + return &htlcSuccessResolver{ + contractResolverKit: *newContractResolverKit(resCfg), + htlcResolution: res, + broadcastHeight: broadcastHeight, + htlc: htlc, + } } // ResolverKey returns an identifier which should be globally unique for this @@ -99,7 +107,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { if h.sweepTx == nil { log.Infof("%T(%x): crafting sweep tx for "+ "incoming+remote htlc confirmed", h, - h.payHash[:]) + h.htlc.RHash[:]) // Before we can craft out sweeping transaction, we // need to create an input which contains all the items @@ -110,6 +118,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { &h.htlcResolution.SweepSignDesc, h.htlcResolution.Preimage[:], h.broadcastHeight, + h.htlcResolution.CsvDelay, ) // With the input created, we can now generate the full @@ -133,7 +142,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { } log.Infof("%T(%x): crafted sweep tx=%v", h, - h.payHash[:], spew.Sdump(h.sweepTx)) + h.htlc.RHash[:], spew.Sdump(h.sweepTx)) // With the sweep transaction signed, we'll now // Checkpoint our state. 
@@ -149,7 +158,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { err := h.PublishTx(h.sweepTx) if err != nil { log.Infof("%T(%x): unable to publish tx: %v", - h, h.payHash[:], err) + h, h.htlc.RHash[:], err) return nil, err } @@ -165,7 +174,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { } log.Infof("%T(%x): waiting for sweep tx (txid=%v) to be "+ - "confirmed", h, h.payHash[:], sweepTXID) + "confirmed", h, h.htlc.RHash[:], sweepTXID) select { case _, ok := <-confNtfn.Confirmed: @@ -173,7 +182,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { return nil, errResolverShuttingDown } - case <-h.Quit: + case <-h.quit: return nil, errResolverShuttingDown } @@ -184,7 +193,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { } log.Infof("%T(%x): broadcasting second-layer transition tx: %v", - h, h.payHash[:], spew.Sdump(h.htlcResolution.SignedSuccessTx)) + h, h.htlc.RHash[:], spew.Sdump(h.htlcResolution.SignedSuccessTx)) // We'll now broadcast the second layer transaction so we can kick off // the claiming process. @@ -200,10 +209,10 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { // done so. 
if !h.outputIncubating { log.Infof("%T(%x): incubating incoming htlc output", - h, h.payHash[:]) + h, h.htlc.RHash[:]) err := h.IncubateOutputs( - h.ChanPoint, nil, nil, &h.htlcResolution, + h.ChanPoint, nil, &h.htlcResolution, h.broadcastHeight, ) if err != nil { @@ -230,7 +239,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { } log.Infof("%T(%x): waiting for second-level HTLC output to be spent "+ - "after csv_delay=%v", h, h.payHash[:], h.htlcResolution.CsvDelay) + "after csv_delay=%v", h, h.htlc.RHash[:], h.htlcResolution.CsvDelay) select { case _, ok := <-spendNtfn.Spend: @@ -238,7 +247,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { return nil, errResolverShuttingDown } - case <-h.Quit: + case <-h.quit: return nil, errResolverShuttingDown } @@ -251,7 +260,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { // // NOTE: Part of the ContractResolver interface. func (h *htlcSuccessResolver) Stop() { - close(h.Quit) + close(h.quit) } // IsResolved returns true if the stored state in the resolve is fully @@ -283,50 +292,61 @@ func (h *htlcSuccessResolver) Encode(w io.Writer) error { if err := binary.Write(w, endian, h.broadcastHeight); err != nil { return err } - if _, err := w.Write(h.payHash[:]); err != nil { + if _, err := w.Write(h.htlc.RHash[:]); err != nil { return err } return nil } -// Decode attempts to decode an encoded ContractResolver from the passed Reader -// instance, returning an active ContractResolver instance. -// -// NOTE: Part of the ContractResolver interface. -func (h *htlcSuccessResolver) Decode(r io.Reader) error { +// newSuccessResolverFromReader attempts to decode an encoded ContractResolver +// from the passed Reader instance, returning an active ContractResolver +// instance. 
+func newSuccessResolverFromReader(r io.Reader, resCfg ResolverConfig) ( + *htlcSuccessResolver, error) { + + h := &htlcSuccessResolver{ + contractResolverKit: *newContractResolverKit(resCfg), + } + // First we'll decode our inner HTLC resolution. if err := decodeIncomingResolution(r, &h.htlcResolution); err != nil { - return err + return nil, err } // Next, we'll read all the fields that are specified to the contract // resolver. if err := binary.Read(r, endian, &h.outputIncubating); err != nil { - return err + return nil, err } if err := binary.Read(r, endian, &h.resolved); err != nil { - return err + return nil, err } if err := binary.Read(r, endian, &h.broadcastHeight); err != nil { - return err + return nil, err } - if _, err := io.ReadFull(r, h.payHash[:]); err != nil { - return err + if _, err := io.ReadFull(r, h.htlc.RHash[:]); err != nil { + return nil, err } - return nil + return h, nil } -// AttachResolverKit should be called once a resolved is successfully decoded -// from its stored format. This struct delivers a generic tool kit that -// resolvers need to complete their duty. +// Supplement adds additional information to the resolver that is required +// before Resolve() is called. // -// NOTE: Part of the ContractResolver interface. -func (h *htlcSuccessResolver) AttachResolverKit(r ResolverKit) { - h.ResolverKit = r +// NOTE: Part of the htlcContractResolver interface. +func (h *htlcSuccessResolver) Supplement(htlc channeldb.HTLC) { + h.htlc = htlc +} + +// HtlcPoint returns the htlc's outpoint on the commitment tx. +// +// NOTE: Part of the htlcContractResolver interface. +func (h *htlcSuccessResolver) HtlcPoint() wire.OutPoint { + return h.htlcResolution.HtlcPoint() } // A compile time assertion to ensure htlcSuccessResolver meets the // ContractResolver interface. 
-var _ ContractResolver = (*htlcSuccessResolver)(nil) +var _ htlcContractResolver = (*htlcSuccessResolver)(nil) diff --git a/contractcourt/htlc_timeout_resolver.go b/contractcourt/htlc_timeout_resolver.go index 9d1c92d723..439babc4e0 100644 --- a/contractcourt/htlc_timeout_resolver.go +++ b/contractcourt/htlc_timeout_resolver.go @@ -8,6 +8,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" @@ -40,15 +41,23 @@ type htlcTimeoutResolver struct { // TODO(roasbeef): wrap above into definite resolution embedding? broadcastHeight uint32 - // htlcIndex is the index of this HTLC within the trace of the - // additional commitment state machine. - htlcIndex uint64 + // htlc contains information on the htlc that we are resolving on-chain. + htlc channeldb.HTLC - // htlcAmt is the original amount of the htlc, not taking into - // account any fees that may have to be paid if it goes on chain. - htlcAmt lnwire.MilliSatoshi + contractResolverKit +} + +// newTimeoutResolver instantiates a new timeout htlc resolver. +func newTimeoutResolver(res lnwallet.OutgoingHtlcResolution, + broadcastHeight uint32, htlc channeldb.HTLC, + resCfg ResolverConfig) *htlcTimeoutResolver { - ResolverKit + return &htlcTimeoutResolver{ + contractResolverKit: *newContractResolverKit(resCfg), + htlcResolution: res, + broadcastHeight: broadcastHeight, + htlc: htlc, + } } // ResolverKey returns an identifier which should be globally unique for this @@ -142,7 +151,7 @@ func (h *htlcTimeoutResolver) claimCleanUp( // resolved, then exit. 
if err := h.DeliverResolutionMsg(ResolutionMsg{ SourceChan: h.ShortChanID, - HtlcIndex: h.htlcIndex, + HtlcIndex: h.htlc.HtlcIndex, PreImage: &pre, }); err != nil { return nil, err @@ -238,7 +247,7 @@ func (h *htlcTimeoutResolver) Resolve() (ContractResolver, error) { h.htlcResolution.ClaimOutpoint) err := h.IncubateOutputs( - h.ChanPoint, nil, &h.htlcResolution, nil, + h.ChanPoint, &h.htlcResolution, nil, h.broadcastHeight, ) if err != nil { @@ -274,7 +283,7 @@ func (h *htlcTimeoutResolver) Resolve() (ContractResolver, error) { return errResolverShuttingDown } - case <-h.Quit: + case <-h.quit: return errResolverShuttingDown } @@ -312,7 +321,7 @@ func (h *htlcTimeoutResolver) Resolve() (ContractResolver, error) { return nil, errResolverShuttingDown } - case <-h.Quit: + case <-h.quit: return nil, errResolverShuttingDown } @@ -337,7 +346,7 @@ func (h *htlcTimeoutResolver) Resolve() (ContractResolver, error) { failureMsg := &lnwire.FailPermanentChannelFailure{} if err := h.DeliverResolutionMsg(ResolutionMsg{ SourceChan: h.ShortChanID, - HtlcIndex: h.htlcIndex, + HtlcIndex: h.htlc.HtlcIndex, Failure: failureMsg, }); err != nil { return nil, err @@ -365,7 +374,7 @@ func (h *htlcTimeoutResolver) Resolve() (ContractResolver, error) { // // NOTE: Part of the ContractResolver interface. func (h *htlcTimeoutResolver) Stop() { - close(h.Quit) + close(h.quit) } // IsResolved returns true if the stored state in the resolve is fully @@ -399,52 +408,63 @@ func (h *htlcTimeoutResolver) Encode(w io.Writer) error { return err } - if err := binary.Write(w, endian, h.htlcIndex); err != nil { + if err := binary.Write(w, endian, h.htlc.HtlcIndex); err != nil { return err } return nil } -// Decode attempts to decode an encoded ContractResolver from the passed Reader -// instance, returning an active ContractResolver instance. -// -// NOTE: Part of the ContractResolver interface. 
-func (h *htlcTimeoutResolver) Decode(r io.Reader) error { +// newTimeoutResolverFromReader attempts to decode an encoded ContractResolver +// from the passed Reader instance, returning an active ContractResolver +// instance. +func newTimeoutResolverFromReader(r io.Reader, resCfg ResolverConfig) ( + *htlcTimeoutResolver, error) { + + h := &htlcTimeoutResolver{ + contractResolverKit: *newContractResolverKit(resCfg), + } + // First, we'll read out all the mandatory fields of the // OutgoingHtlcResolution that we store. if err := decodeOutgoingResolution(r, &h.htlcResolution); err != nil { - return err + return nil, err } // With those fields read, we can now read back the fields that are // specific to the resolver itself. if err := binary.Read(r, endian, &h.outputIncubating); err != nil { - return err + return nil, err } if err := binary.Read(r, endian, &h.resolved); err != nil { - return err + return nil, err } if err := binary.Read(r, endian, &h.broadcastHeight); err != nil { - return err + return nil, err } - if err := binary.Read(r, endian, &h.htlcIndex); err != nil { - return err + if err := binary.Read(r, endian, &h.htlc.HtlcIndex); err != nil { + return nil, err } - return nil + return h, nil } -// AttachResolverKit should be called once a resolved is successfully decoded -// from its stored format. This struct delivers a generic tool kit that -// resolvers need to complete their duty. +// Supplement adds additional information to the resolver that is required +// before Resolve() is called. // -// NOTE: Part of the ContractResolver interface. -func (h *htlcTimeoutResolver) AttachResolverKit(r ResolverKit) { - h.ResolverKit = r +// NOTE: Part of the htlcContractResolver interface. +func (h *htlcTimeoutResolver) Supplement(htlc channeldb.HTLC) { + h.htlc = htlc +} + +// HtlcPoint returns the htlc's outpoint on the commitment tx. +// +// NOTE: Part of the htlcContractResolver interface. 
+func (h *htlcTimeoutResolver) HtlcPoint() wire.OutPoint { + return h.htlcResolution.HtlcPoint() } // A compile time assertion to ensure htlcTimeoutResolver meets the // ContractResolver interface. -var _ ContractResolver = (*htlcTimeoutResolver)(nil) +var _ htlcContractResolver = (*htlcTimeoutResolver)(nil) diff --git a/contractcourt/htlc_timeout_resolver_test.go b/contractcourt/htlc_timeout_resolver_test.go index 7dcf025ad6..923fb3eaf4 100644 --- a/contractcourt/htlc_timeout_resolver_test.go +++ b/contractcourt/htlc_timeout_resolver_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/input" @@ -14,12 +16,22 @@ import ( "github.com/lightningnetwork/lnd/lnwallet" ) +type dummySignature struct{} + +func (s *dummySignature) Serialize() []byte { + return []byte{} +} + +func (s *dummySignature) Verify(_ []byte, _ *btcec.PublicKey) bool { + return true +} + type mockSigner struct { } func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, - signDesc *input.SignDescriptor) ([]byte, error) { - return nil, nil + signDesc *input.SignDescriptor) (input.Signature, error) { + return &dummySignature{}, nil } func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx, @@ -144,7 +156,8 @@ func TestHtlcTimeoutResolver(t *testing.T) { timeout: true, txToBroadcast: func() (*wire.MsgTx, error) { witness, err := input.SenderHtlcSpendTimeout( - nil, signer, fakeSignDesc, sweepTx, + &dummySignature{}, txscript.SigHashAll, + signer, fakeSignDesc, sweepTx, ) if err != nil { return nil, err @@ -163,8 +176,9 @@ func TestHtlcTimeoutResolver(t *testing.T) { timeout: false, txToBroadcast: func() (*wire.MsgTx, error) { witness, err := input.ReceiverHtlcSpendRedeem( - nil, fakePreimageBytes, signer, - fakeSignDesc, sweepTx, + &dummySignature{}, txscript.SigHashAll, + fakePreimageBytes, signer, fakeSignDesc, + sweepTx, ) if err 
!= nil { return nil, err @@ -216,7 +230,6 @@ func TestHtlcTimeoutResolver(t *testing.T) { Notifier: notifier, PreimageDB: witnessBeacon, IncubateOutputs: func(wire.OutPoint, - *lnwallet.CommitOutputResolution, *lnwallet.OutgoingHtlcResolution, *lnwallet.IncomingHtlcResolution, uint32) error { @@ -237,15 +250,18 @@ func TestHtlcTimeoutResolver(t *testing.T) { }, } - resolver := &htlcTimeoutResolver{ - ResolverKit: ResolverKit{ - ChannelArbitratorConfig: chainCfg, - Checkpoint: func(_ ContractResolver) error { - checkPointChan <- struct{}{} - return nil - }, + cfg := ResolverConfig{ + ChannelArbitratorConfig: chainCfg, + Checkpoint: func(_ ContractResolver) error { + checkPointChan <- struct{}{} + return nil }, } + resolver := &htlcTimeoutResolver{ + contractResolverKit: *newContractResolverKit( + cfg, + ), + } resolver.htlcResolution.SweepSignDesc = *fakeSignDesc // If the test case needs the remote commitment to be diff --git a/contractcourt/interfaces.go b/contractcourt/interfaces.go index c928f4289f..ded07f3276 100644 --- a/contractcourt/interfaces.go +++ b/contractcourt/interfaces.go @@ -1,10 +1,17 @@ package contractcourt import ( + "io" + + "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/htlcswitch/hop" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/invoices" "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/sweep" ) // Registry is an interface which represents the invoice registry. 
@@ -21,8 +28,40 @@ type Registry interface { NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi, expiry uint32, currentHeight int32, circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - eob []byte) (*invoices.HodlEvent, error) + payload invoices.Payload) (invoices.HtlcResolution, error) - // HodlUnsubscribeAll unsubscribes from all hodl events. + // HodlUnsubscribeAll unsubscribes from all htlc resolutions. HodlUnsubscribeAll(subscriber chan<- interface{}) } + +// OnionProcessor is an interface used to decode onion blobs. +type OnionProcessor interface { + // ReconstructHopIterator attempts to decode a valid sphinx packet from + // the passed io.Reader instance. + ReconstructHopIterator(r io.Reader, rHash []byte) (hop.Iterator, error) +} + +// UtxoSweeper defines the sweep functions that contract court requires. +type UtxoSweeper interface { + // SweepInput sweeps inputs back into the wallet. + SweepInput(input input.Input, params sweep.Params) (chan sweep.Result, + error) + + // CreateSweepTx accepts a list of inputs and signs and generates a txn + // that spends from them. This method also makes an accurate fee + // estimate before generating the required witnesses. + CreateSweepTx(inputs []input.Input, feePref sweep.FeePreference, + currentBlockHeight uint32) (*wire.MsgTx, error) + + // RelayFeePerKW returns the minimum fee rate required for transactions + // to be relayed. + RelayFeePerKW() chainfee.SatPerKWeight + + // UpdateParams allows updating the sweep parameters of a pending input + // in the UtxoSweeper. This function can be used to provide an updated + // fee preference that will be used for a new sweep transaction of the + // input that will act as a replacement transaction (RBF) of the + // original sweeping transaction, if any. 
+ UpdateParams(input wire.OutPoint, params sweep.ParamsUpdate) ( + chan sweep.Result, error) +} diff --git a/contractcourt/mock_registry_test.go b/contractcourt/mock_registry_test.go index e450228930..a7f430f23e 100644 --- a/contractcourt/mock_registry_test.go +++ b/contractcourt/mock_registry_test.go @@ -16,15 +16,15 @@ type notifyExitHopData struct { } type mockRegistry struct { - notifyChan chan notifyExitHopData - notifyErr error - notifyEvent *invoices.HodlEvent + notifyChan chan notifyExitHopData + notifyErr error + notifyResolution invoices.HtlcResolution } func (r *mockRegistry) NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi, expiry uint32, currentHeight int32, circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - eob []byte) (*invoices.HodlEvent, error) { + payload invoices.Payload) (invoices.HtlcResolution, error) { r.notifyChan <- notifyExitHopData{ hodlChan: hodlChan, @@ -34,7 +34,7 @@ func (r *mockRegistry) NotifyExitHopHtlc(payHash lntypes.Hash, currentHeight: currentHeight, } - return r.notifyEvent, r.notifyErr + return r.notifyResolution, r.notifyErr } func (r *mockRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) {} diff --git a/discovery/chan_series.go b/discovery/chan_series.go index ddea51041f..ffb59b4ef5 100644 --- a/discovery/chan_series.go +++ b/discovery/chan_series.go @@ -6,6 +6,7 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/netann" "github.com/lightningnetwork/lnd/routing/route" ) @@ -119,7 +120,7 @@ func (c *ChanSeries) UpdatesInHorizon(chain chainhash.Hash, continue } - chanAnn, edge1, edge2, err := CreateChanAnnouncement( + chanAnn, edge1, edge2, err := netann.CreateChanAnnouncement( channel.Info.AuthProof, channel.Info, channel.Policy1, channel.Policy2, ) @@ -258,7 +259,7 @@ func (c *ChanSeries) FetchChanAnns(chain chainhash.Hash, continue } - chanAnn, edge1, 
edge2, err := CreateChanAnnouncement( + chanAnn, edge1, edge2, err := netann.CreateChanAnnouncement( channel.Info.AuthProof, channel.Info, channel.Policy1, channel.Policy2, ) @@ -323,20 +324,7 @@ func (c *ChanSeries) FetchChanUpdates(chain chainhash.Hash, chanUpdates := make([]*lnwire.ChannelUpdate, 0, 2) if e1 != nil { - chanUpdate := &lnwire.ChannelUpdate{ - ChainHash: chanInfo.ChainHash, - ShortChannelID: shortChanID, - Timestamp: uint32(e1.LastUpdate.Unix()), - MessageFlags: e1.MessageFlags, - ChannelFlags: e1.ChannelFlags, - TimeLockDelta: e1.TimeLockDelta, - HtlcMinimumMsat: e1.MinHTLC, - HtlcMaximumMsat: e1.MaxHTLC, - BaseFee: uint32(e1.FeeBaseMSat), - FeeRate: uint32(e1.FeeProportionalMillionths), - ExtraOpaqueData: e1.ExtraOpaqueData, - } - chanUpdate.Signature, err = lnwire.NewSigFromRawSignature(e1.SigBytes) + chanUpdate, err := netann.ChannelUpdateFromEdge(chanInfo, e1) if err != nil { return nil, err } @@ -344,20 +332,7 @@ func (c *ChanSeries) FetchChanUpdates(chain chainhash.Hash, chanUpdates = append(chanUpdates, chanUpdate) } if e2 != nil { - chanUpdate := &lnwire.ChannelUpdate{ - ChainHash: chanInfo.ChainHash, - ShortChannelID: shortChanID, - Timestamp: uint32(e2.LastUpdate.Unix()), - MessageFlags: e2.MessageFlags, - ChannelFlags: e2.ChannelFlags, - TimeLockDelta: e2.TimeLockDelta, - HtlcMinimumMsat: e2.MinHTLC, - HtlcMaximumMsat: e2.MaxHTLC, - BaseFee: uint32(e2.FeeBaseMSat), - FeeRate: uint32(e2.FeeProportionalMillionths), - ExtraOpaqueData: e2.ExtraOpaqueData, - } - chanUpdate.Signature, err = lnwire.NewSigFromRawSignature(e2.SigBytes) + chanUpdate, err := netann.ChannelUpdateFromEdge(chanInfo, e2) if err != nil { return nil, err } diff --git a/discovery/gossiper.go b/discovery/gossiper.go index 5e8e02159c..031394123f 100644 --- a/discovery/gossiper.go +++ b/discovery/gossiper.go @@ -20,6 +20,7 @@ import ( "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/multimutex" + 
"github.com/lightningnetwork/lnd/netann" "github.com/lightningnetwork/lnd/routing" "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/ticker" @@ -350,15 +351,6 @@ func New(cfg Config, selfKey *btcec.PublicKey) *AuthenticatedGossiper { return gossiper } -// updatedChanPolicies is a set of channel policies that have been successfully -// updated and written to disk, or an error if the policy update failed. This -// struct's map field is intended to be used for updating channel policies on -// the link layer. -type updatedChanPolicies struct { - chanPolicies map[wire.OutPoint]*channeldb.ChannelEdgePolicy - err error -} - // EdgeWithInfo contains the information that is required to update an edge. type EdgeWithInfo struct { // Info describes the channel. @@ -1364,6 +1356,22 @@ func (d *AuthenticatedGossiper) processChanPolicyUpdate( return chanUpdates, nil } +// remotePubFromChanInfo returns the public key of the remote peer given a +// ChannelEdgeInfo that describe a channel we have with them. +func remotePubFromChanInfo(chanInfo *channeldb.ChannelEdgeInfo, + chanFlags lnwire.ChanUpdateChanFlags) [33]byte { + + var remotePubKey [33]byte + switch { + case chanFlags&lnwire.ChanUpdateDirection == 0: + remotePubKey = chanInfo.NodeKey2Bytes + case chanFlags&lnwire.ChanUpdateDirection == 1: + remotePubKey = chanInfo.NodeKey1Bytes + } + + return remotePubKey +} + // processRejectedEdge examines a rejected edge to see if we can extract any // new announcements from it. An edge will get rejected if we already added // the same edge without AuthProof to the graph. If the received announcement @@ -1400,7 +1408,7 @@ func (d *AuthenticatedGossiper) processRejectedEdge( // We'll then create then validate the new fully assembled // announcement. 
- chanAnn, e1Ann, e2Ann, err := CreateChanAnnouncement( + chanAnn, e1Ann, e2Ann, err := netann.CreateChanAnnouncement( proof, chanInfo, e1, e2, ) if err != nil { @@ -1459,9 +1467,7 @@ func (d *AuthenticatedGossiper) addNode(msg *lnwire.NodeAnnouncement) error { } timestamp := time.Unix(int64(msg.Timestamp), 0) - features := lnwire.NewFeatureVector( - msg.Features, lnwire.GlobalFeatures, - ) + features := lnwire.NewFeatureVector(msg.Features, lnwire.Features) node := &channeldb.LightningNode{ HaveNodeAnnouncement: true, LastUpdate: timestamp, @@ -2159,7 +2165,7 @@ func (d *AuthenticatedGossiper) processNetworkAnnouncement( msg.ChannelID, peerID) - chanAnn, _, _, err := CreateChanAnnouncement( + chanAnn, _, _, err := netann.CreateChanAnnouncement( chanInfo.AuthProof, chanInfo, e1, e2, ) @@ -2242,7 +2248,7 @@ func (d *AuthenticatedGossiper) processNetworkAnnouncement( dbProof.BitcoinSig1Bytes = oppositeProof.BitcoinSignature.ToSignatureBytes() dbProof.BitcoinSig2Bytes = msg.BitcoinSignature.ToSignatureBytes() } - chanAnn, e1Ann, e2Ann, err := CreateChanAnnouncement( + chanAnn, e1Ann, e2Ann, err := netann.CreateChanAnnouncement( &dbProof, chanInfo, e1, e2, ) if err != nil { @@ -2447,42 +2453,23 @@ func (d *AuthenticatedGossiper) updateChannel(info *channeldb.ChannelEdgeInfo, edge *channeldb.ChannelEdgePolicy) (*lnwire.ChannelAnnouncement, *lnwire.ChannelUpdate, error) { - // Make sure timestamp is always increased, such that our update gets - // propagated. 
- timestamp := time.Now().Unix() - if timestamp <= edge.LastUpdate.Unix() { - timestamp = edge.LastUpdate.Unix() + 1 - } - edge.LastUpdate = time.Unix(timestamp, 0) - - chanUpdate := &lnwire.ChannelUpdate{ - ChainHash: info.ChainHash, - ShortChannelID: lnwire.NewShortChanIDFromInt(edge.ChannelID), - Timestamp: uint32(timestamp), - MessageFlags: edge.MessageFlags, - ChannelFlags: edge.ChannelFlags, - TimeLockDelta: edge.TimeLockDelta, - HtlcMinimumMsat: edge.MinHTLC, - HtlcMaximumMsat: edge.MaxHTLC, - BaseFee: uint32(edge.FeeBaseMSat), - FeeRate: uint32(edge.FeeProportionalMillionths), - ExtraOpaqueData: edge.ExtraOpaqueData, - } + // Parse the unsigned edge into a channel update. + chanUpdate := netann.UnsignedChannelUpdateFromEdge(info, edge) - // With the update applied, we'll generate a new signature over a - // digest of the channel announcement itself. - sig, err := SignAnnouncement(d.cfg.AnnSigner, d.selfKey, chanUpdate) + // We'll generate a new signature over a digest of the channel + // announcement itself and update the timestamp to ensure it propagate. + err := netann.SignChannelUpdate( + d.cfg.AnnSigner, d.selfKey, chanUpdate, + netann.ChanUpdSetTimestamp, + ) if err != nil { return nil, nil, err } // Next, we'll set the new signature in place, and update the reference // in the backing slice. - edge.SetSigBytes(sig.Serialize()) - chanUpdate.Signature, err = lnwire.NewSigFromSignature(sig) - if err != nil { - return nil, nil, err - } + edge.LastUpdate = time.Unix(int64(chanUpdate.Timestamp), 0) + edge.SigBytes = chanUpdate.Signature.ToSignatureBytes() // To ensure that our signature is valid, we'll verify it ourself // before committing it to the slice returned. 
diff --git a/discovery/gossiper_test.go b/discovery/gossiper_test.go index 4252397c15..75d3cef67e 100644 --- a/discovery/gossiper_test.go +++ b/discovery/gossiper_test.go @@ -23,9 +23,11 @@ import ( "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnpeer" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/netann" "github.com/lightningnetwork/lnd/routing" "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/ticker" @@ -95,7 +97,7 @@ type mockSigner struct { } func (n *mockSigner) SignMessage(pubKey *btcec.PublicKey, - msg []byte) (*btcec.Signature, error) { + msg []byte) (input.Signature, error) { if !pubKey.IsEqual(n.privKey.PubKey()) { return nil, fmt.Errorf("unknown public key") @@ -550,7 +552,7 @@ func createNodeAnnouncement(priv *btcec.PrivateKey, } signer := mockSigner{priv} - sig, err := SignAnnouncement(&signer, priv.PubKey(), a) + sig, err := netann.SignAnnouncement(&signer, priv.PubKey(), a) if err != nil { return nil, err } @@ -602,7 +604,7 @@ func createUpdateAnnouncement(blockHeight uint32, func signUpdate(nodeKey *btcec.PrivateKey, a *lnwire.ChannelUpdate) error { pub := nodeKey.PubKey() signer := mockSigner{nodeKey} - sig, err := SignAnnouncement(&signer, pub, a) + sig, err := netann.SignAnnouncement(&signer, pub, a) if err != nil { return err } @@ -644,7 +646,7 @@ func createRemoteChannelAnnouncement(blockHeight uint32, pub := nodeKeyPriv1.PubKey() signer := mockSigner{nodeKeyPriv1} - sig, err := SignAnnouncement(&signer, pub, a) + sig, err := netann.SignAnnouncement(&signer, pub, a) if err != nil { return nil, err } @@ -655,7 +657,7 @@ func createRemoteChannelAnnouncement(blockHeight uint32, pub = nodeKeyPriv2.PubKey() signer = mockSigner{nodeKeyPriv2} - sig, err = SignAnnouncement(&signer, pub, a) + sig, err 
= netann.SignAnnouncement(&signer, pub, a) if err != nil { return nil, err } @@ -666,7 +668,7 @@ func createRemoteChannelAnnouncement(blockHeight uint32, pub = bitcoinKeyPriv1.PubKey() signer = mockSigner{bitcoinKeyPriv1} - sig, err = SignAnnouncement(&signer, pub, a) + sig, err = netann.SignAnnouncement(&signer, pub, a) if err != nil { return nil, err } @@ -677,7 +679,7 @@ func createRemoteChannelAnnouncement(blockHeight uint32, pub = bitcoinKeyPriv2.PubKey() signer = mockSigner{bitcoinKeyPriv2} - sig, err = SignAnnouncement(&signer, pub, a) + sig, err = netann.SignAnnouncement(&signer, pub, a) if err != nil { return nil, err } diff --git a/discovery/message_store.go b/discovery/message_store.go index e0c10a865a..207f857ff8 100644 --- a/discovery/message_store.go +++ b/discovery/message_store.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" ) @@ -68,8 +68,8 @@ var _ GossipMessageStore = (*MessageStore)(nil) // NewMessageStore creates a new message store backed by a channeldb instance. 
func NewMessageStore(db *channeldb.DB) (*MessageStore, error) { - err := db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(messageStoreBucket) + err := kvdb.Batch(db.Backend, func(tx kvdb.RwTx) error { + _, err := tx.CreateTopLevelBucket(messageStoreBucket) return err }) if err != nil { @@ -124,8 +124,8 @@ func (s *MessageStore) AddMessage(msg lnwire.Message, peerPubKey [33]byte) error return err } - return s.db.Batch(func(tx *bbolt.Tx) error { - messageStore := tx.Bucket(messageStoreBucket) + return kvdb.Batch(s.db.Backend, func(tx kvdb.RwTx) error { + messageStore := tx.ReadWriteBucket(messageStoreBucket) if messageStore == nil { return ErrCorruptedMessageStore } @@ -145,8 +145,8 @@ func (s *MessageStore) DeleteMessage(msg lnwire.Message, return err } - return s.db.Batch(func(tx *bbolt.Tx) error { - messageStore := tx.Bucket(messageStoreBucket) + return kvdb.Batch(s.db.Backend, func(tx kvdb.RwTx) error { + messageStore := tx.ReadWriteBucket(messageStoreBucket) if messageStore == nil { return ErrCorruptedMessageStore } @@ -200,8 +200,8 @@ func readMessage(msgBytes []byte) (lnwire.Message, error) { // all peers. 
func (s *MessageStore) Messages() (map[[33]byte][]lnwire.Message, error) { msgs := make(map[[33]byte][]lnwire.Message) - err := s.db.View(func(tx *bbolt.Tx) error { - messageStore := tx.Bucket(messageStoreBucket) + err := kvdb.View(s.db, func(tx kvdb.ReadTx) error { + messageStore := tx.ReadBucket(messageStoreBucket) if messageStore == nil { return ErrCorruptedMessageStore } @@ -238,13 +238,13 @@ func (s *MessageStore) MessagesForPeer( peerPubKey [33]byte) ([]lnwire.Message, error) { var msgs []lnwire.Message - err := s.db.View(func(tx *bbolt.Tx) error { - messageStore := tx.Bucket(messageStoreBucket) + err := kvdb.View(s.db, func(tx kvdb.ReadTx) error { + messageStore := tx.ReadBucket(messageStoreBucket) if messageStore == nil { return ErrCorruptedMessageStore } - c := messageStore.Cursor() + c := messageStore.ReadCursor() k, v := c.Seek(peerPubKey[:]) for ; bytes.HasPrefix(k, peerPubKey[:]); k, v = c.Next() { // Deserialize the message from its raw bytes and filter @@ -273,8 +273,8 @@ func (s *MessageStore) MessagesForPeer( // Peers returns the public key of all peers with messages within the store. 
func (s *MessageStore) Peers() (map[[33]byte]struct{}, error) { peers := make(map[[33]byte]struct{}) - err := s.db.View(func(tx *bbolt.Tx) error { - messageStore := tx.Bucket(messageStoreBucket) + err := kvdb.View(s.db, func(tx kvdb.ReadTx) error { + messageStore := tx.ReadBucket(messageStoreBucket) if messageStore == nil { return ErrCorruptedMessageStore } diff --git a/discovery/message_store_test.go b/discovery/message_store_test.go index a106ad2256..fc7ba3360e 100644 --- a/discovery/message_store_test.go +++ b/discovery/message_store_test.go @@ -9,9 +9,9 @@ import ( "testing" "github.com/btcsuite/btcd/btcec" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" ) @@ -236,8 +236,8 @@ func TestMessageStoreUnsupportedMessage(t *testing.T) { if _, err := lnwire.WriteMessage(&rawMsg, unsupportedMsg, 0); err != nil { t.Fatalf("unable to serialize message: %v", err) } - err = msgStore.db.Update(func(tx *bbolt.Tx) error { - messageStore := tx.Bucket(messageStoreBucket) + err = kvdb.Update(msgStore.db, func(tx kvdb.RwTx) error { + messageStore := tx.ReadWriteBucket(messageStoreBucket) return messageStore.Put(msgKey, rawMsg.Bytes()) }) if err != nil { diff --git a/discovery/mock_test.go b/discovery/mock_test.go index 57fc319a59..714f6b4ac0 100644 --- a/discovery/mock_test.go +++ b/discovery/mock_test.go @@ -45,8 +45,8 @@ func (p *mockPeer) SendMessageLazy(sync bool, msgs ...lnwire.Message) error { func (p *mockPeer) AddNewChannel(_ *channeldb.OpenChannel, _ <-chan struct{}) error { return nil } -func (p *mockPeer) WipeChannel(_ *wire.OutPoint) error { return nil } -func (p *mockPeer) IdentityKey() *btcec.PublicKey { return p.pk } +func (p *mockPeer) WipeChannel(_ *wire.OutPoint) {} +func (p *mockPeer) IdentityKey() *btcec.PublicKey { return p.pk } func (p *mockPeer) PubKey() [33]byte { var pubkey [33]byte copy(pubkey[:], 
p.pk.SerializeCompressed()) @@ -56,10 +56,10 @@ func (p *mockPeer) Address() net.Addr { return nil } func (p *mockPeer) QuitSignal() <-chan struct{} { return p.quit } -func (p *mockPeer) LocalGlobalFeatures() *lnwire.FeatureVector { +func (p *mockPeer) LocalFeatures() *lnwire.FeatureVector { return nil } -func (p *mockPeer) RemoteGlobalFeatures() *lnwire.FeatureVector { +func (p *mockPeer) RemoteFeatures() *lnwire.FeatureVector { return nil } diff --git a/discovery/sync_manager_test.go b/discovery/sync_manager_test.go index d165447f52..c7a228f8cf 100644 --- a/discovery/sync_manager_test.go +++ b/discovery/sync_manager_test.go @@ -529,12 +529,16 @@ func assertSyncerStatus(t *testing.T, s *GossipSyncer, syncState syncerState, func assertTransitionToChansSynced(t *testing.T, s *GossipSyncer, peer *mockPeer) { t.Helper() - assertMsgSent(t, peer, &lnwire.QueryChannelRange{ + query := &lnwire.QueryChannelRange{ FirstBlockHeight: 0, NumBlocks: math.MaxUint32, - }) + } + assertMsgSent(t, peer, query) - s.ProcessQueryMsg(&lnwire.ReplyChannelRange{Complete: 1}, nil) + s.ProcessQueryMsg(&lnwire.ReplyChannelRange{ + QueryChannelRange: *query, + Complete: 1, + }, nil) chanSeries := s.cfg.channelSeries.(*mockChannelGraphTimeSeries) diff --git a/discovery/syncer.go b/discovery/syncer.go index 65a1b8f380..a154f99008 100644 --- a/discovery/syncer.go +++ b/discovery/syncer.go @@ -298,6 +298,17 @@ type GossipSyncer struct { // received over, these will be read by the replyHandler. queryMsgs chan lnwire.Message + // curQueryRangeMsg keeps track of the latest QueryChannelRange message + // we've sent to a peer to ensure we've consumed all expected replies. + // This field is primarily used within the waitingQueryChanReply state. + curQueryRangeMsg *lnwire.QueryChannelRange + + // prevReplyChannelRange keeps track of the previous ReplyChannelRange + // message we've received from a peer to ensure they've fully replied to + // our query by ensuring they covered our requested block range. 
This + // field is primarily used within the waitingQueryChanReply state. + prevReplyChannelRange *lnwire.ReplyChannelRange + // bufferedChanRangeReplies is used in the waitingQueryChanReply to // buffer all the chunked response to our query. bufferedChanRangeReplies []lnwire.ShortChannelID @@ -666,10 +677,64 @@ func (g *GossipSyncer) synchronizeChanIDs() (bool, error) { return false, err } +// isLegacyReplyChannelRange determines where a ReplyChannelRange message is +// considered legacy. There was a point where lnd used to include the same query +// over multiple replies, rather than including the portion of the query the +// reply is handling. We'll use this as a way of detecting whether we are +// communicating with a legacy node so we can properly sync with them. +func isLegacyReplyChannelRange(query *lnwire.QueryChannelRange, + reply *lnwire.ReplyChannelRange) bool { + + return reply.QueryChannelRange == *query +} + // processChanRangeReply is called each time the GossipSyncer receives a new // reply to the initial range query to discover new channels that it didn't // previously know of. func (g *GossipSyncer) processChanRangeReply(msg *lnwire.ReplyChannelRange) error { + // If we're not communicating with a legacy node, we'll apply some + // further constraints on their reply to ensure it satisfies our query. + if !isLegacyReplyChannelRange(g.curQueryRangeMsg, msg) { + // The first block should be within our original request. + if msg.FirstBlockHeight < g.curQueryRangeMsg.FirstBlockHeight { + return fmt.Errorf("reply includes channels for height "+ + "%v prior to query %v", msg.FirstBlockHeight, + g.curQueryRangeMsg.FirstBlockHeight) + } + + // The last block should also be. We don't need to check the + // intermediate ones because they should already be in sorted + // order. 
+ replyLastHeight := msg.QueryChannelRange.LastBlockHeight() + queryLastHeight := g.curQueryRangeMsg.LastBlockHeight() + if replyLastHeight > queryLastHeight { + return fmt.Errorf("reply includes channels for height "+ + "%v after query %v", replyLastHeight, + queryLastHeight) + } + + // If we've previously received a reply for this query, look at + // its last block to ensure the current reply properly follows + // it. + if g.prevReplyChannelRange != nil { + prevReply := g.prevReplyChannelRange + prevReplyLastHeight := prevReply.LastBlockHeight() + + // The current reply can either start from the previous + // reply's last block, if there are still more channels + // for the same block, or the block after. + if msg.FirstBlockHeight != prevReplyLastHeight && + msg.FirstBlockHeight != prevReplyLastHeight+1 { + + return fmt.Errorf("first block of reply %v "+ + "does not continue from last block of "+ + "previous %v", msg.FirstBlockHeight, + prevReplyLastHeight) + } + } + } + + g.prevReplyChannelRange = msg g.bufferedChanRangeReplies = append( g.bufferedChanRangeReplies, msg.ShortChanIDs..., ) @@ -679,8 +744,25 @@ func (g *GossipSyncer) processChanRangeReply(msg *lnwire.ReplyChannelRange) erro // If this isn't the last response, then we can exit as we've already // buffered the latest portion of the streaming reply. - if msg.Complete == 0 { - return nil + switch { + // If we're communicating with a legacy node, we'll need to look at the + // complete field. + case isLegacyReplyChannelRange(g.curQueryRangeMsg, msg): + if msg.Complete == 0 { + return nil + } + + // Otherwise, we'll look at the reply's height range. + default: + replyLastHeight := msg.QueryChannelRange.LastBlockHeight() + queryLastHeight := g.curQueryRangeMsg.LastBlockHeight() + + // TODO(wilmer): This might require some padding if the remote + // node is not aware of the last height we sent them, i.e., is + // behind a few blocks from us. 
+ if replyLastHeight < queryLastHeight { + return nil + } } log.Infof("GossipSyncer(%x): filtering through %v chans", @@ -696,8 +778,10 @@ func (g *GossipSyncer) processChanRangeReply(msg *lnwire.ReplyChannelRange) erro } // As we've received the entirety of the reply, we no longer need to - // hold on to the set of buffered replies, so we'll let that be garbage - // collected now. + // hold on to the set of buffered replies or the original query that + // prompted the replies, so we'll let that be garbage collected now. + g.curQueryRangeMsg = nil + g.prevReplyChannelRange = nil g.bufferedChanRangeReplies = nil // If there aren't any channels that we don't know of, then we can @@ -757,11 +841,14 @@ func (g *GossipSyncer) genChanRangeQuery( // Finally, we'll craft the channel range query, using our starting // height, then asking for all known channels to the foreseeable end of // the main chain. - return &lnwire.QueryChannelRange{ + query := &lnwire.QueryChannelRange{ ChainHash: g.cfg.chainHash, FirstBlockHeight: startHeight, NumBlocks: math.MaxUint32 - startHeight, - }, nil + } + g.curQueryRangeMsg = query + + return query, nil } // replyPeerQueries is called in response to any query by the remote peer. @@ -807,6 +894,23 @@ func (g *GossipSyncer) replyPeerQueries(msg lnwire.Message) error { // ensure that our final fragment carries the "complete" bit to indicate the // end of our streaming response. func (g *GossipSyncer) replyChanRangeQuery(query *lnwire.QueryChannelRange) error { + // Before responding, we'll check to ensure that the remote peer is + // querying for the same chain that we're on. If not, we'll send back a + // response with a complete value of zero to indicate we're on a + // different chain. 
+ if g.cfg.chainHash != query.ChainHash { + log.Warnf("Remote peer requested QueryChannelRange for "+ + "chain=%v, we're on chain=%v", query.ChainHash, + g.cfg.chainHash) + + return g.cfg.sendToPeerSync(&lnwire.ReplyChannelRange{ + QueryChannelRange: *query, + Complete: 0, + EncodingType: g.cfg.encodingType, + ShortChanIDs: nil, + }) + } + log.Infof("GossipSyncer(%x): filtering chan range: start_height=%v, "+ "num_blocks=%v", g.cfg.peerPub[:], query.FirstBlockHeight, query.NumBlocks) @@ -814,8 +918,9 @@ func (g *GossipSyncer) replyChanRangeQuery(query *lnwire.QueryChannelRange) erro // Next, we'll consult the time series to obtain the set of known // channel ID's that match their query. startBlock := query.FirstBlockHeight + endBlock := startBlock + query.NumBlocks - 1 channelRange, err := g.cfg.channelSeries.FilterChannelRange( - query.ChainHash, startBlock, startBlock+query.NumBlocks, + query.ChainHash, startBlock, endBlock, ) if err != nil { return err @@ -824,6 +929,14 @@ func (g *GossipSyncer) replyChanRangeQuery(query *lnwire.QueryChannelRange) erro // TODO(roasbeef): means can't send max uint above? // * or make internal 64 + // In the base case (no actual response) the first block and last block + // will match those of the query. In the loop below, we'll update these + // two variables incrementally with each chunk to properly compute the + // starting block for each response and the number of blocks in a + // response. + firstBlockHeight := startBlock + lastBlockHeight := endBlock + numChannels := int32(len(channelRange)) numChansSent := int32(0) for { @@ -854,13 +967,48 @@ func (g *GossipSyncer) replyChanRangeQuery(query *lnwire.QueryChannelRange) erro "size=%v", g.cfg.peerPub[:], len(channelChunk)) } + // If we have any channels at all to return, then we need to + // update our pointers to the first and last blocks for each + // response. 
+ if len(channelChunk) > 0 { + // If this is the first response we'll send, we'll point + // the first block to the first block in the query. + // Otherwise, we'll continue from the block we left off + // at. + if numChansSent == 0 { + firstBlockHeight = startBlock + } else { + firstBlockHeight = lastBlockHeight + } + + // If this is the last response we'll send, we'll point + // the last block to the last block of the query. + // Otherwise, we'll set it to the height of the last + // channel in the chunk. + if isFinalChunk { + lastBlockHeight = endBlock + } else { + lastBlockHeight = channelChunk[len(channelChunk)-1].BlockHeight + } + } + + // The number of blocks contained in this response (the total + // span) is the difference between the last channel ID and the + // first in the range. We add one as even if all channels + // returned are in the same block, we need to count that. + numBlocksInResp := lastBlockHeight - firstBlockHeight + 1 + // With our chunk assembled, we'll now send to the remote peer // the current chunk. replyChunk := lnwire.ReplyChannelRange{ - QueryChannelRange: *query, - Complete: 0, - EncodingType: g.cfg.encodingType, - ShortChanIDs: channelChunk, + QueryChannelRange: lnwire.QueryChannelRange{ + ChainHash: query.ChainHash, + NumBlocks: numBlocksInResp, + FirstBlockHeight: firstBlockHeight, + }, + Complete: 0, + EncodingType: g.cfg.encodingType, + ShortChanIDs: channelChunk, } if isFinalChunk { replyChunk.Complete = 1 @@ -890,8 +1038,8 @@ func (g *GossipSyncer) replyShortChanIDs(query *lnwire.QueryShortChanIDs) error // different chain. 
if g.cfg.chainHash != query.ChainHash { log.Warnf("Remote peer requested QueryShortChanIDs for "+ - "chain=%v, we're on chain=%v", g.cfg.chainHash, - query.ChainHash) + "chain=%v, we're on chain=%v", query.ChainHash, + g.cfg.chainHash) return g.cfg.sendToPeerSync(&lnwire.ReplyShortChanIDsEnd{ ChainHash: query.ChainHash, diff --git a/discovery/syncer_test.go b/discovery/syncer_test.go index ea99e4ec3b..606fc0629c 100644 --- a/discovery/syncer_test.go +++ b/discovery/syncer_test.go @@ -533,6 +533,61 @@ func TestGossipSyncerApplyGossipFilter(t *testing.T) { } } +// TestGossipSyncerQueryChannelRangeWrongChainHash tests that if we receive a +// channel range query for the wrong chain, then we send back a response with no +// channels and complete=0. +func TestGossipSyncerQueryChannelRangeWrongChainHash(t *testing.T) { + t.Parallel() + + // First, we'll create a GossipSyncer instance with a canned sendToPeer + // message to allow us to intercept their potential sends. + msgChan, syncer, _ := newTestSyncer( + lnwire.NewShortChanIDFromInt(10), defaultEncoding, + defaultChunkSize, + ) + + // We'll now ask the syncer to reply to a channel range query, but for a + // chain that it isn't aware of. + query := &lnwire.QueryChannelRange{ + ChainHash: *chaincfg.SimNetParams.GenesisHash, + FirstBlockHeight: 0, + NumBlocks: math.MaxUint32, + } + err := syncer.replyChanRangeQuery(query) + if err != nil { + t.Fatalf("unable to process short chan ID's: %v", err) + } + + select { + case <-time.After(time.Second * 15): + t.Fatalf("no msgs received") + + case msgs := <-msgChan: + // We should get back exactly one message, that's a + // ReplyChannelRange with a matching query, and a complete value + // of zero. 
+ if len(msgs) != 1 { + t.Fatalf("wrong messages: expected %v, got %v", + 1, len(msgs)) + } + + msg, ok := msgs[0].(*lnwire.ReplyChannelRange) + if !ok { + t.Fatalf("expected lnwire.ReplyChannelRange, got %T", msg) + } + + if msg.QueryChannelRange != *query { + t.Fatalf("wrong query channel range in reply: "+ + "expected: %v\ngot: %v", spew.Sdump(*query), + spew.Sdump(msg.QueryChannelRange)) + } + if msg.Complete != 0 { + t.Fatalf("expected complete set to 0, got %v", + msg.Complete) + } + } +} + // TestGossipSyncerReplyShortChanIDsWrongChainHash tests that if we get a chan // ID query for the wrong chain, then we send back only a short ID end with // complete=0. @@ -709,20 +764,33 @@ func TestGossipSyncerReplyChanRangeQuery(t *testing.T) { // Next, we'll craft a query to ask for all the new chan ID's after // block 100. + const startingBlockHeight = 100 + const numBlocks = 50 + const endingBlockHeight = startingBlockHeight + numBlocks - 1 query := &lnwire.QueryChannelRange{ - FirstBlockHeight: 100, - NumBlocks: 50, + FirstBlockHeight: uint32(startingBlockHeight), + NumBlocks: uint32(numBlocks), } // We'll then launch a goroutine to reply to the query with a set of 5 // responses. This will ensure we get two full chunks, and one partial // chunk. - resp := []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(1), - lnwire.NewShortChanIDFromInt(2), - lnwire.NewShortChanIDFromInt(3), - lnwire.NewShortChanIDFromInt(4), - lnwire.NewShortChanIDFromInt(5), + queryResp := []lnwire.ShortChannelID{ + { + BlockHeight: uint32(startingBlockHeight), + }, + { + BlockHeight: 102, + }, + { + BlockHeight: 104, + }, + { + BlockHeight: 106, + }, + { + BlockHeight: 108, + }, } errCh := make(chan error, 1) @@ -733,14 +801,17 @@ func TestGossipSyncerReplyChanRangeQuery(t *testing.T) { return case filterReq := <-chanSeries.filterRangeReqs: // We should be querying for block 100 to 150. 
- if filterReq.startHeight != 100 && filterReq.endHeight != 150 { - errCh <- fmt.Errorf("wrong height range: %v", spew.Sdump(filterReq)) + if filterReq.startHeight != startingBlockHeight && + filterReq.endHeight != endingBlockHeight { + + errCh <- fmt.Errorf("wrong height range: %v", + spew.Sdump(filterReq)) return } // If the proper request was sent, then we'll respond // with our set of short channel ID's. - chanSeries.filterRangeResp <- resp + chanSeries.filterRangeResp <- queryResp errCh <- nil } }() @@ -767,16 +838,55 @@ func TestGossipSyncerReplyChanRangeQuery(t *testing.T) { t.Fatalf("expected ReplyChannelRange instead got %T", msg) } - // If this is not the last chunk, then Complete should - // be set to zero. Otherwise, it should be one. + // We'll determine the correct values of each field in + // each response based on the order that they were sent. + var ( + expectedFirstBlockHeight uint32 + expectedNumBlocks uint32 + expectedComplete uint8 + ) + switch { - case i < 2 && rangeResp.Complete != 0: - t.Fatalf("non-final chunk should have "+ - "Complete=0: %v", spew.Sdump(rangeResp)) + // The first reply should range from our starting block + // height until it reaches its maximum capacity of + // channels. + case i == 0: + expectedFirstBlockHeight = startingBlockHeight + expectedNumBlocks = chunkSize + 1 + + // The last reply should range starting from the next + // block of our previous reply up until the ending + // height of the query. It should also have the Complete + // bit set. + case i == numExpectedChunks-1: + expectedFirstBlockHeight = respMsgs[len(respMsgs)-1].BlockHeight + expectedNumBlocks = endingBlockHeight - expectedFirstBlockHeight + 1 + expectedComplete = 1 + + // Any intermediate replies should range starting from + // the next block of our previous reply up until it + // reaches its maximum capacity of channels. 
+ default: + expectedFirstBlockHeight = respMsgs[len(respMsgs)-1].BlockHeight + expectedNumBlocks = 5 + } - case i == 2 && rangeResp.Complete != 1: - t.Fatalf("final chunk should have "+ - "Complete=1: %v", spew.Sdump(rangeResp)) + switch { + case rangeResp.FirstBlockHeight != expectedFirstBlockHeight: + t.Fatalf("FirstBlockHeight in resp #%d "+ + "incorrect: expected %v, got %v", i+1, + expectedFirstBlockHeight, + rangeResp.FirstBlockHeight) + + case rangeResp.NumBlocks != expectedNumBlocks: + t.Fatalf("NumBlocks in resp #%d incorrect: "+ + "expected %v, got %v", i+1, + expectedNumBlocks, rangeResp.NumBlocks) + + case rangeResp.Complete != expectedComplete: + t.Fatalf("Complete in resp #%d incorrect: "+ + "expected %v, got %v", i+1, + expectedNumBlocks, rangeResp.Complete) } respMsgs = append(respMsgs, rangeResp.ShortChanIDs...) @@ -785,13 +895,13 @@ func TestGossipSyncerReplyChanRangeQuery(t *testing.T) { // We should get back exactly 5 short chan ID's, and they should match // exactly the ID's we sent as a reply. - if len(respMsgs) != len(resp) { + if len(respMsgs) != len(queryResp) { t.Fatalf("expected %v chan ID's, instead got %v", - len(resp), spew.Sdump(respMsgs)) + len(queryResp), spew.Sdump(respMsgs)) } - if !reflect.DeepEqual(resp, respMsgs) { + if !reflect.DeepEqual(queryResp, respMsgs) { t.Fatalf("mismatched response: expected %v, got %v", - spew.Sdump(resp), spew.Sdump(respMsgs)) + spew.Sdump(queryResp), spew.Sdump(respMsgs)) } // Wait for error from goroutine. @@ -934,35 +1044,91 @@ func TestGossipSyncerGenChanRangeQuery(t *testing.T) { } // TestGossipSyncerProcessChanRangeReply tests that we'll properly buffer -// replied channel replies until we have the complete version. If no new -// channels were discovered, then we should go directly to the chanSsSynced -// state. Otherwise, we should go to the queryNewChannels states. +// replied channel replies until we have the complete version. 
func TestGossipSyncerProcessChanRangeReply(t *testing.T) { t.Parallel() + t.Run("legacy", func(t *testing.T) { + testGossipSyncerProcessChanRangeReply(t, true) + }) + t.Run("block ranges", func(t *testing.T) { + testGossipSyncerProcessChanRangeReply(t, false) + }) +} + +// testGossipSyncerProcessChanRangeReply tests that we'll properly buffer +// replied channel replies until we have the complete version. The legacy +// option, if set, uses the Complete field of the reply to determine when we've +// received all expected replies. Otherwise, it looks at the block ranges of +// each reply instead. +func testGossipSyncerProcessChanRangeReply(t *testing.T, legacy bool) { + t.Parallel() + // First, we'll create a GossipSyncer instance with a canned sendToPeer // message to allow us to intercept their potential sends. + highestID := lnwire.ShortChannelID{ + BlockHeight: latestKnownHeight, + } _, syncer, chanSeries := newTestSyncer( - lnwire.NewShortChanIDFromInt(10), defaultEncoding, defaultChunkSize, + highestID, defaultEncoding, defaultChunkSize, ) startingState := syncer.state + query, err := syncer.genChanRangeQuery(true) + if err != nil { + t.Fatalf("unable to generate channel range query: %v", err) + } + + var replyQueries []*lnwire.QueryChannelRange + if legacy { + // Each reply query is the same as the original query in the + // legacy mode. + replyQueries = []*lnwire.QueryChannelRange{query, query, query} + } else { + // When interpreting block ranges, the first reply should start + // from our requested first block, and the last should end at + // our requested last block. 
+ replyQueries = []*lnwire.QueryChannelRange{ + { + FirstBlockHeight: 0, + NumBlocks: 11, + }, + { + FirstBlockHeight: 11, + NumBlocks: 1, + }, + { + FirstBlockHeight: 12, + NumBlocks: query.NumBlocks - 12, + }, + } + } + replies := []*lnwire.ReplyChannelRange{ { + QueryChannelRange: *replyQueries[0], ShortChanIDs: []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(10), + { + BlockHeight: 10, + }, }, }, { + QueryChannelRange: *replyQueries[1], ShortChanIDs: []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(11), + { + BlockHeight: 11, + }, }, }, { - Complete: 1, + QueryChannelRange: *replyQueries[2], + Complete: 1, ShortChanIDs: []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(12), + { + BlockHeight: 12, + }, }, }, } @@ -983,9 +1149,15 @@ func TestGossipSyncerProcessChanRangeReply(t *testing.T) { } expectedReq := []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(10), - lnwire.NewShortChanIDFromInt(11), - lnwire.NewShortChanIDFromInt(12), + { + BlockHeight: 10, + }, + { + BlockHeight: 11, + }, + { + BlockHeight: 12, + }, } // As we're about to send the final response, we'll launch a goroutine @@ -1036,48 +1208,6 @@ func TestGossipSyncerProcessChanRangeReply(t *testing.T) { t.Fatal(err) } } - - // We'll repeat our final reply again, but this time we won't send any - // new channels. As a result, we should transition over to the - // chansSynced state. - errCh = make(chan error, 1) - go func() { - select { - case <-time.After(time.Second * 15): - errCh <- errors.New("no query received") - return - case req := <-chanSeries.filterReq: - // We should get a request for the entire range of short - // chan ID's. - if !reflect.DeepEqual(expectedReq[2], req[0]) { - errCh <- fmt.Errorf("wrong request: expected %v, got %v", - expectedReq[2], req[0]) - return - } - - // We'll send back only the last two to simulate filtering. 
- chanSeries.filterResp <- []lnwire.ShortChannelID{} - errCh <- nil - } - }() - if err := syncer.processChanRangeReply(replies[2]); err != nil { - t.Fatalf("unable to process reply: %v", err) - } - - if syncer.syncState() != chansSynced { - t.Fatalf("wrong state: expected %v instead got %v", - chansSynced, syncer.state) - } - - // Wait for error from goroutine. - select { - case <-time.After(time.Second * 30): - t.Fatalf("goroutine did not return within 30 seconds") - case err := <-errCh: - if err != nil { - t.Fatal(err) - } - } } // TestGossipSyncerSynchronizeChanIDs tests that we properly request chunks of @@ -1213,17 +1343,17 @@ func TestGossipSyncerDelayDOS(t *testing.T) { // First, we'll create two GossipSyncer instances with a canned // sendToPeer message to allow us to intercept their potential sends. - startHeight := lnwire.ShortChannelID{ + highestID := lnwire.ShortChannelID{ BlockHeight: 1144, } msgChan1, syncer1, chanSeries1 := newTestSyncer( - startHeight, defaultEncoding, chunkSize, true, false, + highestID, defaultEncoding, chunkSize, true, false, ) syncer1.Start() defer syncer1.Stop() msgChan2, syncer2, chanSeries2 := newTestSyncer( - startHeight, defaultEncoding, chunkSize, false, true, + highestID, defaultEncoding, chunkSize, false, true, ) syncer2.Start() defer syncer2.Stop() @@ -1253,9 +1383,10 @@ func TestGossipSyncerDelayDOS(t *testing.T) { // inherently disjoint. var syncer2Chans []lnwire.ShortChannelID for i := 0; i < numTotalChans; i++ { - syncer2Chans = append( - syncer2Chans, lnwire.NewShortChanIDFromInt(uint64(i)), - ) + syncer2Chans = append(syncer2Chans, lnwire.ShortChannelID{ + BlockHeight: highestID.BlockHeight - 1, + TxIndex: uint32(i), + }) } // We'll kick off the test by asserting syncer1 sends over the @@ -1483,17 +1614,17 @@ func TestGossipSyncerRoutineSync(t *testing.T) { // First, we'll create two GossipSyncer instances with a canned // sendToPeer message to allow us to intercept their potential sends. 
- startHeight := lnwire.ShortChannelID{ + highestID := lnwire.ShortChannelID{ BlockHeight: 1144, } msgChan1, syncer1, chanSeries1 := newTestSyncer( - startHeight, defaultEncoding, chunkSize, true, false, + highestID, defaultEncoding, chunkSize, true, false, ) syncer1.Start() defer syncer1.Stop() msgChan2, syncer2, chanSeries2 := newTestSyncer( - startHeight, defaultEncoding, chunkSize, false, true, + highestID, defaultEncoding, chunkSize, false, true, ) syncer2.Start() defer syncer2.Stop() @@ -1501,9 +1632,9 @@ func TestGossipSyncerRoutineSync(t *testing.T) { // Although both nodes are at the same height, syncer will have 3 chan // ID's that syncer1 doesn't know of. syncer2Chans := []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(4), - lnwire.NewShortChanIDFromInt(5), - lnwire.NewShortChanIDFromInt(6), + {BlockHeight: highestID.BlockHeight - 3}, + {BlockHeight: highestID.BlockHeight - 2}, + {BlockHeight: highestID.BlockHeight - 1}, } // We'll kick off the test by passing over the QueryChannelRange @@ -1627,35 +1758,34 @@ func TestGossipSyncerAlreadySynced(t *testing.T) { // our chunk parsing works properly. With this value we should get 3 // queries: two full chunks, and one lingering chunk. const chunkSize = 2 + const numChans = 3 // First, we'll create two GossipSyncer instances with a canned // sendToPeer message to allow us to intercept their potential sends. - startHeight := lnwire.ShortChannelID{ + highestID := lnwire.ShortChannelID{ BlockHeight: 1144, } msgChan1, syncer1, chanSeries1 := newTestSyncer( - startHeight, defaultEncoding, chunkSize, + highestID, defaultEncoding, chunkSize, ) syncer1.Start() defer syncer1.Stop() msgChan2, syncer2, chanSeries2 := newTestSyncer( - startHeight, defaultEncoding, chunkSize, + highestID, defaultEncoding, chunkSize, ) syncer2.Start() defer syncer2.Stop() // The channel state of both syncers will be identical. They should // recognize this, and skip the sync phase below. 
- syncer1Chans := []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(1), - lnwire.NewShortChanIDFromInt(2), - lnwire.NewShortChanIDFromInt(3), - } - syncer2Chans := []lnwire.ShortChannelID{ - lnwire.NewShortChanIDFromInt(1), - lnwire.NewShortChanIDFromInt(2), - lnwire.NewShortChanIDFromInt(3), + var syncer1Chans, syncer2Chans []lnwire.ShortChannelID + for i := numChans; i > 0; i-- { + shortChanID := lnwire.ShortChannelID{ + BlockHeight: highestID.BlockHeight - uint32(i), + } + syncer1Chans = append(syncer1Chans, shortChanID) + syncer2Chans = append(syncer2Chans, shortChanID) } // We'll now kick off the test by allowing both side to send their diff --git a/discovery/utils.go b/discovery/utils.go deleted file mode 100644 index ce211493e1..0000000000 --- a/discovery/utils.go +++ /dev/null @@ -1,154 +0,0 @@ -package discovery - -import ( - "github.com/btcsuite/btcd/btcec" - "github.com/go-errors/errors" - "github.com/lightningnetwork/lnd/channeldb" - "github.com/lightningnetwork/lnd/lnwallet" - "github.com/lightningnetwork/lnd/lnwire" -) - -// CreateChanAnnouncement is a helper function which creates all channel -// announcements given the necessary channel related database items. This -// function is used to transform out database structs into the corresponding wire -// structs for announcing new channels to other peers, or simply syncing up a -// peer's initial routing table upon connect. -func CreateChanAnnouncement(chanProof *channeldb.ChannelAuthProof, - chanInfo *channeldb.ChannelEdgeInfo, - e1, e2 *channeldb.ChannelEdgePolicy) (*lnwire.ChannelAnnouncement, - *lnwire.ChannelUpdate, *lnwire.ChannelUpdate, error) { - - // First, using the parameters of the channel, along with the channel - // authentication chanProof, we'll create re-create the original - // authenticated channel announcement. 
- chanID := lnwire.NewShortChanIDFromInt(chanInfo.ChannelID) - chanAnn := &lnwire.ChannelAnnouncement{ - ShortChannelID: chanID, - NodeID1: chanInfo.NodeKey1Bytes, - NodeID2: chanInfo.NodeKey2Bytes, - ChainHash: chanInfo.ChainHash, - BitcoinKey1: chanInfo.BitcoinKey1Bytes, - BitcoinKey2: chanInfo.BitcoinKey2Bytes, - Features: lnwire.NewRawFeatureVector(), - ExtraOpaqueData: chanInfo.ExtraOpaqueData, - } - - var err error - chanAnn.BitcoinSig1, err = lnwire.NewSigFromRawSignature( - chanProof.BitcoinSig1Bytes, - ) - if err != nil { - return nil, nil, nil, err - } - chanAnn.BitcoinSig2, err = lnwire.NewSigFromRawSignature( - chanProof.BitcoinSig2Bytes, - ) - if err != nil { - return nil, nil, nil, err - } - chanAnn.NodeSig1, err = lnwire.NewSigFromRawSignature( - chanProof.NodeSig1Bytes, - ) - if err != nil { - return nil, nil, nil, err - } - chanAnn.NodeSig2, err = lnwire.NewSigFromRawSignature( - chanProof.NodeSig2Bytes, - ) - if err != nil { - return nil, nil, nil, err - } - - // We'll unconditionally queue the channel's existence chanProof as it - // will need to be processed before either of the channel update - // networkMsgs. - - // Since it's up to a node's policy as to whether they advertise the - // edge in a direction, we don't create an advertisement if the edge is - // nil. 
- var edge1Ann, edge2Ann *lnwire.ChannelUpdate - if e1 != nil { - edge1Ann = &lnwire.ChannelUpdate{ - ChainHash: chanInfo.ChainHash, - ShortChannelID: chanID, - Timestamp: uint32(e1.LastUpdate.Unix()), - MessageFlags: e1.MessageFlags, - ChannelFlags: e1.ChannelFlags, - TimeLockDelta: e1.TimeLockDelta, - HtlcMinimumMsat: e1.MinHTLC, - HtlcMaximumMsat: e1.MaxHTLC, - BaseFee: uint32(e1.FeeBaseMSat), - FeeRate: uint32(e1.FeeProportionalMillionths), - ExtraOpaqueData: e1.ExtraOpaqueData, - } - edge1Ann.Signature, err = lnwire.NewSigFromRawSignature(e1.SigBytes) - if err != nil { - return nil, nil, nil, err - } - } - if e2 != nil { - edge2Ann = &lnwire.ChannelUpdate{ - ChainHash: chanInfo.ChainHash, - ShortChannelID: chanID, - Timestamp: uint32(e2.LastUpdate.Unix()), - MessageFlags: e2.MessageFlags, - ChannelFlags: e2.ChannelFlags, - TimeLockDelta: e2.TimeLockDelta, - HtlcMinimumMsat: e2.MinHTLC, - HtlcMaximumMsat: e2.MaxHTLC, - BaseFee: uint32(e2.FeeBaseMSat), - FeeRate: uint32(e2.FeeProportionalMillionths), - ExtraOpaqueData: e2.ExtraOpaqueData, - } - edge2Ann.Signature, err = lnwire.NewSigFromRawSignature(e2.SigBytes) - if err != nil { - return nil, nil, nil, err - } - } - - return chanAnn, edge1Ann, edge2Ann, nil -} - -// SignAnnouncement is a helper function which is used to sign any outgoing -// channel node node announcement messages. 
-func SignAnnouncement(signer lnwallet.MessageSigner, pubKey *btcec.PublicKey, - msg lnwire.Message) (*btcec.Signature, error) { - - var ( - data []byte - err error - ) - - switch m := msg.(type) { - case *lnwire.ChannelAnnouncement: - data, err = m.DataToSign() - case *lnwire.ChannelUpdate: - data, err = m.DataToSign() - case *lnwire.NodeAnnouncement: - data, err = m.DataToSign() - default: - return nil, errors.New("can't sign message " + - "of this format") - } - if err != nil { - return nil, errors.Errorf("unable to get data to sign: %v", err) - } - - return signer.SignMessage(pubKey, data) -} - -// remotePubFromChanInfo returns the public key of the remote peer given a -// ChannelEdgeInfo that describe a channel we have with them. -func remotePubFromChanInfo(chanInfo *channeldb.ChannelEdgeInfo, - chanFlags lnwire.ChanUpdateChanFlags) [33]byte { - - var remotePubKey [33]byte - switch { - case chanFlags&lnwire.ChanUpdateDirection == 0: - remotePubKey = chanInfo.NodeKey2Bytes - case chanFlags&lnwire.ChanUpdateDirection == 1: - remotePubKey = chanInfo.NodeKey1Bytes - } - - return remotePubKey -} diff --git a/docker/README.md b/docker/README.md index 474188c503..b2b736d89c 100644 --- a/docker/README.md +++ b/docker/README.md @@ -23,7 +23,7 @@ environment for testing as one doesn't need to wait tens of minutes for blocks to arrive in order to test channel related functionality. Additionally, it's possible to spin up an arbitrary number of `lnd` instances within containers to create a mini development cluster. All state is saved between instances using a -shared value. +shared volume. Current workflow is big because we recreate the whole network by ourselves, next versions will use the started `btcd` bitcoin node in `testnet` and @@ -63,8 +63,12 @@ bitcoin into. # Init bitcoin network env variable: $ export NETWORK="simnet" +# Create persistent volumes for alice and bob. 
+$ docker volume create simnet_lnd_alice +$ docker volume create simnet_lnd_bob + # Run the "Alice" container and log into it: -$ docker-compose run -d --name alice lnd_btc +$ docker-compose run -d --name alice --volume simnet_lnd_alice:/root/.lnd lnd $ docker exec -i -t alice bash # Generate a new backward compatible nested p2sh address for Alice: @@ -75,10 +79,10 @@ $ MINING_ADDRESS= docker-compose up -d btcd # Generate 400 blocks (we need at least "100 >=" blocks because of coinbase # block maturity and "300 ~=" in order to activate segwit): -$ docker-compose run btcctl generate 400 +$ docker exec -it btcd /start-btcctl.sh generate 400 # Check that segwit is active: -$ docker-compose run btcctl getblockchaininfo | grep -A 1 segwit +$ docker exec -it btcd /start-btcctl.sh getblockchaininfo | grep -A 1 segwit ``` Check `Alice` balance: @@ -90,7 +94,7 @@ Connect `Bob` node to `Alice` node. ```bash # Run "Bob" node and log into it: -$ docker-compose run -d --name bob lnd_btc +$ docker-compose run -d --name bob --volume simnet_lnd_bob:/root/.lnd lnd $ docker exec -i -t bob bash # Get the identity pubkey of "Bob" node: @@ -159,7 +163,7 @@ Create the `Alice<->Bob` channel. 
alice$ lncli --network=simnet openchannel --node_key= --local_amt=1000000 # Include funding transaction in block thereby opening the channel: -$ docker-compose run btcctl generate 3 +$ docker exec -it btcd /start-btcctl.sh generate 3 # Check that channel with "Bob" was opened: alice$ lncli --network=simnet listchannels @@ -243,7 +247,7 @@ alice$ lncli --network=simnet listchannels alice$ lncli --network=simnet closechannel --funding_txid= --output_index= # Include close transaction in a block thereby closing the channel: -$ docker-compose run btcctl generate 3 +$ docker exec -it btcd /start-btcctl.sh generate 3 # Check "Alice" on-chain balance was credited by her settled amount in the channel: alice$ lncli --network=simnet walletbalance @@ -295,10 +299,7 @@ First of all you need to run `btcd` node in `testnet` and wait for it to be synced with test network (`May the Force and Patience be with you`). ```bash # Init bitcoin network env variable: -$ export NETWORK="testnet" - -# Run "btcd" node: -$ docker-compose up -d "btcd" +$ NETWORK="testnet" docker-compose up ``` After `btcd` synced, connect `Alice` to the `Faucet` node. diff --git a/docker/btcd/Dockerfile b/docker/btcd/Dockerfile index 94aac048c9..5de4a389b8 100644 --- a/docker/btcd/Dockerfile +++ b/docker/btcd/Dockerfile @@ -1,14 +1,18 @@ FROM golang:1.12-alpine as builder -MAINTAINER Olaoluwa Osuntokun +LABEL maintainer="Olaoluwa Osuntokun " # Install build dependencies such as git and glide. RUN apk add --no-cache git gcc musl-dev WORKDIR $GOPATH/src/github.com/btcsuite/btcd +# Pin down btcd to a version that we know works with lnd. +ARG BTCD_VERSION=v0.20.1-beta + # Grab and install the latest version of of btcd and all related dependencies. RUN git clone https://github.com/btcsuite/btcd.git . \ + && git checkout $BTCD_VERSION \ && GO111MODULE=on go install -v . ./cmd/... 
# Start a new image diff --git a/docker/btcd/start-btcctl.sh b/docker/btcd/start-btcctl.sh index 7ff1aefb6f..8bd5fda0ad 100755 --- a/docker/btcd/start-btcctl.sh +++ b/docker/btcd/start-btcctl.sh @@ -45,14 +45,14 @@ NETWORK=$(set_default "$NETWORK" "simnet") PARAMS="" if [ "$NETWORK" != "mainnet" ]; then - PARAMS=$(echo --$NETWORK) + PARAMS="--$NETWORK" fi PARAMS=$(echo $PARAMS \ "--rpccert=/rpc/rpc.cert" \ "--rpcuser=$RPCUSER" \ "--rpcpass=$RPCPASS" \ - "--rpcserver=rpcserver" \ + "--rpcserver=localhost" \ ) PARAMS="$PARAMS $@" diff --git a/docker/btcd/start-btcd.sh b/docker/btcd/start-btcd.sh index 9ef605ae2d..4f5f7ba367 100755 --- a/docker/btcd/start-btcd.sh +++ b/docker/btcd/start-btcd.sh @@ -46,7 +46,7 @@ NETWORK=$(set_default "$NETWORK" "simnet") PARAMS="" if [ "$NETWORK" != "mainnet" ]; then - PARAMS=$(echo --$NETWORK) + PARAMS="--$NETWORK" fi PARAMS=$(echo $PARAMS \ @@ -72,4 +72,3 @@ PARAMS="$PARAMS $@" # Print command and start bitcoin node. echo "Command: btcd $PARAMS" exec btcd $PARAMS - diff --git a/docker/docker-compose.ltc.yml b/docker/docker-compose.ltc.yml new file mode 100644 index 0000000000..c3bbd3dcad --- /dev/null +++ b/docker/docker-compose.ltc.yml @@ -0,0 +1,55 @@ +version: '2' +services: + # ltc is an image of litecoin node which used as base image for ltcd and + # ltcctl. The environment variables default values determined on stage of + # container start within starting script. 
+ ltcd: + image: ltcd + container_name: ltcd + build: + context: ltcd/ + volumes: + - shared:/rpc + - litecoin:/data + environment: + - RPCUSER + - RPCPASS + - NETWORK + - DEBUG + - MINING_ADDRESS + entrypoint: ["./start-ltcd.sh"] + + lnd: + image: lnd + container_name: lnd_ltc + build: + context: ../ + dockerfile: docker/lnd/Dockerfile + environment: + - RPCUSER + - RPCPASS + - NETWORK + - CHAIN + - DEBUG + volumes: + - shared:/rpc + - lnd_ltc:/root/.lnd + entrypoint: ["./start-lnd.sh"] + links: + - "ltcd:blockchain" + +volumes: + # shared volume is need to store the btcd rpc certificates and use it within + # ltcctl and lnd containers. + shared: + driver: local + + # litecoin volume is needed for maintaining blockchain persistence + # during ltcd container recreation. + litecoin: + driver: local + + # lnd volume is used for persisting lnd application data and chain state + # during container lifecycle. + lnd_ltc: + driver: local diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index fc0971b191..f13a400f05 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -1,95 +1,42 @@ version: '2' services: - # btc is an image of bitcoin node which used as base image for btcd and # btccli. The environment variables default values determined on stage of # container start within starting script. - btc: + btcd: image: btcd + container_name: btcd build: context: btcd/ volumes: - - shared:/rpc - - bitcoin:/data + - shared:/rpc + - bitcoin:/data environment: - RPCUSER - RPCPASS - NETWORK + - DEBUG + - MINING_ADDRESS + entrypoint: ["./start-btcd.sh"] - btcd: - extends: btc - container_name: btcd - environment: - - DEBUG - - MINING_ADDRESS - - NETWORK - entrypoint: ["./start-btcd.sh"] - - btcctl: - extends: btc - container_name: btcctl - links: - - "btcd:rpcserver" - entrypoint: ["./start-btcctl.sh"] - - - # ltc is an image of litecoin node which used as base image for ltcd and - # ltcctl. 
The environment variables default values determined on stage of - # container start within starting script. - ltc: - image: ltcd + lnd: + image: lnd + container_name: lnd build: - context: ltcd/ - volumes: - - shared:/rpc - - litecoin:/data + context: ../ + dockerfile: docker/lnd/Dockerfile environment: - RPCUSER - RPCPASS - NETWORK - - ltcd: - extends: ltc - container_name: ltcd - environment: - - DEBUG - - MINING_ADDRESS - - NETWORK - entrypoint: ["./start-ltcd.sh"] - - ltcctl: - extends: ltc - container_name: ltcctl - links: - - "ltcd:rpcserver" - entrypoint: ["./start-ltcctl.sh"] - - lnd: - image: lnd - build: - context: ../ - dockerfile: docker/lnd/Dockerfile - environment: - - RPCUSER - - RPCPASS - - NETWORK - - CHAIN - - DEBUG - volumes: - - shared:/rpc - entrypoint: ["./start-lnd.sh"] - - lnd_ltc: - extends: lnd - container_name: lnd_ltc - links: - - "ltcd:blockchain" - - lnd_btc: - extends: lnd - container_name: lnd_btc + - CHAIN + - DEBUG + volumes: + - shared:/rpc + - lnd:/root/.lnd + entrypoint: ["./start-lnd.sh"] links: - - "btcd:blockchain" + - "btcd:blockchain" volumes: # shared volume is need to store the btcd rpc certificates and use it within @@ -102,7 +49,7 @@ volumes: bitcoin: driver: local - # litecoin volume is needed for maintaining blockchain persistence - # during ltcd container recreation. - litecoin: + # lnd volume is used for persisting lnd application data and chain state + # during container lifecycle. + lnd: driver: local diff --git a/docker/lnd/Dockerfile b/docker/lnd/Dockerfile index 2d5c4ef0df..db55ce1272 100644 --- a/docker/lnd/Dockerfile +++ b/docker/lnd/Dockerfile @@ -1,9 +1,6 @@ FROM golang:1.13-alpine as builder -MAINTAINER Olaoluwa Osuntokun - -# Copy in the local repository to build from. -COPY . /go/src/github.com/lightningnetwork/lnd +LABEL maintainer="Olaoluwa Osuntokun " # Force Go to use the cgo based DNS resolver. This is required to ensure DNS # queries required to connect to linked containers succeed. 
@@ -12,10 +9,14 @@ ENV GODEBUG netdns=cgo # Install dependencies and install/build lnd. RUN apk add --no-cache --update alpine-sdk \ git \ - make \ -&& cd /go/src/github.com/lightningnetwork/lnd \ + make + +# Copy in the local repository to build from. +COPY . /go/src/github.com/lightningnetwork/lnd + +RUN cd /go/src/github.com/lightningnetwork/lnd \ && make \ -&& make install tags="signrpc walletrpc chainrpc invoicesrpc routerrpc" +&& make install tags="signrpc walletrpc chainrpc invoicesrpc" # Start a new, final image to reduce size. FROM alpine as final diff --git a/docker/lnd/start-lnd.sh b/docker/lnd/start-lnd.sh index d65c145dcc..1fb8502ff5 100755 --- a/docker/lnd/start-lnd.sh +++ b/docker/lnd/start-lnd.sh @@ -51,7 +51,6 @@ fi exec lnd \ --noseedbackup \ - --logdir="/data" \ "--$CHAIN.active" \ "--$CHAIN.$NETWORK" \ "--$CHAIN.node"="btcd" \ diff --git a/docker/ltcd/Dockerfile b/docker/ltcd/Dockerfile index 2746d04284..e82ee9d0ee 100644 --- a/docker/ltcd/Dockerfile +++ b/docker/ltcd/Dockerfile @@ -1,6 +1,6 @@ FROM golang:1.12-alpine as builder -MAINTAINER Olaoluwa Osuntokun +LABEL maintainer="Olaoluwa Osuntokun " # Grab and install the latest version of roasbeef's fork of ltcd and all # related dependencies. @@ -14,17 +14,14 @@ RUN GO111MODULE=on go install . ./cmd/ltcctl ./cmd/gencerts FROM alpine as final # Expose mainnet ports (server, rpc) -EXPOSE 8333 8334 +EXPOSE 9333 9334 # Expose testnet ports (server, rpc) -EXPOSE 18333 18334 +EXPOSE 19334 19335 # Expose simnet ports (server, rpc) EXPOSE 18555 18556 -# Expose segnet ports (server, rpc) -EXPOSE 28901 28902 - # Copy the compiled binaries from the builder image. 
COPY --from=builder /go/bin/ltcctl /bin/ COPY --from=builder /go/bin/ltcd /bin/ diff --git a/docker/ltcd/start-ltcctl.sh b/docker/ltcd/start-ltcctl.sh index ec82ea4cf4..2888ab9c05 100755 --- a/docker/ltcd/start-ltcctl.sh +++ b/docker/ltcd/start-ltcctl.sh @@ -43,10 +43,17 @@ RPCUSER=$(set_default "$RPCUSER" "devuser") RPCPASS=$(set_default "$RPCPASS" "devpass") NETWORK=$(set_default "$NETWORK" "simnet") -exec ltcctl \ - "--$NETWORK" \ - --rpccert="/rpc/rpc.cert" \ - --rpcuser="$RPCUSER" \ - --rpcpass="$RPCPASS" \ - --rpcserver="rpcserver" \ - "$@" +PARAMS="" +if [ "$NETWORK" != "mainnet" ]; then + PARAMS="--$NETWORK" +fi + +PARAMS=$(echo $PARAMS \ + "--rpccert=/rpc/rpc.cert" \ + "--rpcuser=$RPCUSER" \ + "--rpcpass=$RPCPASS" \ + "--rpcserver=localhost" \ +) + +PARAMS="$PARAMS $@" +exec ltcctl $PARAMS diff --git a/docker/ltcd/start-ltcd.sh b/docker/ltcd/start-ltcd.sh index 9bdbe6d0dc..b6c6d6990e 100755 --- a/docker/ltcd/start-ltcd.sh +++ b/docker/ltcd/start-ltcd.sh @@ -44,8 +44,12 @@ RPCPASS=$(set_default "$RPCPASS" "devpass") DEBUG=$(set_default "$DEBUG" "info") NETWORK=$(set_default "$NETWORK" "simnet") -PARAMS=$(echo \ - "--$NETWORK" \ +PARAMS="" +if [ "$NETWORK" != "mainnet" ]; then + PARAMS="--$NETWORK" +fi + +PARAMS=$(echo $PARAMS \ "--debuglevel=$DEBUG" \ "--rpcuser=$RPCUSER" \ "--rpcpass=$RPCPASS" \ @@ -68,4 +72,3 @@ PARAMS="$PARAMS $@" # Print command and start bitcoin node. echo "Command: ltcd $PARAMS" exec ltcd $PARAMS - diff --git a/docs/DOCKER.md b/docs/DOCKER.md index 50f81a52ec..2916dde9c6 100644 --- a/docs/DOCKER.md +++ b/docs/DOCKER.md @@ -65,3 +65,30 @@ $ docker logs lnd-testnet This is a simple example, it is possible to use any command-line options necessary to expose RPC ports, use `btcd` or `bitcoind`, or add additional chains. + +## LND Development and Testing + +To test the Docker production image locally, run the following from +the project root: + +``` +$ docker build . 
-t lnd:master +``` + +To choose a specific branch or tag instead, use the "checkout" build-arg. For example, to build the latest commits in master: + +``` +$ docker build . --build-arg checkout=v0.8.0-beta -t lnd:v0.8.0-beta +``` + +To build the image using the most current tag: + +``` +$ docker build . --build-arg checkout=$(git describe --tags `git rev-list --tags --max-count=1`) -t lnd:latest-tag +``` + +Once the image has been built and tagged locally, start the container: + +``` +docker run --name=lnd-testnet -it lnd:1.0 --bitcoin.active --bitcoin.testnet --bitcoin.node=neutrino --neutrino.connect=faucet.lightning.community +``` diff --git a/docs/INSTALL.md b/docs/INSTALL.md index 7a37aca4be..93746a9a5e 100644 --- a/docs/INSTALL.md +++ b/docs/INSTALL.md @@ -158,6 +158,9 @@ To check that `lnd` was installed properly run the following command: make check ``` +This command requires `groestlcoind` (almost any version should do) to be available +in the system's `$PATH` variable. Otherwise some of the tests will fail. + # Available Backend Operating Modes In order to run, `lnd` requires, that the user specify a chain backend. At the @@ -204,6 +207,7 @@ groestlcoind: --groestlcoind.rpcpass= Password for RPC connections --groestlcoind.zmqpubrawblock= The address listening for ZMQ connections to deliver raw block notifications --groestlcoind.zmqpubrawtx= The address listening for ZMQ connections to deliver raw transaction notifications + --groestlcoind.estimatemode= The fee estimate mode. Must be either "ECONOMICAL" or "CONSERVATIVE". (default: CONSERVATIVE) ``` ## Using grsd @@ -350,6 +354,9 @@ lnd --groestlcoin.testnet --debuglevel=debug --groestlcoin.node=groestlcoind --g the default `groestlcoind` settings, having more than one instance of `lnd`, or `lnd` plus any application that consumes the RPC could cause `lnd` to miss crucial updates from the backend. +- The default fee estimate mode in `groestlcoind` is CONSERVATIVE. 
You can set + `groestlcoind.estimatemode=ECONOMICAL` to change it into ECONOMICAL. Furthermore, + if you start `groestlcoind` in `regtest`, this configuration won't take any effect. # Creating a wallet diff --git a/docs/MAKEFILE.md b/docs/MAKEFILE.md index 3dc23f1c4d..558eb85095 100644 --- a/docs/MAKEFILE.md +++ b/docs/MAKEFILE.md @@ -9,6 +9,10 @@ make check make install ``` +The command `make check` requires `bitcoind` (almost any version should do) to +be available in the system's `$PATH` variable. Otherwise some of the tests will +fail. + Developers ========== diff --git a/docs/code_contribution_guidelines.md b/docs/code_contribution_guidelines.md index 6336729242..f59aafa754 100644 --- a/docs/code_contribution_guidelines.md +++ b/docs/code_contribution_guidelines.md @@ -424,6 +424,7 @@ statements and select statements. If one is forced to wrap lines of function arguments that exceed the 80 character limit, then a new line should be inserted before the first stanza in the comment body. + **WRONG** ```go func foo(a, b, c, diff --git a/docs/configuring_tor.md b/docs/configuring_tor.md index 672d024ff3..affd010f83 100644 --- a/docs/configuring_tor.md +++ b/docs/configuring_tor.md @@ -2,7 +2,8 @@ 1. [Overview](#overview) 2. [Getting Started](#getting-started) 3. [Tor Stream Isolation](#tor-stream-isolation) -4. [Listening for Inbound Connections](#listening-for-inbound-connections) +4. [Authentication](#authentication) +5. [Listening for Inbound Connections](#listening-for-inbound-connections) ## Overview @@ -78,6 +79,8 @@ Tor: --tor.dns= The DNS server as host:port that Tor will use for SRV queries - NOTE must have TCP resolution enabled (default: soa.nodes.lightning.directory:53) --tor.streamisolation Enable Tor stream isolation by randomizing user credentials for each connection.
--tor.control= The host:port that Tor is listening on for Tor control connections (default: localhost:9051) + --tor.targetipaddress= IP address that Tor should use as the target of the hidden service + --tor.password= The password used to arrive at the HashedControlPassword for the control port. If provided, the HASHEDPASSWORD authentication method will be used instead of the SAFECOOKIE one. --tor.v2 Automatically set up a v2 onion service to listen for inbound connections --tor.v3 Automatically set up a v3 onion service to listen for inbound connections --tor.privatekeypath= The path to the private key of the onion service being created @@ -133,6 +136,26 @@ specification of an additional argument: ⛰ ./lnd --tor.active --tor.streamisolation ``` +## Authentication + +In order for `lnd` to communicate with the Tor daemon securely, it must first +establish an authenticated connection. `lnd` supports the following Tor control +authentication methods (arguably, from most to least secure): + +* `SAFECOOKIE`: This authentication method relies on a cookie created and + stored by the Tor daemon and is the default assuming the Tor daemon supports + it by specifying `CookieAuthentication 1` in its configuration file. +* `HASHEDPASSWORD`: This authentication method is stateless as it relies on a + password hash scheme and may be useful if the Tor daemon is operating under a + separate host from the `lnd` node. The password hash can be obtained through + the Tor daemon with `tor --hash-password PASSWORD`, which should then be + specified in Tor's configuration file with `HashedControlPassword + PASSWORD_HASH`. Finally, to use it within `lnd`, the `--tor.password` flag + should be provided with the corresponding password. +* `NULL`: To bypass any authentication at all, this scheme can be used instead. + It doesn't require any additional flags to `lnd` or configuration options to + the Tor daemon. 
+ ## Listening for Inbound Connections In order to listen for inbound connections through Tor, an onion service must be diff --git a/docs/fuzz.md b/docs/fuzz.md new file mode 100644 index 0000000000..1763446288 --- /dev/null +++ b/docs/fuzz.md @@ -0,0 +1,54 @@ +# Fuzzing LND # + +The `fuzz` package is organized into subpackages which are named after the `lnd` package they test. Each subpackage has its own set of fuzz targets. + +### Setup and Installation ### +This section will cover setup and installation of `go-fuzz` and fuzzing binaries. + +* First, we must get `go-fuzz`. +``` +$ go get -u github.com/dvyukov/go-fuzz/... +``` +* The following is a command to build all fuzzing harnesses for a specific package. +``` +$ cd fuzz/ +$ find * -maxdepth 1 -regex '[A-Za-z0-9\-_.]'* -not -name fuzz_utils.go | sed 's/\.go$//1' | xargs -I % sh -c 'go-fuzz-build -func Fuzz_% -o -%-fuzz.zip github.com/lightningnetwork/lnd/fuzz/' +``` + +* This may take a while since this will create zip files associated with each fuzzing target. + +* Now, run `go-fuzz` with `workdir` set as below! +``` +$ go-fuzz -bin=<.zip archive here> -workdir= -procs= +``` + +`go-fuzz` will print out log lines every couple of seconds. Example output: +``` +2017/09/19 17:44:23 workers: 8, corpus: 23 (3s ago), crashers: 1, restarts: 1/748, execs: 400690 (16694/sec), cover: 394, uptime: 24s +``` +Corpus is the number of items in the corpus. `go-fuzz` may add valid inputs to +the corpus in an attempt to gain more coverage. Crashers is the number of inputs +resulting in a crash. The inputs, and their outputs are logged in: +`fuzz///crashers`. `go-fuzz` also creates a `suppressions` directory +of stacktraces to ignore so that it doesn't create duplicate stacktraces. +Cover is a number representing edge coverage of the program being fuzzed. + +### Brontide ### +The brontide fuzzers need to be run with a `-timeout` flag of 20 seconds or greater since there is a lot of machine state that must be printed on panic. 
+ +### Corpus ### +Fuzzing generally works best with a corpus that is of minimal size while achieving the maximum coverage. However, `go-fuzz` automatically minimizes the corpus in-memory before fuzzing so a large corpus shouldn't make a difference - edge coverage is all that really matters. + +### Test Harness ### +If you take a look at the test harnesses that are used, you will see that they all consist of one function: +``` +func Fuzz(data []byte) int +``` +If: + +- `-1` is returned, the fuzzing input is ignored +- `0` is returned, `go-fuzz` will add the input to the corpus and deprioritize it in future mutations. +- `1` is returned, `go-fuzz` will add the input to the corpus and prioritize it in future mutations. + +### Conclusion ### +Citizens, do your part and `go-fuzz` `lnd` today! diff --git a/docs/go-fuzz/README.md b/docs/go-fuzz/README.md deleted file mode 100644 index 38f9e80670..0000000000 --- a/docs/go-fuzz/README.md +++ /dev/null @@ -1,106 +0,0 @@ -# How to fuzz the Lightning Network Daemon's wire protocol using go-fuzz # - -This document will describe how to use the fuzz-testing library `go-fuzz` on -the `lnd` wire protocol. - -### Introduction ### - -Lnd uses its own wire protocol to send and receive messages of all types. There -are 22 different message types, each with their own specific format. If a -message is not in the correct format, `lnd` should logically reject the message -and throw an error. But what if it doesn't? What if we could sneakily craft a -custom message that could pass all the necessary checks and cause an error to -go undetected? Chaos would ensue. However, crafting such a message would require -an in-depth understanding of the many different cogs that make the wire protocol -tick. - -A better solution is fuzz-testing. Fuzz-testing or fuzzing is when a program -known as a fuzzer generates many, many inputs to a function or program in an -attempt to cause it to crash. 
Fuzzing is surprisingly effective at finding bugs -and a particular fuzzing program `AFL` is well-known for the amount of bugs it -has found with its learned approach. The library we will be using, `go-fuzz`, is -based on `AFL` and has quite a track record of finding bugs in a diverse set of -go programs. `go-fuzz` takes a coverage-guided approach in an attempt to cover -as many code paths as possible on an attack surface. We give `go-fuzz` real, -valid inputs and it will essentially change bits until it achieves a crash! -After reading this document, you too may be able to find errors in `lnd` with -`go-fuzz`! - -### Setup and Installation ### -This section will cover setup and installation of `go-fuzz`. - -* First, we must get `go-fuzz`: -``` -$ go get github.com/dvyukov/go-fuzz/go-fuzz -$ go get github.com/dvyukov/go-fuzz/go-fuzz-build -``` -* Next, create a folder in the `lnwire` package. You can name it whatever. -``` -$ mkdir lnwire/ -``` -* Unzip `corpus.tar.gz` in the `docs/go-fuzz` folder and move it to the folder you just made. -``` -$ tar -xzf docs/go-fuzz/corpus.tar.gz -$ mv corpus lnwire/ -``` -* Now, move `wirefuzz.go` to the same folder you just created. -``` -$ mv docs/go-fuzz/wirefuzz.go lnwire/ -``` -* Change the package name in `wirefuzz.go` from `wirefuzz` to ``. -* Build the test program - this produces a `-fuzz.zip` (archive) file. -``` -$ go-fuzz-build github.com/lightningnetwork/lnd/lnwire/ -``` -* Now, run `go-fuzz`!!! -``` -$ go-fuzz -bin=<.zip archive here> -workdir=lnwire/ -``` - -`go-fuzz` will print out log lines every couple of seconds. Example output: -``` -2017/09/19 17:44:23 slaves: 8, corpus: 23 (3s ago), crashers: 1, restarts: 1/748, execs: 400690 (16694/sec), cover: 394, uptime: 24s -``` -Corpus is the number of items in the corpus. `go-fuzz` may add valid inputs to -the corpus in an attempt to gain more coverage. Crashers is the number of inputs -resulting in a crash. The inputs, and their outputs are logged in: -`/crashers`. 
`go-fuzz` also creates a `suppressions` directory -of stacktraces to ignore so that it doesn't create duplicate stacktraces. -Cover is a number representing coverage of the program being fuzzed. When I ran -this earlier, `go-fuzz` found two bugs ([#310](https://github.com/lightningnetwork/lnd/pull/310) and [#312](https://github.com/lightningnetwork/lnd/pull/312)) within minutes! - -### Corpus Notes ### -You may wonder how I made the corpus that you unzipped in the previous step. -It's quite simple really. For every message type that `lnwire_test.go` -processed in `TestLightningWireProtocol`, I logged it (in `[]byte` format) to -a .txt file. Within minutes, I had a corpus of valid `lnwire` messages that -I could use with `go-fuzz`! `go-fuzz` will alter these valid messages to create -the sneakily crafted message that I described in the introduction that manages -to bypass validation checks and crash the program. I ran `go-fuzz` for several -hours on the corpus I generated and found two bugs. I believe I have exhausted -the current corpus, but there are still perhaps possible malicious inputs that -`go-fuzz` has not yet reached and could reach with a slightly different generated -corpus. - -### Test Harness ### -If you take a look at the test harness that I used, `wirefuzz.go`, you will see -that it consists of one function: `func Fuzz(data []byte) int`. `go-fuzz` requires -that each input in the corpus is in `[]byte` format. The test harness is also -quite simple. It reads in `[]byte` messages into `lnwire.Message` objects, -serializes them into a buffer, deserializes them back into `lnwire.Message` objects -and asserts their equality. If the pre-serialization and post-deserialization -`lnwire.Message` objects are not equal, the wire protocol has encountered a bug. -Wherever a `0` is returned, `go-fuzz` will ignore that input as it has reached -an unimportant code path caused by the parser catching the error. 
If a `1` is -returned, the `[]byte` input was parsed successfully and the two `lnwire.Message` -objects were indeed equal. This `[]byte` input is then added to the corpus as -a valid message. If a `panic` is reached, serialization or deserialization failed -and `go-fuzz` may have found a bug. - -### Conclusion ### -Fuzzing is a powerful and quick way to find bugs in programs that works especially -well with protocols where there is a strict format with validation rules. Fuzzing -is important as an automated security tool and can find real bugs in real-world -software. The fuzzing of `lnd` is by no means complete and there exist probably -many more bugs in the software that may `go` undetected if left unfuzzed. Citizens, -do your part and `go-fuzz` `lnd` today! diff --git a/docs/go-fuzz/corpus.tar.gz b/docs/go-fuzz/corpus.tar.gz deleted file mode 100644 index da6a0ea683..0000000000 Binary files a/docs/go-fuzz/corpus.tar.gz and /dev/null differ diff --git a/docs/go-fuzz/wirefuzz.go b/docs/go-fuzz/wirefuzz.go deleted file mode 100644 index 59f496f294..0000000000 --- a/docs/go-fuzz/wirefuzz.go +++ /dev/null @@ -1,54 +0,0 @@ -package wirefuzz - -import ( - "bytes" - "fmt" - "reflect" - - "github.com/lightningnetwork/lnd/lnwire" -) - -// Fuzz is used by go-fuzz to fuzz for potentially malicious input -func Fuzz(data []byte) int { - // Because go-fuzz requires this function signature with a []byte parameter, - // and we want to emulate the behavior of mainScenario in lnwire_test.go, - // we first parse the []byte parameter into a Message type. 
- - // Parsing []byte into Message - r := bytes.NewReader(data) - msg, err := lnwire.ReadMessage(r, 0) - if err != nil { - // Ignore this input - go-fuzz generated []byte that cannot be represented as Message - return 0 - } - - // We will serialize Message into a new bytes buffer - var b bytes.Buffer - if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { - // Could not serialize Message into bytes buffer, panic - panic(err) - } - - // Make sure serialized bytes buffer (excluding 2 bytes for message type - // is less than max payload size for this specific message,. - payloadLen := uint32(b.Len()) - 2 - if payloadLen > msg.MaxPayloadLength(0) { - // Ignore this input - max payload constraint violated - return 0 - } - - // Deserialize the message from the serialized bytes buffer and - // assert that the original message is equal to the newly deserialized message. - newMsg, err := lnwire.ReadMessage(&b, 0) - if err != nil { - // Could not deserialize message from bytes buffer, panic - panic(err) - } - if !reflect.DeepEqual(msg, newMsg) { - // Deserialized message and original message are not deeply equal - panic(fmt.Errorf("Deserialized message and original message " + - "are not deeply equal.")) - } - - return 1 -} diff --git a/docs/macaroons.md b/docs/macaroons.md index caa9c1bffe..33692c94b8 100644 --- a/docs/macaroons.md +++ b/docs/macaroons.md @@ -119,6 +119,11 @@ A very simple example using `curl` may look something like this: Have a look at the [Java GRPC example](/docs/grpc/java.md) for programmatic usage details. +## Creating macaroons with custom permissions + +The macaroon bakery is described in more detail in the +[README in the macaroons package](../macaroons/README.md). 
+ ## Future improvements to the `lnd` macaroon implementation The existing macaroon implementation in `lnd` and `lncli` lays the groundwork @@ -131,8 +136,6 @@ such as: * Root key rotation and possibly macaroon invalidation/rotation -* Tools to allow you to easily delegate macaroons in more flexible ways - * Additional restrictions, such as limiting payments to use (or not use) specific routes, channels, nodes, etc. diff --git a/docs/psbt.md b/docs/psbt.md new file mode 100644 index 0000000000..17a44ca851 --- /dev/null +++ b/docs/psbt.md @@ -0,0 +1,233 @@ +# PSBT + +This document describes various use cases around the topic of Partially Signed +Bitcoin Transactions (PSBTs). Currently only channel funding is possible with +PSBTs but more features are planned. + +See [BIP174](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki) for +a full description of the PSBT format and the different _roles_ that a +participant in a PSBT can have. + +## Opening a channel by using a PSBT + +This is a step-by-step guide on how to open a channel with `lnd` by using a PSBT +as the funding transaction. +We will use `bitcoind` to create and sign the transaction just to keep the +example simple. Of course any other PSBT compatible wallet could be used and the +process would likely be spread out over multiple signing steps. The goal of this +example is not to cover each and every possible edge case but to help users of +`lnd` understand what inputs the `lncli` utility expects. + +The goal is to open a channel of 1'234'567 satoshis to the node +`03db1e56e5f76bc4018cf6f03d1bb98a7ae96e3f18535e929034f85e7f1ca2b8ac` by using +a PSBT. That means, `lnd` can have a wallet balance of `0` and is still able to +open a channel. We'll jump into an example right away. + +The new funding flow has a small caveat: _Time matters_. 
+ +When opening a channel using the PSBT flow, we start the negotiation +with the remote peer immediately so we can obtain their multisig key they are +going to use for the channel. Then we pause the whole process until we get a +fully signed transaction back from the user. Unfortunately there is no reliable +way to know after how much time the remote node starts to clean up and "forgets" +about the pending channel. If the remote node is an `lnd` node, we know it's +after 10 minutes. **So as long as the whole process takes less than 10 minutes, +everything should work fine.** + +### Safety warning + +**DO NOT PUBLISH** the finished transaction by yourself or with another tool. +lnd MUST publish it in the proper funding flow order **OR THE FUNDS CAN BE +LOST**! + +This is very important to remember when using wallets like `Wasabi` for +instance, where the "publish" button is very easy to hit by accident. + +### 1. Use the new `--psbt` flag in `lncli openchannel` + +The new `--psbt` flag in the `openchannel` command starts an interactive dialog +between `lncli` and the user. Below the command you see an example output from +a regtest setup. Of course all values will be different. + +```bash +$ lncli openchannel --node_key 03db1e56e5f76bc4018cf6f03d1bb98a7ae96e3f18535e929034f85e7f1ca2b8ac --local_amt 1234567 --psbt + +Starting PSBT funding flow with pending channel ID fc7853889a04d33b8115bd79ebc99c5eea80d894a0bead40fae5a06bcbdccd3d. +PSBT funding initiated with peer 03db1e56e5f76bc4018cf6f03d1bb98a7ae96e3f18535e929034f85e7f1ca2b8ac. +Please create a PSBT that sends 0.01234567 BTC (1234567 satoshi) to the funding address bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q. 
+ +Example with bitcoind: + bitcoin-cli walletcreatefundedpsbt [] '[{"bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q":0.01234567}]' + +Or if you are using a wallet that can fund a PSBT directly (currently not +possible with bitcoind), you can use this PSBT that contains the same address +and amount: cHNidP8BADUCAAAAAAGH1hIAAAAAACIAILxii7ESlHKdKpP5ZGFqcxiUIudUZBuSedTcB2+geh4fAAAAAAAA + +Paste the funded PSBT here to continue the funding flow. +Base64 encoded PSBT: +``` + +The command line now waits until a PSBT is entered. We'll create one in the next +step. Make sure to use a new shell window/tab for the next commands and leave +the prompt from the `openchannel` running as is. + +### 2. Use `bitcoind` to create a funding transaction + +The output of the last command already gave us an example command to use with +`bitcoind`. We'll go ahead and execute it now. The meaning of this command is +something like "bitcoind, give me a PSBT that sends the given amount to the +given address, choose any input you see fit": + +```bash +$ bitcoin-cli walletcreatefundedpsbt [] '[{"bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q":0.01234567}]' + +{ + "psbt": "cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA", + "fee": 0.00003060, + "changepos": 1 +} +``` + +We see that `bitcoind` has given us a transaction that would pay `3060` satoshi +in fees. Fee estimation/calculation can be changed with parameters of the +`walletcreatefundedpsbt` command. To see all options, use +`bitcoin-cli help walletcreatefundedpsbt`. 
+ +If we want to know what exactly is in this PSBT, we can look at it with the +`decodepsbt` command: + +```bash +$ bitcoin-cli decodepsbt cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA + +{ + "tx": { + "txid": "374504e4246a93a45b4a2c2bc31d8adc8525aa101c7b9065db6dc01c4bdfce0a", + "hash": "374504e4246a93a45b4a2c2bc31d8adc8525aa101c7b9065db6dc01c4bdfce0a", + "version": 2, + "size": 125, + "vsize": 125, + "weight": 500, + "locktime": 0, + "vin": [ + { + "txid": "3ff673717cfb451658e260ecacc6e9cb39112e0440bd5e7cea87017eff2d4bbc", + "vout": 0, + "scriptSig": { + "asm": "", + "hex": "" + }, + "sequence": 4294967294 + } + ], + "vout": [ + { + "value": 0.01234567, + "n": 0, + "scriptPubKey": { + "asm": "0 bc628bb11294729d2a93f964616a73189422e754641b9279d4dc076fa07a1e1f", + "hex": "0020bc628bb11294729d2a93f964616a73189422e754641b9279d4dc076fa07a1e1f", + "reqSigs": 1, + "type": "witness_v0_scripthash", + "addresses": [ + "bcrt1qh33ghvgjj3ef625nl9jxz6nnrz2z9e65vsdey7w5msrklgr6rc0sv0s08q" + ] + } + }, + { + "value": 48.98759093, + "n": 1, + "scriptPubKey": { + "asm": "0 bfba4c71068726c99ce9051924456ed09c3ce1bc", + "hex": "0014bfba4c71068726c99ce9051924456ed09c3ce1bc", + "reqSigs": 1, + "type": "witness_v0_keyhash", + "addresses": [ + "bcrt1qh7aycugxsunvn88fq5vjg3tw6zwrecduvvgre5" + ] + } + } + ] + }, + "unknown": { + }, + "inputs": [ + { + "witness_utxo": { + "amount": 48.99996720, + "scriptPubKey": { + "asm": "0 77a6275d5717b094ed65c12092c3fea645fba8eb", + "hex": "001477a6275d5717b094ed65c12092c3fea645fba8eb", + "type": "witness_v0_keyhash", + "address": "bcrt1qw7nzwh2hz7cffmt9cysf9sl75ezlh28tzl4n4e" + } + } + } + ], + "outputs": [ + { + }, + { + } + ], + "fee": 0.00003060 +} +``` + +This tells us that we got a PSBT with a big input, the channel output and a +change output for the rest. 
Everything is there but the signatures/witness data, +which is exactly what we need. + +### 3. Verify and sign the PSBT + +Now that we have a valid PSBT that has everything but the final +signatures/witness data, we can paste it into the prompt in `lncli` that is +still waiting for our input. + +```bash +... +Base64 encoded PSBT: cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA + +PSBT verified by lnd, please continue the funding flow by signing the PSBT by +all required parties/devices. Once the transaction is fully signed, paste it +again here. + +Base64 encoded PSBT: +``` + +We can now go ahead and sign the transaction. We are going to use `bitcoind` for +this again, but in practice this would now happen on a hardware wallet and +perhaps `bitcoind` would only know the public keys and couldn't sign for the +transaction itself. Again, this is only an example and can't reflect all +real-world use cases. + +```bash +$ bitcoin-cli walletprocesspsbt cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAAAA + +{ +"psbt": "cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAQhrAkcwRAIgHKQbenZYvgADRd9TKGVO36NnaIgW3S12OUg8XGtSrE8CICmeaYoJ/U7Ecm+/GneY8i2hu2QCaQnuomJgzn+JAnrDASEDUBmCLcsybA5qXSRBBdZ0Uk/FQiay9NgOpv4D26yeJpAAAAA=", +"complete": true +} +``` + +Interpreting the output, we now have a complete, final, and signed transaction +inside the PSBT. + +**!!! WARNING !!!** + +**DO NOT PUBLISH** the finished transaction by yourself or with another tool. 
+lnd MUST publish it in the proper funding flow order **OR THE FUNDS CAN BE +LOST**! + +Let's give it to `lncli` to continue: + +```bash +... +Base64 encoded PSBT: cHNidP8BAH0CAAAAAbxLLf9+AYfqfF69QAQuETnL6cas7GDiWBZF+3xxc/Y/AAAAAAD+////AofWEgAAAAAAIgAgvGKLsRKUcp0qk/lkYWpzGJQi51RkG5J51NwHb6B6Hh+1If0jAQAAABYAFL+6THEGhybJnOkFGSRFbtCcPOG8AAAAAAABAR8wBBAkAQAAABYAFHemJ11XF7CU7WXBIJLD/qZF+6jrAQhrAkcwRAIgHKQbenZYvgADRd9TKGVO36NnaIgW3S12OUg8XGtSrE8CICmeaYoJ/U7Ecm+/GneY8i2hu2QCaQnuomJgzn+JAnrDASEDUBmCLcsybA5qXSRBBdZ0Uk/FQiay9NgOpv4D26yeJpAAAAA= +{ + "funding_txid": "374504e4246a93a45b4a2c2bc31d8adc8525aa101c7b9065db6dc01c4bdfce0a" +} +``` + +Success! We now have the final transaction ID of the published funding +transaction. Now we only have to wait for some confirmations, then we can start +using the freshly created channel. diff --git a/docs/safety.md b/docs/safety.md new file mode 100644 index 0000000000..6f3d74cc83 --- /dev/null +++ b/docs/safety.md @@ -0,0 +1,438 @@ +# lnd Operational Safety Guidelines + +## Table of Contents + +* [Overview](#overview) + - [aezeed](#aezeed) + - [Wallet password](#wallet-password) + - [TLS](#tls) + - [Macaroons](#macaroons) + - [Static Channel Backups (SCBs)](#static-channel-backups-scbs) + - [Static remote keys](#static-remote-keys) +* [Best practices](#best-practices) + - [aezeed storage](#aezeed-storage) + - [File based backups](#file-based-backups) + - [Keeping Static Channel Backups (SCBs) safe](#keeping-static-channel-backups-scb-safe) + - [Keep `lnd` updated](#keep-lnd-updated) + - [Zombie channels](#zombie-channels) + - [Migrating a node to a new device](#migrating-a-node-to-a-new-device) + - [Migrating a node from clearnet to Tor](#migrating-a-node-from-clearnet-to-tor) + - [Prevent data corruption](#prevent-data-corruption) + - [Don't interrupt `lncli` commands](#dont-interrupt-lncli-commands) + - [Regular accounting/monitoring](#regular-accountingmonitoring) + - [Pruned bitcoind node](#pruned-bitcoind-node) + - [The 
`--noseedbackup` flag](#the---noseedbackup-flag) + +## Overview + +This chapter describes the security/safety mechanisms that are implemented in +`lnd`. We encourage every person that is planning on putting mainnet funds into +a Lightning Network channel using `lnd` to read this guide carefully. +As of this writing, `lnd` is still in beta and it is considered `#reckless` to +put any life altering amounts of BTC into the network. +That said, we constantly put in a lot of effort to make `lnd` safer to use and +more secure. We will update this documentation with each safety mechanism that +we implement. + +The first part of this document describes the security elements that are used in +`lnd` and how they work on a high level. +The second part is a list of best practices that has crystallized from bug +reports, developer recommendations and experiences from a lot of individuals +running mainnet `lnd` nodes during the last 18 months and counting. + +### aezeed + +This is what all the on-chain private keys are derived from. `aezeed` is similar +to BIP39 as it uses the same word list to encode the seed as a mnemonic phrase. +But this is where the similarities end, because `aezeed` is _not_ compatible +with BIP39. The 24 words of `aezeed` encode a 128 bit entropy (the seed itself), +a wallet birthday (days since BTC genesis block) and a version. +This data is _encrypted_ with a password using the AEZ cipher suite (hence the +name). Encrypting the content instead of using the password to derive the HD +extended root key has the advantage that the password can actually be checked +for correctness and can also be changed without affecting any of the derived +keys. +A BIP for the `aezeed` scheme is being written and should be published soon. + +Important to know: +* As with any bitcoin seed phrase, never reveal this to any person and store + the 24 words (and the password) in a safe place. +* You should never run two different `lnd` nodes with the same seed! 
Even if + they aren't running at the same time. This will lead to strange/unpredictable + behavior or even loss of funds. To migrate an `lnd` node to a new device, + please see the [node migration section](#migrating-a-node-to-a-new-device). +* For more technical information [see the aezeed README](../aezeed/README.md). + +### Wallet password + +The wallet password is one of the first things that has to be entered if a new +wallet is created using `lnd`. It is completely independent from the `aezeed` +cipher seed passphrase (which is optional). The wallet password is used to +encrypt the sensitive parts of `lnd`'s databases, currently some parts of +`wallet.db` and `macaroons.db`. Loss of this password does not necessarily +mean loss of funds, as long as the `aezeed` passphrase is still available. +But the node will need to be restored using the +[SCB restore procedure](recovery.md). + +### TLS + +By default the two API connections `lnd` offers (gRPC on port 10009 and REST on +port 8080) use TLS with a self-signed certificate for transport level security. +Specifying the certificate on the client side (for example `lncli`) is only a +protection against man-in-the-middle attacks and does not provide any +authentication. In fact, `lnd` will never even see the certificate that is +supplied to `lncli` with the `--tlscertpath` argument. `lncli` only uses that +certificate to verify it is talking to the correct gRPC server. +If the key/certificate pair (`tls.cert` and `tls.key` in the main `lnd` data +directory) is missing on startup, a new self-signed key/certificate pair is +generated. Clients connecting to `lnd` then have to use the new certificate +to verify they are talking to the correct server. + +### Macaroons + +Macaroons are used as the main authentication method in `lnd`. A macaroon is a +cryptographically verifiable token, comparable to a [JWT](https://jwt.io/) +or other form of API access token. 
In `lnd` this token consists of a _list of +permissions_ (what operations does the user of the token have access to) and a +set of _restrictions_ (e.g. token expiration timestamp, IP address restriction). +`lnd` does not keep track of the individual macaroons issued, only the key that +was used to create (and later verify) them. That means, individual tokens cannot +currently be invalidated, only all of them at once. +See the [high-level macaroons documentation](macaroons.md) or the [technical +README](../macaroons/README.md) for more information. + +Important to know: +* Deleting the `*.macaroon` files in the `/data/chain/bitcoin/mainnet/` + folder will trigger `lnd` to recreate the default macaroons. But this does + **NOT** invalidate clients that use an old macaroon. To make sure all + previously generated macaroons are invalidated, the `macaroons.db` has to be + deleted as well as all `*.macaroon`. + +### Static Channel Backups (SCBs) + +A Static Channel Backup is a piece of data that contains all _static_ +information about a channel, like funding transaction, capacity, key derivation +paths, remote node public key, remote node last known network addresses and +some static settings like CSV timeout and min HTLC setting. +Such a backup can either be obtained as a file containing entries for multiple +channels or by calling RPC methods to get individual (or all) channel data. +See the section on [keeping SCBs safe](#keeping-static-channel-backups-scb-safe) +for more information. + +What the SCB does **not** contain is the current channel balance (or the +associated commitment transaction). So how can a channel be restored using +SCBs? +That's the important part: _A channel cannot be restored using SCBs_, but the +funds that are in the channel can be claimed. 
The restore procedure relies on +the Data Loss Prevention (DLP) protocol which works by connecting to the remote +node and asking them to **force close** the channel and hand over the needed +information to sweep the on-chain funds that belong to the local node. +Because of this, [restoring a node from SCB](recovery.md) should be seen as an +emergency measure as all channels will be closed and on-chain fees incur to the +party that opened the channel initially. +To migrate an existing, working node to a new device, SCBs are _not_ the way to +do it. See the section about +[migrating a node](#migrating-a-node-to-a-new-device) on how to do it correctly. + +Important to know: +* [Restoring a node from SCB](recovery.md) will force-close all channels + contained in that file. +* Restoring a node from SCB relies on the remote node of each channel to be + online and respond to the DLP protocol. That's why it's important to + [get rid of zombie channels](#zombie-channels) because they cannot be + recovered using SCBs. +* The SCB data is encrypted with a key from the seed the node was created with. + A node can therefore only be restored from SCB if the seed is also known. + +### Static remote keys + +Since version `v0.8.0-beta`, `lnd` supports the `option_static_remote_key` (also +known as "safu commitments"). All new channels will be opened with this option +enabled by default, if the other node also supports it. +In essence, this change makes it possible for a node to sweep their channel +funds if the remote node force-closes, without any further communication between +the nodes. Previous to this change, your node needed to get a random channel +secret (called the `per_commit_point`) from the remote node even if they +force-closed the channel, which could make recovery very difficult. + +## Best practices + +### aezeed storage + +When creating a new wallet, `lnd` will print out 24 words to write down, which +is the wallet's seed (in the [aezeed](#aezeed) format). 
That seed is optionally +encrypted with a passphrase, also called the _cipher seed passphrase_. +It is absolutely important to write both the seed and, if set, the password down +and store it in a safe place as **there is no way of exporting the seed from an +lnd wallet**. When creating the wallet, after printing the seed to the command +line, it is hashed and only the hash (or to be more exact, the BIP32 extended +root key) is stored in the `wallet.db` file. +There is +[a tool being worked on](https://github.com/lightningnetwork/lnd/pull/2373) +that can extract the BIP32 extended root key but currently you cannot restore +lnd with only this root key. + +Important to know: +* Setting a password/passphrase for the aezeed is meant to protect it from + an attacker that finds the paper/storage device. Writing down the password + alongside the 24 seed words does not enhance the security in any way. + Therefore the password should be stored in a separate place. + +### File based backups + +There is a lot of confusion and also some myths about how to best backup the +off-chain funds of an `lnd` node. Making a mistake here is also still the single +biggest risk of losing off-chain funds, even though we do everything to mitigate +those risks. + +**What files can/should I regularly backup?** +The single most important file that needs to be backed up whenever it changes +is the `<lnddir>/data/chain/bitcoin/mainnet/channel.backup` file which holds +the Static Channel Backups (SCBs). This file is only updated every time `lnd` +starts, a channel is opened or a channel is closed. + +Most consumer Lightning wallet apps upload the file to the cloud automatically. + +See the [SCB chapter](#static-channel-backups-scbs) for more +information on how to use the file to restore channels. + +**What files should never be backed up to avoid problems?** +This is a bit of a trick question, as making the backup is not the problem.
+Restoring/using an old version of a specific file called +`<lnddir>/data/graph/mainnet/channel.db` is what is very risky and should +_never_ be done! +This requires some explanation: +The way LN channels are currently set up (until `eltoo` is implemented) is that +both parties agree on a current balance. To make sure none of the two peers in +a channel ever try to publish an old state of that balance, they both hand over +their keys to the other peer that gives them the means to take _all_ funds (not +just their agreed upon part) from a channel, if an _old_ state is ever +published. Therefore, having an old state of a channel basically means +forfeiting the balance to the other party. + +As payments in `lnd` can be made multiple times a second, it's very hard to +make a backup of the channel database every time it is updated. And even if it +can be technically done, the confidence that a particular state is certainly the +most up-to-date can never be very high. That's why the focus should be on +[making sure the channel database is not corrupted](#prevent-data-corruption), +[closing out the zombie channels](#zombie-channels) and keeping your SCBs safe. + +### Keeping Static Channel Backups (SCB) safe + +As mentioned in the previous chapter, there is a file where `lnd` stores and +updates a backup of all channels whenever the node is restarted, a new channel +is opened or a channel is closed: +`<lnddir>/data/chain/bitcoin/mainnet/channel.backup` + +One straight-forward way of backing that file up is to create a file watcher and +react whenever the file is changed. Here is an example script that +[automatically makes a copy of the file whenever it changes](https://gist.github.com/alexbosworth/2c5e185aedbdac45a03655b709e255a3). + +Other ways of obtaining SCBs for a node's channels are +[described in the recovery documentation](recovery.md#obtaining-scbs).
+ +Because the backup file is encrypted with a key from the seed the node was +created with, it can safely be stored on a cloud storage or any other storage +medium. Many consumer focused wallet smartphone apps automatically store a +backup file to the cloud, if the phone is set up to allow it. + +### Keep `lnd` updated + +With every larger update of `lnd`, new security features are added. Users are +always encouraged to update their nodes as soon as possible. This also helps the +network in general as new safety features that require compatibility among nodes +can be used sooner. + +### Zombie channels + +Zombie channels are channels that are most likely dead but are still around. +This can happen if one of the channel peers has gone offline for good (possibly +due to a failure of some sort) and didn't close its channels. The other, still +online node doesn't necessarily know that its partner will never come back +online. + +Funds that are in such channels are at great risk, as is described quite +dramatically in +[this article](https://medium.com/@gcomxx/get-rid-of-those-zombie-channels-1267d5a2a708?) +. + +The TL;DR of the article is that if you have funds in a zombie channel and you +need to recover your node after a failure, SCBs won't be able to recover those +funds. Because SCB restore +[relies on the remote node cooperating](#static-channel-backups-scbs). + +That's why it's important to **close channels with peers that have been +offline** for a length of time as a precautionary measure. + +Of course this might not be good advice for a routing node operator that wants +to support mobile users and route for them. Nodes running on a mobile device +tend to be offline for long periods of time. It would be bad for those users if +they needed to open a new channel every time they want to use the wallet. +Most mobile wallets only open private channels as they do not intend to route +payments through them. 
A routing node operator should therefore take into
+account if a channel is public or private when thinking about closing it.
+
+### Migrating a node to a new device
+
+As mentioned in the chapters [aezeed](#aezeed) and
+[SCB](#static-channel-backups-scbs) you should never use the same seed on two
+different nodes and restoring from SCB is not a migration but an emergency
+procedure.
+What is the correct way to migrate an existing node to a new device? There is
+an easy way that should work for most people and there's the harder/costlier
+fallback way to do it.
+
+**Option 1: Move the whole data directory to the new device**
+This option works very well if the new device runs the same operating system on
+the same architecture. If that is the case, the whole `/home/<user>/.lnd`
+directory in Linux (or `$HOME/Library/Application Support/lnd` in MacOS,
+`%LOCALAPPDATA%\lnd` in Windows) can be moved to the new device and `lnd`
+started there. It is important to shut down `lnd` on the old device before
+moving the directory!
+**Not supported/untested** is moving the data directory between different
+operating systems (for example `MacOS` -> `Linux`) or different system
+architectures (for example `32bit` -> `64bit` or `ARM` -> `amd64`). Data
+corruption or unexpected behavior can be the result. Users switching between
+operating systems or architectures should always use Option 2!
+
+**Option 2: Start from scratch**
+If option 1 does not work or is too risky, the safest course of action is to
+initialize the existing node again from scratch. Unfortunately this incurs some
+on-chain fee costs as all channels will need to be closed. Using the same seed
+means restoring the same network node identity as before. If a new identity
+should be created, a new seed needs to be created.
+Follow these steps to create the **same node (with the same seed)** from
+scratch:
+1. On the old device, close all channels (`lncli closeallchannels`). 
The
+   command can take up to several minutes depending on the number of channels.
+   **Do not interrupt the command!**
+1. Wait for all channels to be fully closed. If some nodes don't respond to the
+   close request it can be that `lnd` will go ahead and force close those
+   channels. This means that the local balance will be time locked for up to
+   two weeks (depending on the channel size). Check `lncli pendingchannels` to
+   see if any channels are still in the process of being force closed.
+1. After all channels are fully closed (and `lncli pendingchannels` lists zero
+   channels), `lnd` can be shut down on the old device.
+1. Start `lnd` on the new device and create a new wallet with the existing seed
+   that was used on the old device (answer "yes" when asked if an existing seed
+   should be used).
+1. Wait for the wallet to rescan the blockchain. This can take up to several
+   hours depending on the age of the seed and the speed of the chain backend.
+1. After the chain is fully synced (`lncli getinfo` shows
+   `"synced_to_chain": true`) the on-chain funds from the previous device should
+   now be visible on the new device as well and new channels can be opened.
+
+**What to do after the move**
+If things don't work as expected on the moved or re-created node, consider this
+list of things that possibly need to be changed to work on a new device:
+* In case the new device has a different hostname and TLS connection problems
+  occur, delete the `tls.key` and `tls.cert` files in the data directory and
+  restart `lnd` to recreate them.
+* If an external IP is set (either with `--externalip` or `--tlsextraip`) these
+  might need to be changed if the new machine has a different address. Changing
+  the `--tlsextraip` setting also means regenerating the certificate pair. See
+  point 1.
+* If port `9735` (or `10009` for gRPC) was forwarded on the router, these
+  forwarded ports need to point to the new device. The same applies to firewall
+  rules. 
+* It might take more than 24 hours for a new IP address to be visible on + network explorers. +* If channels show as offline after several hours, try to manually connect to + the remote peer. They might still try to reach `lnd` on the old address. + +### Migrating a node from clearnet to Tor + +If an `lnd` node has already been connected to the internet with an IPv4 or IPv6 +(clearnet) address and has any non-private channels, this connection between +channels and IP address is known to the network and cannot be deleted. +Starting the same node with the same identity and channels using Tor is trivial +to link back to any previously used clearnet IP address and does therefore not +provide any privacy benefits. +The following steps are recommended to cut all links between the old clearnet +node and the new Tor node: +1. Close all channels on the old node and wait for them to fully close. +1. Send all on-chain funds of the old node through a Coin Join service (like + Wasabi or Samurai/Whirlpool) until a sufficiently high anonymity set is + reached. +1. Create a new `lnd` node with a **new seed** that is only connected to Tor + and generate an on-chain address on the new node. +1. Send the mixed/coinjoined coins to the address of the new node. +1. Start opening channels. +1. Check an online network explorer that no IPv4 or IPv6 address is associated + with the new node's identity. + +### Prevent data corruption + +Many problems while running an `lnd` node can be prevented by avoiding data +corruption in the channel database (`/data/graph/mainnet/channel.db`). + +The following (non-exhaustive) list of things can lead to data corruption: +* A spinning hard drive gets a physical shock. +* `lnd`'s main data directory being written on an SD card or USB thumb drive + (SD cards and USB thumb drives _must_ be considered unsafe for critical files + that are written to very often, as the channel DB is). 
+* `lnd`'s main data directory being written to a network drive without
+  `fsync` support.
+* Unclean shutdown of `lnd`.
+* Aborting channel operation commands (see next chapter).
+* Not enough disk space for a growing channel DB file.
+* Moving `lnd`'s main data directory between different operating systems/
+  architectures.
+
+To avoid most of these factors, it is recommended to store `lnd`'s main data
+directory on a Solid State Drive (SSD) of a reliable manufacturer.
+An alternative or extension to that is to use a replicated disk setup. Making
+sure a power failure does not interrupt the node by running a UPS (
+uninterruptible power supply) might also make sense depending on the reliability
+of the local power grid and the amount of funds at stake.
+
+### Don't interrupt `lncli` commands
+
+Things can start to take a while to execute if a node has more than 50 to 100
+channels. It is extremely important to **never interrupt an `lncli` command**
+if it is manipulating the channel database, which is true for the following
+commands:
+ - `openchannel`
+ - `closechannel` and `closeallchannels`
+ - `abandonchannel`
+ - `updatechanpolicy`
+ - `restorechanbackup`
+
+Interrupting any of those commands can lead to an inconsistent state of the
+channel database and unpredictable behavior. If it is uncertain if a command
+is really stuck or if the node is still working on it, a look at the log file
+can help to get an idea.
+
+### Regular accounting/monitoring
+
+Regular monitoring of a node and keeping track of the movement of funds can help
+prevent problems. Tools like [`lndmon`](https://github.com/lightninglabs/lndmon)
+can assist with these tasks.
+
+### Pruned bitcoind node
+
+Running `lnd` connected to a `bitcoind` node that is running in prune mode is
+not supported! 
`lnd` needs to verify the funding transaction of every channel +in the network and be able to retrieve that information from `bitcoind` which +it cannot deliver when that information is pruned away. + +In theory pruning away all blocks _before_ the SegWit activation would work +as LN channels rely on SegWit. But this has neither been tested nor would it +be recommended/supported. + +In addition to not running a pruned node, it is recommended to run `bitcoind` +with the `-txindex` flag for performance reasons, though this is not strictly +required. + +Multiple `lnd` nodes can run off of a single `bitcoind` instance. There will be +connection/thread/performance limits at some number of `lnd` nodes but in +practice running 2 or 3 `lnd` instances per `bitcoind` node didn't show any +problems. + +### The `--noseedbackup` flag + +This is a flag that is only used for integration tests and should **never** be +used on mainnet! Turning this flag on means that the 24 word seed will not be +shown when creating a wallet. The seed is required to restore a node in case +of data corruption and without it all funds (on-chain and off-chain) are +being put at risk. diff --git a/docs/watchtower.md b/docs/watchtower.md index 0e7e17328d..d9bb78fc3c 100644 --- a/docs/watchtower.md +++ b/docs/watchtower.md @@ -102,6 +102,24 @@ If the watchtower's clients will need remote access, be sure to either: - Use a proxy to direct traffic from an open port to the watchtower's listening address. +### Tor Hidden Services + +Watchtowers have tor hidden service support and can automatically generate a +hidden service on startup with the following flags: + +``` +🏔 lnd --tor.active --tor.v3 --watchtower.active +``` + +The onion address is then shown in the "uris" field when queried with `lncli tower info`: + +``` +... 
+"uris": [ + "03281d603b2c5e19b8893a484eb938d7377179a9ef1a6bca4c0bcbbfc291657b63@bn2kxggzjysvsd5o3uqe4h7655u7v2ydhxzy7ea2fx26duaixlwuguad.onion:9911" +] +``` + Note: *The watchtower’s public key is distinct from `lnd`’s node public key. For now this acts as a soft whitelist as it requires clients to know the tower’s public key in order to use it for backups before more advanced whitelisting diff --git a/feature/default_sets.go b/feature/default_sets.go new file mode 100644 index 0000000000..8054894e72 --- /dev/null +++ b/feature/default_sets.go @@ -0,0 +1,50 @@ +package feature + +import "github.com/lightningnetwork/lnd/lnwire" + +// setDesc describes which feature bits should be advertised in which feature +// sets. +type setDesc map[lnwire.FeatureBit]map[Set]struct{} + +// defaultSetDesc are the default set descriptors for generating feature +// vectors. Each set is annotated with the corresponding identifier from BOLT 9 +// indicating where it should be advertised. +var defaultSetDesc = setDesc{ + lnwire.DataLossProtectRequired: { + SetInit: {}, // I + SetNodeAnn: {}, // N + }, + lnwire.GossipQueriesOptional: { + SetInit: {}, // I + SetNodeAnn: {}, // N + }, + lnwire.TLVOnionPayloadOptional: { + SetInit: {}, // I + SetNodeAnn: {}, // N + SetInvoice: {}, // 9 + SetLegacyGlobal: {}, + }, + lnwire.StaticRemoteKeyOptional: { + SetInit: {}, // I + SetNodeAnn: {}, // N + SetLegacyGlobal: {}, + }, + lnwire.UpfrontShutdownScriptOptional: { + SetInit: {}, // I + SetNodeAnn: {}, // N + }, + lnwire.PaymentAddrOptional: { + SetInit: {}, // I + SetNodeAnn: {}, // N + SetInvoice: {}, // 9 + }, + lnwire.MPPOptional: { + SetInit: {}, // I + SetNodeAnn: {}, // N + SetInvoice: {}, // 9 + }, + lnwire.AnchorsOptional: { + SetInit: {}, // I + SetNodeAnn: {}, // N + }, +} diff --git a/feature/deps.go b/feature/deps.go new file mode 100644 index 0000000000..c2b8f17569 --- /dev/null +++ b/feature/deps.go @@ -0,0 +1,132 @@ +package feature + +import ( + "fmt" + + 
"github.com/lightningnetwork/lnd/lnwire"
+)
+
+type (
+	// featureSet contains a set of feature bits.
+	featureSet map[lnwire.FeatureBit]struct{}
+
+	// supportedFeatures maps the feature bit from a feature vector to a
+	// boolean indicating if this feature's dependencies have already been
+	// verified. This allows us to short circuit verification if multiple
+	// features have common dependencies, or map traversal starts verifying
+	// from the bottom up.
+	supportedFeatures map[lnwire.FeatureBit]bool
+
+	// depDesc maps a feature to its set of dependent features, which must
+	// also be present for the vector to be valid. This can be used to
+	// recursively check the dependency chain for features in a feature
+	// vector.
+	depDesc map[lnwire.FeatureBit]featureSet
+)
+
+// ErrMissingFeatureDep is an error signaling that a transitive dependency in a
+// feature vector is not set properly.
+type ErrMissingFeatureDep struct {
+	dep lnwire.FeatureBit
+}
+
+// NewErrMissingFeatureDep creates a new ErrMissingFeatureDep error.
+func NewErrMissingFeatureDep(dep lnwire.FeatureBit) ErrMissingFeatureDep {
+	return ErrMissingFeatureDep{dep: dep}
+}
+
+// Error returns a human-readable description of the missing dep error.
+func (e ErrMissingFeatureDep) Error() string {
+	return fmt.Sprintf("missing feature dependency: %v", e.dep)
+}
+
+// deps is the default set of dependencies for assigned feature bits. If a
+// feature is not present in the depDesc it is assumed to have no dependencies.
+//
+// NOTE: For proper functioning, only the optional variant of feature bits
+// should be used in the following descriptor. In the future it may be necessary
+// to distinguish the dependencies for optional and required bits, but for now
+// the validation code maps required bits to optional ones since it simplifies
+// the number of constraints. 
+var deps = depDesc{ + lnwire.PaymentAddrOptional: { + lnwire.TLVOnionPayloadOptional: {}, + }, + lnwire.MPPOptional: { + lnwire.PaymentAddrOptional: {}, + }, + lnwire.AnchorsOptional: { + lnwire.StaticRemoteKeyOptional: {}, + }, +} + +// ValidateDeps asserts that a feature vector sets all features and their +// transitive dependencies properly. It assumes that the dependencies between +// optional and required features are identical, e.g. if a feature is required +// but its dependency is optional, that is sufficient. +func ValidateDeps(fv *lnwire.FeatureVector) error { + features := fv.Features() + supported := initSupported(features) + + return validateDeps(features, supported) +} + +// validateDeps is a subroutine that recursively checks that the passed features +// have all of their associated dependencies in the supported map. +func validateDeps(features featureSet, supported supportedFeatures) error { + for bit := range features { + // Convert any required bits to optional. + bit = mapToOptional(bit) + + // If the supported features doesn't contain the dependency, this + // vector is invalid. + checked, ok := supported[bit] + if !ok { + return NewErrMissingFeatureDep(bit) + } + + // Alternatively, if we know that this dependency is valid, we + // can short circuit and continue verifying other bits. + if checked { + continue + } + + // Recursively validate dependencies, since this method ranges + // over the subDeps. This method will return true even if + // subDeps is nil. + subDeps := deps[bit] + if err := validateDeps(subDeps, supported); err != nil { + return err + } + + // Once we've confirmed that this feature's dependencies, if + // any, are sound, we record this so other paths taken through + // `bit` return early when inspecting the supported map. + supported[bit] = true + } + + return nil +} + +// initSupported sets all bits from the feature vector as supported but not +// checked. 
This signals that the validity of their dependencies has not been +// verified. All required bits are mapped to optional to simplify the DAG. +func initSupported(features featureSet) supportedFeatures { + supported := make(supportedFeatures) + for bit := range features { + bit = mapToOptional(bit) + supported[bit] = false + } + + return supported +} + +// mapToOptional returns the optional variant of a given feature bit pair. Our +// dependendency graph is described using only optional feature bits, which +// reduces the number of constraints we need to express in the descriptor. +func mapToOptional(bit lnwire.FeatureBit) lnwire.FeatureBit { + if bit.IsRequired() { + bit ^= 0x01 + } + return bit +} diff --git a/feature/deps_test.go b/feature/deps_test.go new file mode 100644 index 0000000000..f1d950df89 --- /dev/null +++ b/feature/deps_test.go @@ -0,0 +1,167 @@ +package feature + +import ( + "reflect" + "testing" + + "github.com/lightningnetwork/lnd/lnwire" +) + +type depTest struct { + name string + raw *lnwire.RawFeatureVector + expErr error +} + +var depTests = []depTest{ + { + name: "empty", + raw: lnwire.NewRawFeatureVector(), + }, + { + name: "no deps optional", + raw: lnwire.NewRawFeatureVector( + lnwire.GossipQueriesOptional, + ), + }, + { + name: "no deps required", + raw: lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadRequired, + ), + }, + { + name: "one dep optional", + raw: lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadOptional, + lnwire.PaymentAddrOptional, + ), + }, + { + name: "one dep required", + raw: lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadRequired, + lnwire.PaymentAddrRequired, + ), + }, + { + name: "one missing optional", + raw: lnwire.NewRawFeatureVector( + lnwire.PaymentAddrOptional, + ), + expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional}, + }, + { + name: "one missing required", + raw: lnwire.NewRawFeatureVector( + lnwire.PaymentAddrRequired, + ), + expErr: 
ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional}, + }, + { + name: "two dep optional", + raw: lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadOptional, + lnwire.PaymentAddrOptional, + lnwire.MPPOptional, + ), + }, + { + name: "two dep required", + raw: lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadRequired, + lnwire.PaymentAddrRequired, + lnwire.MPPRequired, + ), + }, + { + name: "two dep last missing optional", + raw: lnwire.NewRawFeatureVector( + lnwire.PaymentAddrOptional, + lnwire.MPPOptional, + ), + expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional}, + }, + { + name: "two dep last missing required", + raw: lnwire.NewRawFeatureVector( + lnwire.PaymentAddrRequired, + lnwire.MPPRequired, + ), + expErr: ErrMissingFeatureDep{lnwire.TLVOnionPayloadOptional}, + }, + { + name: "two dep first missing optional", + raw: lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadOptional, + lnwire.MPPOptional, + ), + expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional}, + }, + { + name: "two dep first missing required", + raw: lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadRequired, + lnwire.MPPRequired, + ), + expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional}, + }, + { + name: "forest optional", + raw: lnwire.NewRawFeatureVector( + lnwire.GossipQueriesOptional, + lnwire.TLVOnionPayloadOptional, + lnwire.PaymentAddrOptional, + lnwire.MPPOptional, + ), + }, + { + name: "forest required", + raw: lnwire.NewRawFeatureVector( + lnwire.GossipQueriesRequired, + lnwire.TLVOnionPayloadRequired, + lnwire.PaymentAddrRequired, + lnwire.MPPRequired, + ), + }, + { + name: "broken forest optional", + raw: lnwire.NewRawFeatureVector( + lnwire.GossipQueriesOptional, + lnwire.TLVOnionPayloadOptional, + lnwire.MPPOptional, + ), + expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional}, + }, + { + name: "broken forest required", + raw: lnwire.NewRawFeatureVector( + lnwire.GossipQueriesRequired, + lnwire.TLVOnionPayloadRequired, + lnwire.MPPRequired, + ), + 
expErr: ErrMissingFeatureDep{lnwire.PaymentAddrOptional},
+	},
+}
+
+// TestValidateDeps tests that ValidateDeps correctly asserts whether or not the
+// set features constitute a valid feature chain when accounting for transitive
+// dependencies.
+func TestValidateDeps(t *testing.T) {
+	for _, test := range depTests {
+		test := test
+		t.Run(test.name, func(t *testing.T) {
+			testValidateDeps(t, test)
+		})
+	}
+}
+
+func testValidateDeps(t *testing.T, test depTest) {
+	fv := lnwire.NewFeatureVector(test.raw, lnwire.Features)
+	err := ValidateDeps(fv)
+	if !reflect.DeepEqual(err, test.expErr) {
+		t.Fatalf("validation mismatch, want: %v, got: %v",
+			test.expErr, err)
+
+	}
+}
diff --git a/feature/manager.go b/feature/manager.go
new file mode 100644
index 0000000000..159540c24b
--- /dev/null
+++ b/feature/manager.go
@@ -0,0 +1,128 @@
+package feature
+
+import (
+	"fmt"
+
+	"github.com/lightningnetwork/lnd/lnwire"
+)
+
+// Config houses any runtime modifications to the default set descriptors. For
+// our purposes, this typically means disabling certain features to test legacy
+// protocol interoperability or functionality.
+type Config struct {
+	// NoTLVOnion unsets any optional or required TLVOnionPayload bits from
+	// all feature sets.
+	NoTLVOnion bool
+
+	// NoStaticRemoteKey unsets any optional or required StaticRemoteKey
+	// bits from all feature sets.
+	NoStaticRemoteKey bool
+
+	// NoAnchors unsets any bits signaling support for anchor outputs.
+	NoAnchors bool
+}
+
+// Manager is responsible for generating feature vectors for different requested
+// feature sets.
+type Manager struct {
+	// fsets is a static map of feature set to raw feature vectors. Requests
+	// are fulfilled by cloning these internal feature vectors.
+	fsets map[Set]*lnwire.RawFeatureVector
+}
+
+// NewManager creates a new feature Manager, applying any custom modifications
+// to its feature sets before returning. 
+func NewManager(cfg Config) (*Manager, error) {
+	return newManager(cfg, defaultSetDesc)
+}
+
+// newManager creates a new feature Manager, applying any custom modifications
+// to its feature sets before returning. This method accepts the setDesc as its
+// own parameter so that it can be unit tested.
+func newManager(cfg Config, desc setDesc) (*Manager, error) {
+	// First build the default feature vector for all known sets.
+	fsets := make(map[Set]*lnwire.RawFeatureVector)
+	for bit, sets := range desc {
+		for set := range sets {
+			// Fetch the feature vector for this set, allocating a
+			// new one if it doesn't exist.
+			fv, ok := fsets[set]
+			if !ok {
+				fv = lnwire.NewRawFeatureVector()
+			}
+
+			// Set the configured bit on the feature vector,
+			// ensuring that we don't set two feature bits for the
+			// same pair.
+			err := fv.SafeSet(bit)
+			if err != nil {
+				return nil, fmt.Errorf("unable to set "+
+					"%v in %v: %v", bit, set, err)
+			}
+
+			// Write the updated feature vector under its set.
+			fsets[set] = fv
+		}
+	}
+
+	// Now, remove any features as directed by the config.
+	for set, raw := range fsets {
+		if cfg.NoTLVOnion {
+			raw.Unset(lnwire.TLVOnionPayloadOptional)
+			raw.Unset(lnwire.TLVOnionPayloadRequired)
+			raw.Unset(lnwire.PaymentAddrOptional)
+			raw.Unset(lnwire.PaymentAddrRequired)
+			raw.Unset(lnwire.MPPOptional)
+			raw.Unset(lnwire.MPPRequired)
+		}
+		if cfg.NoStaticRemoteKey {
+			raw.Unset(lnwire.StaticRemoteKeyOptional)
+			raw.Unset(lnwire.StaticRemoteKeyRequired)
+		}
+		if cfg.NoAnchors {
+			raw.Unset(lnwire.AnchorsOptional)
+			raw.Unset(lnwire.AnchorsRequired)
+		}
+
+		// Ensure that all of our feature sets properly set any
+		// dependent features.
+		fv := lnwire.NewFeatureVector(raw, lnwire.Features)
+		err := ValidateDeps(fv)
+		if err != nil {
+			return nil, fmt.Errorf("invalid feature set %v: %v",
+				set, err)
+		}
+	}
+
+	return &Manager{
+		fsets: fsets,
+	}, nil
+}
+
+// GetRaw returns a raw feature vector for the passed set. 
If no set is known, +// an empty raw feature vector is returned. +func (m *Manager) GetRaw(set Set) *lnwire.RawFeatureVector { + if fv, ok := m.fsets[set]; ok { + return fv.Clone() + } + + return lnwire.NewRawFeatureVector() +} + +// Get returns a feature vector for the passed set. If no set is known, an empty +// feature vector is returned. +func (m *Manager) Get(set Set) *lnwire.FeatureVector { + raw := m.GetRaw(set) + return lnwire.NewFeatureVector(raw, lnwire.Features) +} + +// ListSets returns a list of the feature sets that our node supports. +func (m *Manager) ListSets() []Set { + var sets []Set + + for set := range m.fsets { + sets = append(sets, set) + } + + return sets +} diff --git a/feature/manager_internal_test.go b/feature/manager_internal_test.go new file mode 100644 index 0000000000..9c97bd052b --- /dev/null +++ b/feature/manager_internal_test.go @@ -0,0 +1,128 @@ +package feature + +import ( + "reflect" + "testing" + + "github.com/lightningnetwork/lnd/lnwire" +) + +type managerTest struct { + name string + cfg Config +} + +const unknownFeature lnwire.FeatureBit = 30 + +var testSetDesc = setDesc{ + lnwire.DataLossProtectRequired: { + SetNodeAnn: {}, // I + }, + lnwire.TLVOnionPayloadOptional: { + SetInit: {}, // I + SetNodeAnn: {}, // N + }, + lnwire.StaticRemoteKeyOptional: { + SetInit: {}, // I + SetNodeAnn: {}, // N + }, +} + +var managerTests = []managerTest{ + { + name: "default", + cfg: Config{}, + }, + { + name: "no tlv", + cfg: Config{ + NoTLVOnion: true, + }, + }, + { + name: "no static remote key", + cfg: Config{ + NoStaticRemoteKey: true, + }, + }, + { + name: "no tlv or static remote key", + cfg: Config{ + NoTLVOnion: true, + NoStaticRemoteKey: true, + }, + }, +} + +// TestManager asserts basic initialazation and operation of a feature manager, +// including that the proper features are removed in response to config changes. 
+func TestManager(t *testing.T) { + for _, test := range managerTests { + test := test + t.Run(test.name, func(t *testing.T) { + testManager(t, test) + }) + } +} + +func testManager(t *testing.T, test managerTest) { + m, err := newManager(test.cfg, testSetDesc) + if err != nil { + t.Fatalf("unable to create feature manager: %v", err) + } + + sets := []Set{ + SetInit, + SetLegacyGlobal, + SetNodeAnn, + SetInvoice, + } + + for _, set := range sets { + raw := m.GetRaw(set) + fv := m.Get(set) + + fv2 := lnwire.NewFeatureVector(raw, lnwire.Features) + + if !reflect.DeepEqual(fv, fv2) { + t.Fatalf("mismatch Get vs GetRaw, raw: %v vs fv: %v", + fv2, fv) + } + + assertUnset := func(bit lnwire.FeatureBit) { + hasBit := fv.HasFeature(bit) || fv.HasFeature(bit^1) + if hasBit { + t.Fatalf("bit %v or %v is set", bit, bit^1) + } + } + + // Assert that the manager properly unset the configured feature + // bits from all sets. + if test.cfg.NoTLVOnion { + assertUnset(lnwire.TLVOnionPayloadOptional) + } + if test.cfg.NoStaticRemoteKey { + assertUnset(lnwire.StaticRemoteKeyOptional) + } + + assertUnset(unknownFeature) + } + + // Do same basic sanity checks on features that are always present. + nodeFeatures := m.Get(SetNodeAnn) + + assertSet := func(bit lnwire.FeatureBit) { + has := nodeFeatures.HasFeature(bit) + if !has { + t.Fatalf("node features don't advertised %v", bit) + } + } + + assertSet(lnwire.DataLossProtectOptional) + if !test.cfg.NoTLVOnion { + assertSet(lnwire.TLVOnionPayloadRequired) + } + if !test.cfg.NoStaticRemoteKey { + assertSet(lnwire.StaticRemoteKeyOptional) + } +} diff --git a/feature/required.go b/feature/required.go new file mode 100644 index 0000000000..d141604df6 --- /dev/null +++ b/feature/required.go @@ -0,0 +1,37 @@ +package feature + +import ( + "fmt" + + "github.com/lightningnetwork/lnd/lnwire" +) + +// ErrUnknownRequired signals that a feature vector requires certain features +// that our node is unaware of or does not implement. 
+type ErrUnknownRequired struct { + unknown []lnwire.FeatureBit +} + +// NewErrUnknownRequired initializes an ErrUnknownRequired with the unknown +// feature bits. +func NewErrUnknownRequired(unknown []lnwire.FeatureBit) ErrUnknownRequired { + return ErrUnknownRequired{ + unknown: unknown, + } +} + +// Error returns a human-readable description of the error. +func (e ErrUnknownRequired) Error() string { + return fmt.Sprintf("feature vector contains unknown required "+ + "features: %v", e.unknown) +} + +// ValidateRequired returns an error if the feature vector contains a non-zero +// number of unknown, required feature bits. +func ValidateRequired(fv *lnwire.FeatureVector) error { + unknown := fv.UnknownRequiredFeatures() + if len(unknown) > 0 { + return NewErrUnknownRequired(unknown) + } + return nil +} diff --git a/feature/set.go b/feature/set.go new file mode 100644 index 0000000000..2ac2ce52c4 --- /dev/null +++ b/feature/set.go @@ -0,0 +1,41 @@ +package feature + +// Set is an enum identifying various feature sets, which separates the single +// feature namespace into distinct categories depending what context a feature +// vector is being used. +type Set uint8 + +const ( + // SetInit identifies features that should be sent in an Init message to + // a remote peer. + SetInit Set = iota + + // SetLegacyGlobal identifies features that should be set in the legacy + // GlobalFeatures field of an Init message, which maintains backwards + // compatibility with nodes that haven't implemented flat features. + SetLegacyGlobal + + // SetNodeAnn identifies features that should be advertised on node + // announcements. + SetNodeAnn + + // SetInvoice identifies features that should be advertised on invoices + // generated by the daemon. + SetInvoice +) + +// String returns a human-readable description of a Set. 
+func (s Set) String() string { + switch s { + case SetInit: + return "SetInit" + case SetLegacyGlobal: + return "SetLegacyGlobal" + case SetNodeAnn: + return "SetNodeAnn" + case SetInvoice: + return "SetInvoice" + default: + return "SetUnknown" + } +} diff --git a/fundingmanager.go b/fundingmanager.go index 99161a1447..95e6464ea5 100644 --- a/fundingmanager.go +++ b/fundingmanager.go @@ -9,14 +9,15 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/chanacceptor" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/discovery" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" @@ -24,6 +25,8 @@ import ( "github.com/lightningnetwork/lnd/lnpeer" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing" "golang.org/x/crypto/salsa20" @@ -92,6 +95,13 @@ var ( // blocks pass without confirmation. ErrConfirmationTimeout = errors.New("timeout waiting for funding " + "confirmation") + + // errUpfrontShutdownScriptNotSupported is returned if an upfront shutdown + // script is set for a peer that does not support the feature bit. + errUpfrontShutdownScriptNotSupported = errors.New("peer does not support" + + "option upfront shutdown script") + + zeroID [32]byte ) // reservationWithCtx encapsulates a pending channel reservation. 
This wrapper @@ -230,7 +240,7 @@ type fundingConfig struct { // FeeEstimator calculates appropriate fee rates based on historical // transaction information. - FeeEstimator lnwallet.FeeEstimator + FeeEstimator chainfee.Estimator // Notifier is used by the FundingManager to determine when the // channel's funding transaction has been confirmed on the blockchain @@ -244,7 +254,8 @@ type fundingConfig struct { // // TODO(roasbeef): should instead pass on this responsibility to a // distinct sub-system? - SignMessage func(pubKey *btcec.PublicKey, msg []byte) (*btcec.Signature, error) + SignMessage func(pubKey *btcec.PublicKey, + msg []byte) (input.Signature, error) // CurrentNodeAnnouncement should return the latest, fully signed node // announcement from the backing Lightning Network node. @@ -278,6 +289,10 @@ type fundingConfig struct { // initially announcing channels. DefaultRoutingPolicy htlcswitch.ForwardingPolicy + // DefaultMinHtlcIn is the default minimum incoming htlc value that is + // set as a channel parameter. + DefaultMinHtlcIn lnwire.MilliSatoshi + // NumRequiredConfs is a function closure that helps the funding // manager decide how many confirmations it should require for a // channel extended to it. The function is able to take into account @@ -350,6 +365,10 @@ type fundingConfig struct { // and on the requesting node's public key that returns a bool which tells // the funding manager whether or not to accept the channel. OpenChannelPredicate chanacceptor.ChannelAcceptor + + // NotifyPendingOpenChannelEvent informs the ChannelNotifier when channels + // enter a pending state. + NotifyPendingOpenChannelEvent func(wire.OutPoint, *channeldb.OpenChannel) } // fundingManager acts as an orchestrator/bridge between the wallet's @@ -520,8 +539,9 @@ func (f *fundingManager) start() error { // Rebroadcast the funding transaction for any pending // channel that we initiated. No error will be returned - // if the transaction already has been broadcasted. 
- if channel.ChanType == channeldb.SingleFunder && + // if the transaction already has been broadcast. + chanType := channel.ChanType + if chanType.IsSingleFunder() && chanType.HasFundingTx() && channel.IsInitiator { err := f.cfg.PublishTransaction( @@ -677,10 +697,10 @@ func (f *fundingManager) CancelPeerReservations(nodePub [33]byte) { func (f *fundingManager) failFundingFlow(peer lnpeer.Peer, tempChanID [32]byte, fundingErr error) { - fndgLog.Debugf("Failing funding flow for pendingID=%x: %v", + fndgLog.Debugf("Failing funding flow for pending_id=%x: %v", tempChanID, fundingErr) - ctx, err := f.cancelReservationCtx(peer.IdentityKey(), tempChanID) + ctx, err := f.cancelReservationCtx(peer.IdentityKey(), tempChanID, false) if err != nil { fndgLog.Errorf("unable to cancel reservation: %v", err) } @@ -811,7 +831,7 @@ func (f *fundingManager) advanceFundingState(channel *channeldb.OpenChannel, // network. // TODO(halseth): could do graph consistency check // here, and re-add the edge if missing. - fndgLog.Debugf("ChannlPoint(%v) with chanID=%v not "+ + fndgLog.Debugf("ChannelPoint(%v) with chan_id=%x not "+ "found in opening database, assuming already "+ "announced to the network", channel.FundingOutpoint, pendingChanID) @@ -828,8 +848,8 @@ func (f *fundingManager) advanceFundingState(channel *channeldb.OpenChannel, // are still steps left of the setup procedure. We continue the // procedure where we left off. err = f.stateStep( - channel, lnChannel, shortChanID, channelState, - updateChan, + channel, lnChannel, shortChanID, pendingChanID, + channelState, updateChan, ) if err != nil { fndgLog.Errorf("Unable to advance state(%v): %v", @@ -845,7 +865,8 @@ func (f *fundingManager) advanceFundingState(channel *channeldb.OpenChannel, // updateChan can be set non-nil to get OpenStatusUpdates. 
func (f *fundingManager) stateStep(channel *channeldb.OpenChannel, lnChannel *lnwallet.LightningChannel, - shortChanID *lnwire.ShortChannelID, channelState channelOpeningState, + shortChanID *lnwire.ShortChannelID, pendingChanID [32]byte, + channelState channelOpeningState, updateChan chan<- *lnrpc.OpenStatusUpdate) error { chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint) @@ -924,6 +945,7 @@ func (f *fundingManager) stateStep(channel *channeldb.OpenChannel, ChannelPoint: cp, }, }, + PendingChanId: pendingChanID[:], } select { @@ -989,7 +1011,11 @@ func (f *fundingManager) advancePendingChannelState( LocalChanConfig: ch.LocalChanCfg, } - if err := ch.CloseChannel(closeInfo); err != nil { + // Close the channel with us as the initiator because we are + // timing the channel out. + if err := ch.CloseChannel( + closeInfo, channeldb.ChanStatusLocalCloseInitiator, + ); err != nil { return fmt.Errorf("failed closing channel "+ "%v: %v", ch.FundingOutpoint, err) } @@ -1082,6 +1108,42 @@ func (f *fundingManager) processFundingOpen(msg *lnwire.OpenChannel, } } +// commitmentType returns the commitment type to use for the channel, based on +// the features the two peers have available. +func commitmentType(localFeatures, + remoteFeatures *lnwire.FeatureVector) lnwallet.CommitmentType { + + // If both peers are signalling support for anchor commitments, this + // implicitly mean we'll create the channel of this type. Note that + // this also enables tweakless commitments, as anchor commitments are + // always tweakless. 
+ localAnchors := localFeatures.HasFeature(
+ lnwire.AnchorsOptional,
+ )
+ remoteAnchors := remoteFeatures.HasFeature(
+ lnwire.AnchorsOptional,
+ )
+ if localAnchors && remoteAnchors {
+ return lnwallet.CommitmentTypeAnchors
+ }
+
+ localTweakless := localFeatures.HasFeature(
+ lnwire.StaticRemoteKeyOptional,
+ )
+ remoteTweakless := remoteFeatures.HasFeature(
+ lnwire.StaticRemoteKeyOptional,
+ )
+
+ // If both nodes are signaling the proper feature bit for tweakless
+ // commitments, we'll use that.
+ if localTweakless && remoteTweakless {
+ return lnwallet.CommitmentTypeTweakless
+ }
+
+ // Otherwise we'll fall back to the legacy type.
+ return lnwallet.CommitmentTypeLegacy
+}
+
+// handleFundingOpen creates an initial 'ChannelReservation' within the wallet,
+// then responds to the source peer with an accept channel message progressing
+// the funding workflow.
 @@ -1204,26 +1266,23 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) { // negotiated the new tweakless commitment format. This is only the // case if *both* us and the remote peer are signaling the proper // feature bit. 
- localTweakless := fmsg.peer.LocalGlobalFeatures().HasFeature( - lnwire.StaticRemoteKeyOptional, + commitType := commitmentType( + fmsg.peer.LocalFeatures(), fmsg.peer.RemoteFeatures(), ) - remoteTweakless := fmsg.peer.RemoteGlobalFeatures().HasFeature( - lnwire.StaticRemoteKeyOptional, - ) - tweaklessCommitment := localTweakless && remoteTweakless chainHash := chainhash.Hash(msg.ChainHash) req := &lnwallet.InitFundingReserveMsg{ ChainHash: &chainHash, + PendingChanID: msg.PendingChannelID, NodeID: fmsg.peer.IdentityKey(), NodeAddr: fmsg.peer.Address(), LocalFundingAmt: 0, RemoteFundingAmt: amt, - CommitFeePerKw: lnwallet.SatPerKWeight(msg.FeePerKiloWeight), + CommitFeePerKw: chainfee.SatPerKWeight(msg.FeePerKiloWeight), FundingFeePerKw: 0, PushMSat: msg.PushAmount, Flags: msg.ChannelFlags, MinConfs: 1, - Tweakless: tweaklessCommitment, + CommitType: commitType, } reservation, err := f.cfg.Wallet.InitChannelReservation(req) @@ -1258,17 +1317,40 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) { return } + // Check whether the peer supports upfront shutdown, and get a new wallet + // address if our node is configured to set shutdown addresses by default. + // A nil address is set in place of user input, because this channel open + // was not initiated by the user. 
+ shutdown, err := getUpfrontShutdownScript( + fmsg.peer, nil, + func() (lnwire.DeliveryAddress, error) { + addr, err := f.cfg.Wallet.NewAddress(lnwallet.WitnessPubKey, false) + if err != nil { + return nil, err + } + return txscript.PayToAddrScript(addr) + }, + ) + if err != nil { + f.failFundingFlow( + fmsg.peer, fmsg.msg.PendingChannelID, + fmt.Errorf("getUpfrontShutdownScript error: %v", err), + ) + return + } + reservation.SetOurUpfrontShutdown(shutdown) + fndgLog.Infof("Requiring %v confirmations for pendingChan(%x): "+ - "amt=%v, push_amt=%v, tweakless=%v", numConfsReq, + "amt=%v, push_amt=%v, committype=%v, upfrontShutdown=%x", numConfsReq, fmsg.msg.PendingChannelID, amt, msg.PushAmount, - tweaklessCommitment) + commitType, msg.UpfrontShutdownScript) // Generate our required constraints for the remote party. remoteCsvDelay := f.cfg.RequiredRemoteDelay(amt) chanReserve := f.cfg.RequiredRemoteChanReserve(amt, msg.DustLimit) maxValue := f.cfg.RequiredRemoteMaxValue(amt) maxHtlcs := f.cfg.RequiredRemoteMaxHTLCs(amt) - minHtlc := f.cfg.DefaultRoutingPolicy.MinHTLC + minHtlc := f.cfg.DefaultMinHtlcIn // Once the reservation has been created successfully, we add it to // this peer's map of pending reservations to track this particular @@ -1321,6 +1403,7 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) { PubKey: copyPubKey(msg.HtlcPoint), }, }, + UpfrontShutdown: msg.UpfrontShutdownScript, } err = reservation.ProcessSingleContribution(remoteContribution) if err != nil { @@ -1329,7 +1412,7 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) { return } - fndgLog.Infof("Sending fundingResp for pendingID(%x)", + fndgLog.Infof("Sending fundingResp for pending_id(%x)", msg.PendingChannelID) fndgLog.Debugf("Remote party accepted commitment constraints: %v", spew.Sdump(remoteContribution.ChannelConfig.ChannelConstraints)) @@ -1338,22 +1421,23 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) { // contribution in the next 
message of the workflow. ourContribution := reservation.OurContribution() fundingAccept := lnwire.AcceptChannel{ - PendingChannelID: msg.PendingChannelID, - DustLimit: ourContribution.DustLimit, - MaxValueInFlight: maxValue, - ChannelReserve: chanReserve, - MinAcceptDepth: uint32(numConfsReq), - HtlcMinimum: minHtlc, - CsvDelay: remoteCsvDelay, - MaxAcceptedHTLCs: maxHtlcs, - FundingKey: ourContribution.MultiSigKey.PubKey, - RevocationPoint: ourContribution.RevocationBasePoint.PubKey, - PaymentPoint: ourContribution.PaymentBasePoint.PubKey, - DelayedPaymentPoint: ourContribution.DelayBasePoint.PubKey, - HtlcPoint: ourContribution.HtlcBasePoint.PubKey, - FirstCommitmentPoint: ourContribution.FirstCommitmentPoint, - } - if err := fmsg.peer.SendMessage(false, &fundingAccept); err != nil { + PendingChannelID: msg.PendingChannelID, + DustLimit: ourContribution.DustLimit, + MaxValueInFlight: maxValue, + ChannelReserve: chanReserve, + MinAcceptDepth: uint32(numConfsReq), + HtlcMinimum: minHtlc, + CsvDelay: remoteCsvDelay, + MaxAcceptedHTLCs: maxHtlcs, + FundingKey: ourContribution.MultiSigKey.PubKey, + RevocationPoint: ourContribution.RevocationBasePoint.PubKey, + PaymentPoint: ourContribution.PaymentBasePoint.PubKey, + DelayedPaymentPoint: ourContribution.DelayBasePoint.PubKey, + HtlcPoint: ourContribution.HtlcBasePoint.PubKey, + FirstCommitmentPoint: ourContribution.FirstCommitmentPoint, + UpfrontShutdownScript: ourContribution.UpfrontShutdown, + } + if err := fmsg.peer.SendMessage(true, &fundingAccept); err != nil { fndgLog.Errorf("unable to send funding response to peer: %v", err) f.failFundingFlow(fmsg.peer, msg.PendingChannelID, err) return @@ -1382,7 +1466,7 @@ func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) { resCtx, err := f.getReservationCtx(peerKey, pendingChanID) if err != nil { - fndgLog.Warnf("Can't find reservation (peerKey:%v, chanID:%v)", + fndgLog.Warnf("Can't find reservation (peerKey:%v, chan_id:%v)", peerKey, pendingChanID) return 
} @@ -1390,7 +1474,8 @@ func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) { // Update the timestamp once the fundingAcceptMsg has been handled. defer resCtx.updateTimestamp() - fndgLog.Infof("Recv'd fundingResponse for pendingID(%x)", pendingChanID[:]) + fndgLog.Infof("Recv'd fundingResponse for pending_id(%x)", + pendingChanID[:]) // The required number of confirmations should not be greater than the // maximum number of confirmations required by the ChainNotifier to @@ -1461,9 +1546,45 @@ func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) { PubKey: copyPubKey(msg.HtlcPoint), }, }, + UpfrontShutdown: msg.UpfrontShutdownScript, } err = resCtx.reservation.ProcessContribution(remoteContribution) - if err != nil { + + // The wallet has detected that a PSBT funding process was requested by + // the user and has halted the funding process after negotiating the + // multisig keys. We now have everything that is needed for the user to + // start constructing a PSBT that sends to the multisig funding address. + var psbtIntent *chanfunding.PsbtIntent + if psbtErr, ok := err.(*lnwallet.PsbtFundingRequired); ok { + // Return the information that is needed by the user to + // construct the PSBT back to the caller. 
+ addr, amt, packet, err := psbtErr.Intent.FundingParams() + if err != nil { + fndgLog.Errorf("Unable to process PSBT funding params "+ + "for contribution from %v: %v", peerKey, err) + f.failFundingFlow(fmsg.peer, msg.PendingChannelID, err) + return + } + var buf bytes.Buffer + err = packet.Serialize(&buf) + if err != nil { + fndgLog.Errorf("Unable to serialize PSBT for "+ + "contribution from %v: %v", peerKey, err) + f.failFundingFlow(fmsg.peer, msg.PendingChannelID, err) + return + } + resCtx.updates <- &lnrpc.OpenStatusUpdate{ + PendingChanId: pendingChanID[:], + Update: &lnrpc.OpenStatusUpdate_PsbtFund{ + PsbtFund: &lnrpc.ReadyForPsbtFunding{ + FundingAddress: addr.EncodeAddress(), + FundingAmount: amt, + Psbt: buf.Bytes(), + }, + }, + } + psbtIntent = psbtErr.Intent + } else if err != nil { fndgLog.Errorf("Unable to process contribution from %v: %v", peerKey, err) f.failFundingFlow(fmsg.peer, msg.PendingChannelID, err) @@ -1475,6 +1596,105 @@ func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) { fndgLog.Debugf("Remote party accepted commitment constraints: %v", spew.Sdump(remoteContribution.ChannelConfig.ChannelConstraints)) + // If the user requested funding through a PSBT, we cannot directly + // continue now and need to wait for the fully funded and signed PSBT + // to arrive. To not block any other channels from opening, we wait in + // a separate goroutine. + if psbtIntent != nil { + f.wg.Add(1) + go func() { + defer f.wg.Done() + f.waitForPsbt(psbtIntent, resCtx, pendingChanID) + }() + + // With the new goroutine spawned, we can now exit to unblock + // the main event loop. + return + } + + // In a normal, non-PSBT funding flow, we can jump directly to the next + // step where we expect our contribution to be finalized. + f.continueFundingAccept(resCtx, pendingChanID) +} + +// waitForPsbt blocks until either a signed PSBT arrives, an error occurs or +// the funding manager shuts down. 
In the case of a valid PSBT, the funding flow +// is continued. +// +// NOTE: This method must be called as a goroutine. +func (f *fundingManager) waitForPsbt(intent *chanfunding.PsbtIntent, + resCtx *reservationWithCtx, pendingChanID [32]byte) { + + // failFlow is a helper that logs an error message with the current + // context and then fails the funding flow. + peerKey := resCtx.peer.IdentityKey() + failFlow := func(errMsg string, cause error) { + fndgLog.Errorf("Unable to handle funding accept message "+ + "for peer_key=%x, pending_chan_id=%x: %s: %v", + peerKey.SerializeCompressed(), pendingChanID, errMsg, + cause) + f.failFundingFlow(resCtx.peer, pendingChanID, cause) + } + + // We'll now wait until the intent has received the final and complete + // funding transaction. If the channel is closed without any error being + // sent, we know everything's going as expected. + select { + case err := <-intent.PsbtReady: + switch err { + // If the user canceled the funding reservation, we need to + // inform the other peer about us canceling the reservation. + case chanfunding.ErrUserCanceled: + failFlow("aborting PSBT flow", err) + return + + // If the remote canceled the funding reservation, we don't need + // to send another fail message. But we want to inform the user + // about what happened. + case chanfunding.ErrRemoteCanceled: + fndgLog.Infof("Remote canceled, aborting PSBT flow "+ + "for peer_key=%x, pending_chan_id=%x", + peerKey.SerializeCompressed(), pendingChanID) + return + + // Nil error means the flow continues normally now. + case nil: + + // For any other error, we'll fail the funding flow. + default: + failFlow("error waiting for PSBT flow", err) + return + } + + // A non-nil error means we can continue the funding flow. + // Notify the wallet so it can prepare everything we need to + // continue. 
+ err = resCtx.reservation.ProcessPsbt() + if err != nil { + failFlow("error continuing PSBT flow", err) + return + } + + // We are now ready to continue the funding flow. + f.continueFundingAccept(resCtx, pendingChanID) + + // Handle a server shutdown as well because the reservation won't + // survive a restart as it's in memory only. + case <-f.quit: + fndgLog.Errorf("Unable to handle funding accept message "+ + "for peer_key=%x, pending_chan_id=%x: funding manager "+ + "shutting down", peerKey.SerializeCompressed(), + pendingChanID) + return + } +} + +// continueFundingAccept continues the channel funding flow once our +// contribution is finalized, the channel output is known and the funding +// transaction is signed. +func (f *fundingManager) continueFundingAccept(resCtx *reservationWithCtx, + pendingChanID [32]byte) { + // Now that we have their contribution, we can extract, then send over // both the funding out point and our signature for their version of // the commitment transaction to the remote peer. 
@@ -1499,22 +1719,23 @@ func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) { f.signedReservations[channelID] = pendingChanID f.resMtx.Unlock() - fndgLog.Infof("Generated ChannelPoint(%v) for pendingID(%x)", outPoint, + fndgLog.Infof("Generated ChannelPoint(%v) for pending_id(%x)", outPoint, pendingChanID[:]) + var err error fundingCreated := &lnwire.FundingCreated{ PendingChannelID: pendingChanID, FundingPoint: *outPoint, } - fundingCreated.CommitSig, err = lnwire.NewSigFromRawSignature(sig) + fundingCreated.CommitSig, err = lnwire.NewSigFromSignature(sig) if err != nil { fndgLog.Errorf("Unable to parse signature: %v", err) - f.failFundingFlow(fmsg.peer, msg.PendingChannelID, err) + f.failFundingFlow(resCtx.peer, pendingChanID, err) return } - if err := fmsg.peer.SendMessage(false, fundingCreated); err != nil { + if err := resCtx.peer.SendMessage(true, fundingCreated); err != nil { fndgLog.Errorf("Unable to send funding complete message: %v", err) - f.failFundingFlow(fmsg.peer, msg.PendingChannelID, err) + f.failFundingFlow(resCtx.peer, pendingChanID, err) return } } @@ -1541,7 +1762,7 @@ func (f *fundingManager) handleFundingCreated(fmsg *fundingCreatedMsg) { resCtx, err := f.getReservationCtx(peerKey, pendingChanID) if err != nil { - fndgLog.Warnf("can't find reservation (peerID:%v, chanID:%x)", + fndgLog.Warnf("can't find reservation (peer_id:%v, chan_id:%x)", peerKey, pendingChanID[:]) return } @@ -1552,17 +1773,24 @@ func (f *fundingManager) handleFundingCreated(fmsg *fundingCreatedMsg) { // initiator's commitment transaction, then send our own if it's valid. 
// TODO(roasbeef): make case (p vs P) consistent throughout fundingOut := fmsg.msg.FundingPoint - fndgLog.Infof("completing pendingID(%x) with ChannelPoint(%v)", + fndgLog.Infof("completing pending_id(%x) with ChannelPoint(%v)", pendingChanID[:], fundingOut) + commitSig, err := fmsg.msg.CommitSig.ToSignature() + if err != nil { + fndgLog.Errorf("unable to parse signature: %v", err) + f.failFundingFlow(fmsg.peer, pendingChanID, err) + return + } + // With all the necessary data available, attempt to advance the // funding workflow to the next stage. If this succeeds then the // funding transaction will broadcast after our next message. // CompleteReservationSingle will also mark the channel as 'IsPending' // in the database. - commitSig := fmsg.msg.CommitSig.ToSignatureBytes() completeChan, err := resCtx.reservation.CompleteReservationSingle( - &fundingOut, commitSig) + &fundingOut, commitSig, + ) if err != nil { // TODO(roasbeef): better error logging: peerID, channelID, etc. fndgLog.Errorf("unable to complete single reservation: %v", err) @@ -1591,7 +1819,11 @@ func (f *fundingManager) handleFundingCreated(fmsg *fundingCreatedMsg) { LocalChanConfig: completeChan.LocalChanCfg, } - if err := completeChan.CloseChannel(closeInfo); err != nil { + // Close the channel with us as the initiator because we are + // deciding to exit the funding flow due to an internal error. 
+ if err := completeChan.CloseChannel( + closeInfo, channeldb.ChanStatusLocalCloseInitiator, + ); err != nil { fndgLog.Errorf("Failed closing channel %v: %v", completeChan.FundingOutpoint, err) } @@ -1607,13 +1839,13 @@ func (f *fundingManager) handleFundingCreated(fmsg *fundingCreatedMsg) { f.newChanBarriers[channelID] = make(chan struct{}) f.barrierMtx.Unlock() - fndgLog.Infof("sending FundingSigned for pendingID(%x) over "+ + fndgLog.Infof("sending FundingSigned for pending_id(%x) over "+ "ChannelPoint(%v)", pendingChanID[:], fundingOut) // With their signature for our version of the commitment transaction // verified, we can now send over our signature to the remote peer. _, sig := resCtx.reservation.OurSignatures() - ourCommitSig, err := lnwire.NewSigFromRawSignature(sig) + ourCommitSig, err := lnwire.NewSigFromSignature(sig) if err != nil { fndgLog.Errorf("unable to parse signature: %v", err) f.failFundingFlow(fmsg.peer, pendingChanID, err) @@ -1625,7 +1857,7 @@ func (f *fundingManager) handleFundingCreated(fmsg *fundingCreatedMsg) { ChanID: channelID, CommitSig: ourCommitSig, } - if err := fmsg.peer.SendMessage(false, fundingSigned); err != nil { + if err := fmsg.peer.SendMessage(true, fundingSigned); err != nil { fndgLog.Errorf("unable to send FundingSigned message: %v", err) f.failFundingFlow(fmsg.peer, pendingChanID, err) deleteFromDatabase() @@ -1647,6 +1879,10 @@ func (f *fundingManager) handleFundingCreated(fmsg *fundingCreatedMsg) { f.localDiscoverySignals[channelID] = make(chan struct{}) f.localDiscoveryMtx.Unlock() + // Inform the ChannelNotifier that the channel has entered + // pending open state. + f.cfg.NotifyPendingOpenChannelEvent(fundingOut, completeChan) + // At this point we have sent our last funding message to the // initiating peer before the funding transaction will be broadcast. // With this last message, our job as the responder is now complete. 
@@ -1703,8 +1939,8 @@ func (f *fundingManager) handleFundingSigned(fmsg *fundingSignedMsg) { peerKey := fmsg.peer.IdentityKey() resCtx, err := f.getReservationCtx(peerKey, pendingChanID) if err != nil { - fndgLog.Warnf("Unable to find reservation (peerID:%v, chanID:%x)", - peerKey, pendingChanID[:]) + fndgLog.Warnf("Unable to find reservation (peer_id:%v, "+ + "chan_id:%x)", peerKey, pendingChanID[:]) // TODO: add ErrChanNotFound? f.failFundingFlow(fmsg.peer, pendingChanID, err) return @@ -1722,7 +1958,13 @@ func (f *fundingManager) handleFundingSigned(fmsg *fundingSignedMsg) { // The remote peer has responded with a signature for our commitment // transaction. We'll verify the signature for validity, then commit // the state to disk as we can now open the channel. - commitSig := fmsg.msg.CommitSig.ToSignatureBytes() + commitSig, err := fmsg.msg.CommitSig.ToSignature() + if err != nil { + fndgLog.Errorf("Unable to parse signature: %v", err) + f.failFundingFlow(fmsg.peer, pendingChanID, err) + return + } + completeChan, err := resCtx.reservation.CompleteReservation( nil, commitSig, ) @@ -1737,21 +1979,28 @@ func (f *fundingManager) handleFundingSigned(fmsg *fundingSignedMsg) { // delete it from our set of active reservations. f.deleteReservationCtx(peerKey, pendingChanID) - // Broadcast the finalized funding transaction to the network. - fundingTx := completeChan.FundingTxn - fndgLog.Infof("Broadcasting funding tx for ChannelPoint(%v): %v", - completeChan.FundingOutpoint, spew.Sdump(fundingTx)) + // Broadcast the finalized funding transaction to the network, but only + // if we actually have the funding transaction. 
+ if completeChan.ChanType.HasFundingTx() { + fundingTx := completeChan.FundingTxn - err = f.cfg.PublishTransaction(fundingTx) - if err != nil { - fndgLog.Errorf("Unable to broadcast funding tx for "+ - "ChannelPoint(%v): %v", completeChan.FundingOutpoint, - err) - // We failed to broadcast the funding transaction, but watch - // the channel regardless, in case the transaction made it to - // the network. We will retry broadcast at startup. - // TODO(halseth): retry more often? Handle with CPFP? Just - // delete from the DB? + fndgLog.Infof("Broadcasting funding tx for ChannelPoint(%v): %v", + completeChan.FundingOutpoint, spew.Sdump(fundingTx)) + + err = f.cfg.PublishTransaction(fundingTx) + if err != nil { + fndgLog.Errorf("Unable to broadcast funding tx for "+ + "ChannelPoint(%v): %v", + completeChan.FundingOutpoint, err) + + // We failed to broadcast the funding transaction, but + // watch the channel regardless, in case the + // transaction made it to the network. We will retry + // broadcast at startup. + // + // TODO(halseth): retry more often? Handle with CPFP? + // Just delete from the DB? + } } // Now that we have a finalized reservation for this funding flow, @@ -1763,12 +2012,13 @@ func (f *fundingManager) handleFundingSigned(fmsg *fundingSignedMsg) { "arbitration: %v", fundingPoint, err) } - fndgLog.Infof("Finalizing pendingID(%x) over ChannelPoint(%v), "+ + fndgLog.Infof("Finalizing pending_id(%x) over ChannelPoint(%v), "+ "waiting for channel open on-chain", pendingChanID[:], fundingPoint) // Send an update to the upstream client that the negotiation process // is over. + // // TODO(roasbeef): add abstraction over updates to accommodate // long-polling, or SSE, etc. 
upd := &lnrpc.OpenStatusUpdate{ @@ -1778,10 +2028,14 @@ func (f *fundingManager) handleFundingSigned(fmsg *fundingSignedMsg) { OutputIndex: fundingPoint.Index, }, }, + PendingChanId: pendingChanID[:], } select { case resCtx.updates <- upd: + // Inform the ChannelNotifier that the channel has entered + // pending open state. + f.cfg.NotifyPendingOpenChannelEvent(*fundingPoint, completeChan) case <-f.quit: return } @@ -1935,7 +2189,7 @@ func (f *fundingManager) waitForFundingConfirmation( } fundingPoint := completeChan.FundingOutpoint - fndgLog.Infof("ChannelPoint(%v) is now active: ChannelID(%x)", + fndgLog.Infof("ChannelPoint(%v) is now active: ChannelID(%v)", fundingPoint, lnwire.NewChanIDFromOutPoint(&fundingPoint)) // With the block height and the transaction index known, we can @@ -2133,7 +2387,7 @@ func (f *fundingManager) sendFundingLocked( fndgLog.Infof("Peer(%x) is online, sending FundingLocked "+ "for ChannelID(%v)", peerKey, chanID) - if err := peer.SendMessage(false, fundingLockedMsg); err == nil { + if err := peer.SendMessage(true, fundingLockedMsg); err == nil { // Sending succeeded, we can break out and continue the // funding flow. break @@ -2162,6 +2416,12 @@ func (f *fundingManager) addToRouterGraph(completeChan *channeldb.OpenChannel, // need to determine the smallest HTLC it deems economically relevant. fwdMinHTLC := completeChan.LocalChanCfg.MinHTLC + // We don't necessarily want to go as low as the remote party + // allows. Check it against our default forwarding policy. + if fwdMinHTLC < f.cfg.DefaultRoutingPolicy.MinHTLCOut { + fwdMinHTLC = f.cfg.DefaultRoutingPolicy.MinHTLCOut + } + // We'll obtain the max HTLC value we can forward in our direction, as // we'll use this value within our ChannelUpdate. This value must be <= // channel capacity and <= the maximum in-flight msats set by the peer. 
@@ -2332,30 +2592,13 @@ func (f *fundingManager) annAfterSixConfs(completeChan *channeldb.OpenChannel, fndgLog.Infof("Announcing ChannelPoint(%v), short_chan_id=%v", &fundingPoint, shortChanID) - // We'll obtain the min HTLC value we can forward in our - // direction, as we'll use this value within our ChannelUpdate. - // This constraint is originally set by the remote node, as it - // will be the one that will need to determine the smallest - // HTLC it deems economically relevant. - fwdMinHTLC := completeChan.LocalChanCfg.MinHTLC - - // We'll obtain the max HTLC value we can forward in our - // direction, as we'll use this value within our ChannelUpdate. - // This value must be <= channel capacity and <= the maximum - // in-flight msats set by the peer. - fwdMaxHTLC := completeChan.LocalChanCfg.MaxPendingAmount - capacityMSat := lnwire.NewMSatFromSatoshis(completeChan.Capacity) - if fwdMaxHTLC > capacityMSat { - fwdMaxHTLC = capacityMSat - } - // Create and broadcast the proofs required to make this channel // public and usable for other nodes for routing. err = f.announceChannel( f.cfg.IDKey, completeChan.IdentityPub, completeChan.LocalChanCfg.MultiSigKey.PubKey, completeChan.RemoteChanCfg.MultiSigKey.PubKey, - *shortChanID, chanID, fwdMinHTLC, fwdMaxHTLC, + *shortChanID, chanID, ) if err != nil { return fmt.Errorf("channel announcement failed: %v", err) @@ -2647,15 +2890,18 @@ func (f *fundingManager) newChanAnnouncement(localPubKey, remotePubKey, // finish, either successfully or with an error. func (f *fundingManager) announceChannel(localIDKey, remoteIDKey, localFundingKey, remoteFundingKey *btcec.PublicKey, shortChanID lnwire.ShortChannelID, - chanID lnwire.ChannelID, fwdMinHTLC, fwdMaxHTLC lnwire.MilliSatoshi) error { + chanID lnwire.ChannelID) error { // First, we'll create the batch of announcements to be sent upon // initial channel creation. 
This includes the channel announcement // itself, the channel update announcement, and our half of the channel // proof needed to fully authenticate the channel. + // + // We can pass in zeroes for the min and max htlc policy, because we + // only use the channel announcement message from the returned struct. ann, err := f.newChanAnnouncement(localIDKey, remoteIDKey, localFundingKey, remoteFundingKey, shortChanID, chanID, - fwdMinHTLC, fwdMaxHTLC, + 0, 0, ) if err != nil { fndgLog.Errorf("can't generate channel announcement: %v", err) @@ -2727,6 +2973,49 @@ func (f *fundingManager) initFundingWorkflow(peer lnpeer.Peer, req *openChanReq) } } +// getUpfrontShutdownScript takes a user provided script and a getScript +// function which can be used to generate an upfront shutdown script. If our +// peer does not support the feature, this function will error if a non-zero +// script was provided by the user, and return an empty script otherwise. If +// our peer does support the feature, we will return the user provided script +// if non-zero, or a freshly generated script if our node is configured to set +// upfront shutdown scripts automatically. +func getUpfrontShutdownScript(peer lnpeer.Peer, script lnwire.DeliveryAddress, + getScript func() (lnwire.DeliveryAddress, error)) (lnwire.DeliveryAddress, + error) { + + // Check whether the remote peer supports upfront shutdown scripts. + remoteUpfrontShutdown := peer.RemoteFeatures().HasFeature( + lnwire.UpfrontShutdownScriptOptional, + ) + + // If the peer does not support upfront shutdown scripts, and one has been + // provided, return an error because the feature is not supported. + if !remoteUpfrontShutdown && len(script) != 0 { + return nil, errUpfrontShutdownScriptNotSupported + } + + // If the peer does not support upfront shutdown, return an empty address. + if !remoteUpfrontShutdown { + return nil, nil + } + + // If the user has provided an script and the peer supports the feature, + // return it. 
Note that user set scripts override the enable upfront + // shutdown flag. + if len(script) > 0 { + return script, nil + } + + // If we do not have setting of upfront shutdown script enabled, return + // an empty script. + if !cfg.EnableUpfrontShutdown { + return nil, nil + } + + return getScript() +} + // handleInitFundingMsg creates a channel reservation within the daemon's // wallet, then sends a funding request to the remote peer kicking off the // funding workflow. @@ -2734,7 +3023,7 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { var ( peerKey = msg.peer.IdentityKey() localAmt = msg.localFundingAmt - minHtlc = msg.minHtlc + minHtlcIn = msg.minHtlcIn remoteCsvDelay = msg.remoteCsvDelay ) @@ -2771,6 +3060,45 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { channelFlags = lnwire.FFAnnounceChannel } + // If the caller specified their own channel ID, then we'll use that. + // Otherwise we'll generate a fresh one as normal. This will be used + // to track this reservation throughout its lifetime. + var chanID [32]byte + if msg.pendingChanID == zeroID { + chanID = f.nextPendingChanID() + } else { + // If the user specified their own pending channel ID, then + // we'll ensure it doesn't collide with any existing pending + // channel ID. + chanID = msg.pendingChanID + if _, err := f.getReservationCtx(peerKey, chanID); err == nil { + msg.err <- fmt.Errorf("pendingChannelID(%x) "+ + "already present", chanID[:]) + return + } + } + + // Check whether the peer supports upfront shutdown, and get an address + // which should be used (either a user specified address or a new + // address from the wallet if our node is configured to set shutdown + // address by default). 
+ shutdown, err := getUpfrontShutdownScript( + msg.peer, msg.openChanReq.shutdownScript, + func() (lnwire.DeliveryAddress, error) { + addr, err := f.cfg.Wallet.NewAddress( + lnwallet.WitnessPubKey, false, + ) + if err != nil { + return nil, err + } + return txscript.PayToAddrScript(addr) + }, + ) + if err != nil { + msg.err <- err + return + } + // Initialize a funding reservation with the local wallet. If the // wallet doesn't have enough funds to commit to this channel, then the // request will fail, and be aborted. @@ -2779,15 +3107,12 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { // negotiated the new tweakless commitment format. This is only the // case if *both* us and the remote peer are signaling the proper // feature bit. - localTweakless := msg.peer.LocalGlobalFeatures().HasFeature( - lnwire.StaticRemoteKeyOptional, - ) - remoteTweakless := msg.peer.RemoteGlobalFeatures().HasFeature( - lnwire.StaticRemoteKeyOptional, + commitType := commitmentType( + msg.peer.LocalFeatures(), msg.peer.RemoteFeatures(), ) - tweaklessCommitment := localTweakless && remoteTweakless req := &lnwallet.InitFundingReserveMsg{ ChainHash: &msg.chainHash, + PendingChanID: chanID, NodeID: peerKey, NodeAddr: msg.peer.Address(), SubtractFees: msg.subtractFees, @@ -2798,7 +3123,8 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { PushMSat: msg.pushAmt, Flags: channelFlags, MinConfs: msg.minConfs, - Tweakless: tweaklessCommitment, + CommitType: commitType, + ChanFunder: msg.chanFunder, } reservation, err := f.cfg.Wallet.InitChannelReservation(req) @@ -2807,16 +3133,15 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { return } + // Set our upfront shutdown address in the existing reservation. + reservation.SetOurUpfrontShutdown(shutdown) + // Now that we have successfully reserved funds for this channel in the // wallet, we can fetch the final channel capacity. 
This is done at // this point since the final capacity might change in case of // SubtractFees=true. capacity := reservation.Capacity() - // Obtain a new pending channel ID which is used to track this - // reservation throughout its lifetime. - chanID := f.nextPendingChanID() - fndgLog.Infof("Target commit tx sat/kw for pendingID(%x): %v", chanID, int64(commitFeePerKw)) @@ -2828,8 +3153,8 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { } // If no minimum HTLC value was specified, use the default one. - if minHtlc == 0 { - minHtlc = f.cfg.DefaultRoutingPolicy.MinHTLC + if minHtlcIn == 0 { + minHtlcIn = f.cfg.DefaultMinHtlcIn } // If a pending channel map for this peer isn't already created, then @@ -2844,7 +3169,7 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { resCtx := &reservationWithCtx{ chanAmt: capacity, remoteCsvDelay: remoteCsvDelay, - remoteMinHtlc: minHtlc, + remoteMinHtlc: minHtlcIn, reservation: reservation, peer: msg.peer, updates: msg.updates, @@ -2867,37 +3192,39 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { maxValue := f.cfg.RequiredRemoteMaxValue(capacity) maxHtlcs := f.cfg.RequiredRemoteMaxHTLCs(capacity) - fndgLog.Infof("Starting funding workflow with %v for pendingID(%x), "+ - "tweakless=%v", msg.peer.Address(), chanID, tweaklessCommitment) + fndgLog.Infof("Starting funding workflow with %v for pending_id(%x), "+ + "committype=%v", msg.peer.Address(), chanID, commitType) fundingOpen := lnwire.OpenChannel{ - ChainHash: *f.cfg.Wallet.Cfg.NetParams.GenesisHash, - PendingChannelID: chanID, - FundingAmount: capacity, - PushAmount: msg.pushAmt, - DustLimit: ourContribution.DustLimit, - MaxValueInFlight: maxValue, - ChannelReserve: chanReserve, - HtlcMinimum: minHtlc, - FeePerKiloWeight: uint32(commitFeePerKw), - CsvDelay: remoteCsvDelay, - MaxAcceptedHTLCs: maxHtlcs, - FundingKey: ourContribution.MultiSigKey.PubKey, - RevocationPoint: ourContribution.RevocationBasePoint.PubKey, - 
PaymentPoint: ourContribution.PaymentBasePoint.PubKey, - HtlcPoint: ourContribution.HtlcBasePoint.PubKey, - DelayedPaymentPoint: ourContribution.DelayBasePoint.PubKey, - FirstCommitmentPoint: ourContribution.FirstCommitmentPoint, - ChannelFlags: channelFlags, - } - if err := msg.peer.SendMessage(false, &fundingOpen); err != nil { + ChainHash: *f.cfg.Wallet.Cfg.NetParams.GenesisHash, + PendingChannelID: chanID, + FundingAmount: capacity, + PushAmount: msg.pushAmt, + DustLimit: ourContribution.DustLimit, + MaxValueInFlight: maxValue, + ChannelReserve: chanReserve, + HtlcMinimum: minHtlcIn, + FeePerKiloWeight: uint32(commitFeePerKw), + CsvDelay: remoteCsvDelay, + MaxAcceptedHTLCs: maxHtlcs, + FundingKey: ourContribution.MultiSigKey.PubKey, + RevocationPoint: ourContribution.RevocationBasePoint.PubKey, + PaymentPoint: ourContribution.PaymentBasePoint.PubKey, + HtlcPoint: ourContribution.HtlcBasePoint.PubKey, + DelayedPaymentPoint: ourContribution.DelayBasePoint.PubKey, + FirstCommitmentPoint: ourContribution.FirstCommitmentPoint, + ChannelFlags: channelFlags, + UpfrontShutdownScript: shutdown, + } + if err := msg.peer.SendMessage(true, &fundingOpen); err != nil { e := fmt.Errorf("Unable to send funding request message: %v", err) fndgLog.Errorf(e.Error()) // Since we were unable to send the initial message to the peer // and start the funding flow, we'll cancel this reservation. - if _, err := f.cancelReservationCtx(peerKey, chanID); err != nil { + _, err := f.cancelReservationCtx(peerKey, chanID, false) + if err != nil { fndgLog.Errorf("unable to cancel reservation: %v", err) } @@ -2957,7 +3284,7 @@ func (f *fundingManager) handleErrorMsg(fmsg *fundingErrorMsg) { // First, we'll attempt to retrieve and cancel the funding workflow // that this error was tied to. If we're unable to do so, then we'll // exit early as this was an unwarranted error. 
- resCtx, err := f.cancelReservationCtx(fmsg.peerKey, chanID) + resCtx, err := f.cancelReservationCtx(fmsg.peerKey, chanID, true) if err != nil { fndgLog.Warnf("Received error for non-existent funding "+ "flow: %v (%v)", err, protocolErr.Error()) @@ -2971,6 +3298,14 @@ func (f *fundingManager) handleErrorMsg(fmsg *fundingErrorMsg) { ) fndgLog.Errorf(fundingErr.Error()) + // If this was a PSBT funding flow, the remote likely timed out because + // we waited too long. Return a nice error message to the user in that + // case so the user knows what's the problem. + if resCtx.reservation.IsPsbt() { + fundingErr = fmt.Errorf("%w: %v", chanfunding.ErrRemoteCanceled, + fundingErr) + } + resCtx.err <- fundingErr } @@ -2987,7 +3322,14 @@ func (f *fundingManager) pruneZombieReservations() { continue } - if time.Since(resCtx.lastUpdated) > f.cfg.ReservationTimeout { + // We don't want to expire PSBT funding reservations. + // These reservations are always initiated by us and the + // remote peer is likely going to cancel them after some + // idle time anyway. So no need for us to also prune + // them. + sinceLastUpdate := time.Since(resCtx.lastUpdated) + isExpired := sinceLastUpdate > f.cfg.ReservationTimeout + if !resCtx.reservation.IsPsbt() && isExpired { zombieReservations[pendingChanID] = resCtx } } @@ -2996,7 +3338,7 @@ func (f *fundingManager) pruneZombieReservations() { for pendingChanID, resCtx := range zombieReservations { err := fmt.Errorf("reservation timed out waiting for peer "+ - "(peerID:%v, chanID:%x)", resCtx.peer.IdentityKey(), + "(peer_id:%x, chan_id:%x)", resCtx.peer.IdentityKey(), pendingChanID[:]) fndgLog.Warnf(err.Error()) f.failFundingFlow(resCtx.peer, pendingChanID, err) @@ -3006,7 +3348,7 @@ func (f *fundingManager) pruneZombieReservations() { // cancelReservationCtx does all needed work in order to securely cancel the // reservation. 
func (f *fundingManager) cancelReservationCtx(peerKey *btcec.PublicKey, - pendingChanID [32]byte) (*reservationWithCtx, error) { + pendingChanID [32]byte, byRemote bool) (*reservationWithCtx, error) { fndgLog.Infof("Cancelling funding reservation for node_key=%x, "+ "chan_id=%x", peerKey.SerializeCompressed(), pendingChanID[:]) @@ -3028,6 +3370,14 @@ func (f *fundingManager) cancelReservationCtx(peerKey *btcec.PublicKey, "peer(%x)", pendingChanID[:], peerIDKey[:]) } + // If the reservation was a PSBT funding flow and it was canceled by the + // remote peer, then we need to thread through a different error message + // to the subroutine that's waiting for the user input so it can return + // a nice error message to the user. + if ctx.reservation.IsPsbt() && byRemote { + ctx.reservation.RemoteCanceled() + } + if err := ctx.reservation.Cancel(); err != nil { return nil, errors.Errorf("unable to cancel reservation: %v", err) @@ -3114,9 +3464,9 @@ func copyPubKey(pub *btcec.PublicKey) *btcec.PublicKey { // chanPoint to the channelOpeningStateBucket. 
func (f *fundingManager) saveChannelOpeningState(chanPoint *wire.OutPoint, state channelOpeningState, shortChanID *lnwire.ShortChannelID) error { - return f.cfg.Wallet.Cfg.Database.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RwTx) error { - bucket, err := tx.CreateBucketIfNotExists(channelOpeningStateBucket) + bucket, err := tx.CreateTopLevelBucket(channelOpeningStateBucket) if err != nil { return err } @@ -3144,9 +3494,9 @@ func (f *fundingManager) getChannelOpeningState(chanPoint *wire.OutPoint) ( var state channelOpeningState var shortChanID lnwire.ShortChannelID - err := f.cfg.Wallet.Cfg.Database.View(func(tx *bbolt.Tx) error { + err := kvdb.View(f.cfg.Wallet.Cfg.Database, func(tx kvdb.ReadTx) error { - bucket := tx.Bucket(channelOpeningStateBucket) + bucket := tx.ReadBucket(channelOpeningStateBucket) if bucket == nil { // If the bucket does not exist, it means we never added // a channel to the db, so return ErrChannelNotFound. @@ -3176,8 +3526,8 @@ func (f *fundingManager) getChannelOpeningState(chanPoint *wire.OutPoint) ( // deleteChannelOpeningState removes any state for chanPoint from the database. 
func (f *fundingManager) deleteChannelOpeningState(chanPoint *wire.OutPoint) error { - return f.cfg.Wallet.Cfg.Database.Update(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(channelOpeningStateBucket) + return kvdb.Update(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RwTx) error { + bucket := tx.ReadWriteBucket(channelOpeningStateBucket) if bucket == nil { return fmt.Errorf("Bucket not found") } diff --git a/fundingmanager_test.go b/fundingmanager_test.go index 759f783b07..b222bf94b2 100644 --- a/fundingmanager_test.go +++ b/fundingmanager_test.go @@ -25,6 +25,7 @@ import ( "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/chanacceptor" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channelnotifier" "github.com/lightningnetwork/lnd/discovery" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" @@ -32,6 +33,7 @@ import ( "github.com/lightningnetwork/lnd/lnpeer" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" ) @@ -47,6 +49,10 @@ const ( // testPollSleepMs is the number of milliseconds to sleep between // each attempt to access the database to check its state. testPollSleepMs = 500 + + // maxPending is the maximum number of channels we allow opening to the + // same peer in the max pending channels test. 
+ maxPending = 4 ) var ( @@ -137,6 +143,24 @@ func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte, }, nil } +type mockChanEvent struct { + openEvent chan wire.OutPoint + pendingOpenEvent chan channelnotifier.PendingOpenChannelEvent +} + +func (m *mockChanEvent) NotifyOpenChannelEvent(outpoint wire.OutPoint) { + m.openEvent <- outpoint +} + +func (m *mockChanEvent) NotifyPendingOpenChannelEvent(outpoint wire.OutPoint, + pendingChannel *channeldb.OpenChannel) { + + m.pendingOpenEvent <- channelnotifier.PendingOpenChannelEvent{ + ChannelPoint: &outpoint, + PendingChannel: pendingChannel, + } +} + type testNode struct { privKey *btcec.PrivateKey addr *lnwire.NetAddress @@ -146,8 +170,10 @@ type testNode struct { fundingMgr *fundingManager newChannels chan *newChannelMsg mockNotifier *mockNotifier + mockChanEvent *mockChanEvent testDir string shutdownChannel chan struct{} + remoteFeatures []lnwire.FeatureBit remotePeer *testNode sendMessage func(lnwire.Message) error @@ -175,20 +201,20 @@ func (n *testNode) SendMessageLazy(sync bool, msgs ...lnwire.Message) error { return n.SendMessage(sync, msgs...) 
} -func (n *testNode) WipeChannel(_ *wire.OutPoint) error { - return nil -} +func (n *testNode) WipeChannel(_ *wire.OutPoint) {} func (n *testNode) QuitSignal() <-chan struct{} { return n.shutdownChannel } -func (n *testNode) LocalGlobalFeatures() *lnwire.FeatureVector { +func (n *testNode) LocalFeatures() *lnwire.FeatureVector { return lnwire.NewFeatureVector(nil, nil) } -func (n *testNode) RemoteGlobalFeatures() *lnwire.FeatureVector { - return lnwire.NewFeatureVector(nil, nil) +func (n *testNode) RemoteFeatures() *lnwire.FeatureVector { + return lnwire.NewFeatureVector( + lnwire.NewRawFeatureVector(n.remoteFeatures...), nil, + ) } func (n *testNode) AddNewChannel(channel *channeldb.OpenChannel, @@ -218,7 +244,7 @@ func createTestWallet(cdb *channeldb.DB, netParams *chaincfg.Params, notifier chainntnfs.ChainNotifier, wc lnwallet.WalletController, signer input.Signer, keyRing keychain.SecretKeyRing, bio lnwallet.BlockChainIO, - estimator lnwallet.FeeEstimator) (*lnwallet.LightningWallet, error) { + estimator chainfee.Estimator) (*lnwallet.LightningWallet, error) { wallet, err := lnwallet.NewLightningWallet(lnwallet.Config{ Database: cdb, @@ -247,7 +273,7 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, options ...cfgOption) (*testNode, error) { netParams := activeNetParams.Params - estimator := lnwallet.NewStaticFeeEstimator(62500, 0) + estimator := chainfee.NewStaticEstimator(62500, 0) chainNotifier := &mockNotifier{ oneConfChannel: make(chan *chainntnfs.TxConfirmation, 1), @@ -270,6 +296,17 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, bestHeight: fundingBroadcastHeight, } + // The mock channel event notifier will receive events for each pending + // open and open channel. Because some tests will create multiple + // channels in a row before advancing to the next step, these channels + // need to be buffered. 
+ evt := &mockChanEvent{ + openEvent: make(chan wire.OutPoint, maxPending), + pendingOpenEvent: make( + chan channelnotifier.PendingOpenChannelEvent, maxPending, + ), + } + dbDir := filepath.Join(tempTestDir, "cdb") cdb, err := channeldb.Open(dbDir) if err != nil { @@ -297,7 +334,9 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, Wallet: lnw, Notifier: chainNotifier, FeeEstimator: estimator, - SignMessage: func(pubKey *btcec.PublicKey, msg []byte) (*btcec.Signature, error) { + SignMessage: func(pubKey *btcec.PublicKey, + msg []byte) (input.Signature, error) { + return testSig, nil }, SendAnnouncement: func(msg lnwire.Message, @@ -332,11 +371,12 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, return nil, fmt.Errorf("unable to find channel") }, DefaultRoutingPolicy: htlcswitch.ForwardingPolicy{ - MinHTLC: 5, + MinHTLCOut: 5, BaseFee: 100, FeeRate: 1000, TimeLockDelta: 10, }, + DefaultMinHtlcIn: 5, NumRequiredConfs: func(chanAmt btcutil.Amount, pushAmt lnwire.MilliSatoshi) uint16 { return 3 @@ -371,11 +411,12 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, publTxChan <- txn return nil }, - ZombieSweeperInterval: 1 * time.Hour, - ReservationTimeout: 1 * time.Nanosecond, - MaxPendingChannels: DefaultMaxPendingChannels, - NotifyOpenChannelEvent: func(wire.OutPoint) {}, - OpenChannelPredicate: chainedAcceptor, + ZombieSweeperInterval: 1 * time.Hour, + ReservationTimeout: 1 * time.Nanosecond, + MaxPendingChannels: DefaultMaxPendingChannels, + NotifyOpenChannelEvent: evt.NotifyOpenChannelEvent, + OpenChannelPredicate: chainedAcceptor, + NotifyPendingOpenChannelEvent: evt.NotifyPendingOpenChannelEvent, } for _, op := range options { @@ -398,6 +439,7 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, publTxChan: publTxChan, fundingMgr: f, mockNotifier: chainNotifier, + mockChanEvent: evt, testDir: tempTestDir, shutdownChannel: shutdownChan, addr: addr, @@ -434,7 +476,7 @@ func 
recreateAliceFundingManager(t *testing.T, alice *testNode) { Notifier: oldCfg.Notifier, FeeEstimator: oldCfg.FeeEstimator, SignMessage: func(pubKey *btcec.PublicKey, - msg []byte) (*btcec.Signature, error) { + msg []byte) (input.Signature, error) { return testSig, nil }, SendAnnouncement: func(msg lnwire.Message, @@ -460,11 +502,12 @@ func recreateAliceFundingManager(t *testing.T, alice *testNode) { TempChanIDSeed: oldCfg.TempChanIDSeed, FindChannel: oldCfg.FindChannel, DefaultRoutingPolicy: htlcswitch.ForwardingPolicy{ - MinHTLC: 5, + MinHTLCOut: 5, BaseFee: 100, FeeRate: 1000, TimeLockDelta: 10, }, + DefaultMinHtlcIn: 5, RequiredRemoteMaxValue: oldCfg.RequiredRemoteMaxValue, PublishTransaction: func(txn *wire.MsgTx) error { publishChan <- txn @@ -678,6 +721,18 @@ func fundChannel(t *testing.T, alice, bob *testNode, localFundingAmt, t.Fatalf("alice did not publish funding tx") } + // Make sure the notification about the pending channel was sent out. + select { + case <-alice.mockChanEvent.pendingOpenEvent: + case <-time.After(time.Second * 5): + t.Fatalf("alice did not send pending channel event") + } + select { + case <-bob.mockChanEvent.pendingOpenEvent: + case <-time.After(time.Second * 5): + t.Fatalf("bob did not send pending channel event") + } + // Finally, make sure neither have active reservation for the channel // now pending open in the database. assertNumPendingReservations(t, alice, bobPubKey, 0) @@ -860,6 +915,18 @@ func assertMarkedOpen(t *testing.T, alice, bob *testNode, fundingOutPoint *wire.OutPoint) { t.Helper() + // Make sure the notification about the pending channel was sent out. 
+ select { + case <-alice.mockChanEvent.openEvent: + case <-time.After(time.Second * 5): + t.Fatalf("alice did not send open channel event") + } + select { + case <-bob.mockChanEvent.openEvent: + case <-time.After(time.Second * 5): + t.Fatalf("bob did not send open channel event") + } + assertDatabaseState(t, alice, fundingOutPoint, markedOpen) assertDatabaseState(t, bob, fundingOutPoint, markedOpen) } @@ -922,7 +989,7 @@ func assertChannelAnnouncements(t *testing.T, alice, bob *testNode, // _other_ node. other := (j + 1) % 2 minHtlc := nodes[other].fundingMgr.cfg. - DefaultRoutingPolicy.MinHTLC + DefaultMinHtlcIn // We might expect a custom MinHTLC value. if len(customMinHtlc) > 0 { @@ -2321,7 +2388,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) { // This is the custom parameters we'll use. const csvDelay = 67 - const minHtlc = 1234 + const minHtlcIn = 1234 // We will consume the channel updates as we go, so no buffering is // needed. @@ -2340,7 +2407,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) { localFundingAmt: localAmt, pushAmt: lnwire.NewMSatFromSatoshis(pushAmt), private: false, - minHtlc: minHtlc, + minHtlcIn: minHtlcIn, remoteCsvDelay: csvDelay, updates: updateChan, err: errChan, @@ -2377,9 +2444,9 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) { } // Check that the custom minHTLC value is sent. - if openChannelReq.HtlcMinimum != minHtlc { + if openChannelReq.HtlcMinimum != minHtlcIn { t.Fatalf("expected OpenChannel to have minHtlc %v, got %v", - minHtlc, openChannelReq.HtlcMinimum) + minHtlcIn, openChannelReq.HtlcMinimum) } chanID := openChannelReq.PendingChannelID @@ -2464,7 +2531,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) { // The minimum HTLC value Alice can offer should be 5, and the minimum // Bob can offer should be 1234. 
- if err := assertMinHtlc(resCtx, 5, minHtlc); err != nil { + if err := assertMinHtlc(resCtx, 5, minHtlcIn); err != nil { t.Fatal(err) } @@ -2478,7 +2545,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) { t.Fatal(err) } - if err := assertMinHtlc(resCtx, minHtlc, 5); err != nil { + if err := assertMinHtlc(resCtx, minHtlcIn, 5); err != nil { t.Fatal(err) } @@ -2539,7 +2606,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) { // announcements. Alice should advertise the default MinHTLC value of // 5, while bob should advertise the value minHtlc, since Alice // required him to use it. - assertChannelAnnouncements(t, alice, bob, capacity, 5, minHtlc) + assertChannelAnnouncements(t, alice, bob, capacity, 5, minHtlcIn) // The funding transaction is now confirmed, wait for the // OpenStatusUpdate_ChanOpen update @@ -2551,8 +2618,6 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) { func TestFundingManagerMaxPendingChannels(t *testing.T) { t.Parallel() - const maxPending = 4 - alice, bob := setupFundingManagers( t, func(cfg *fundingConfig) { cfg.MaxPendingChannels = maxPending @@ -2863,7 +2928,7 @@ func TestFundingManagerFundAll(t *testing.T) { Value: btcutil.Amount( 0.05 * btcutil.SatoshiPerBitcoin, ), - PkScript: make([]byte, 22), + PkScript: coinPkScript, OutPoint: wire.OutPoint{ Hash: chainhash.Hash{}, Index: 0, @@ -2874,7 +2939,7 @@ func TestFundingManagerFundAll(t *testing.T) { Value: btcutil.Amount( 0.06 * btcutil.SatoshiPerBitcoin, ), - PkScript: make([]byte, 22), + PkScript: coinPkScript, OutPoint: wire.OutPoint{ Hash: chainhash.Hash{}, Index: 1, @@ -2947,3 +3012,92 @@ func TestFundingManagerFundAll(t *testing.T) { } } } + +// TestGetUpfrontShutdown tests different combinations of inputs for getting a +// shutdown script. It varies whether the peer has the feature set, whether +// the user has provided a script and our local configuration to test that +// GetUpfrontShutdownScript returns the expected outcome. 
+func TestGetUpfrontShutdownScript(t *testing.T) { + upfrontScript := []byte("upfront script") + generatedScript := []byte("generated script") + + getScript := func() (lnwire.DeliveryAddress, error) { + return generatedScript, nil + } + + tests := []struct { + name string + getScript func() (lnwire.DeliveryAddress, error) + upfrontScript lnwire.DeliveryAddress + peerEnabled bool + localEnabled bool + expectedScript lnwire.DeliveryAddress + expectedErr error + }{ + { + name: "peer disabled, no shutdown", + getScript: getScript, + }, + { + name: "peer disabled, upfront provided", + upfrontScript: upfrontScript, + expectedErr: errUpfrontShutdownScriptNotSupported, + }, + { + name: "peer enabled, upfront provided", + upfrontScript: upfrontScript, + peerEnabled: true, + expectedScript: upfrontScript, + }, + { + name: "peer enabled, local disabled", + peerEnabled: true, + }, + { + name: "local enabled, no upfront script", + getScript: getScript, + peerEnabled: true, + localEnabled: true, + expectedScript: generatedScript, + }, + { + name: "local enabled, upfront script", + peerEnabled: true, + upfrontScript: upfrontScript, + localEnabled: true, + expectedScript: upfrontScript, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + var mockPeer testNode + + // If the remote peer in the test should support upfront shutdown, + // add the feature bit. + if test.peerEnabled { + mockPeer.remoteFeatures = []lnwire.FeatureBit{ + lnwire.UpfrontShutdownScriptOptional, + } + } + + // Set the command line option in config as needed. 
+ cfg = &config{EnableUpfrontShutdown: test.localEnabled} + + addr, err := getUpfrontShutdownScript( + &mockPeer, test.upfrontScript, test.getScript, + ) + if err != test.expectedErr { + t.Fatalf("got: %v, expected error: %v", err, test.expectedErr) + } + + if !bytes.Equal(addr, test.expectedScript) { + t.Fatalf("expected address: %x, got: %x", + test.expectedScript, addr) + } + + }) + } +} diff --git a/fuzz/brontide/fuzz_utils.go b/fuzz/brontide/fuzz_utils.go new file mode 100644 index 0000000000..48388dc0d8 --- /dev/null +++ b/fuzz/brontide/fuzz_utils.go @@ -0,0 +1,137 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "encoding/hex" + "fmt" + + "github.com/btcsuite/btcd/btcec" + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/brontide" +) + +var ( + initBytes = []byte{ + 0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda, + 0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17, + 0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d, + 0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9, + } + + respBytes = []byte{ + 0xaa, 0xb6, 0x37, 0xd9, 0xfc, 0xd2, 0xc6, 0xda, + 0x63, 0x59, 0xe6, 0x99, 0x31, 0x13, 0xa1, 0x17, + 0xd, 0xe7, 0x95, 0xe9, 0xb7, 0x25, 0xb8, 0x4d, + 0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9, + } + + // Returns the initiator's ephemeral private key. + initEphemeral = brontide.EphemeralGenerator(func() (*btcec.PrivateKey, error) { + e := "121212121212121212121212121212121212121212121212121212" + + "1212121212" + eBytes, err := hex.DecodeString(e) + if err != nil { + return nil, err + } + + priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes) + return priv, nil + }) + + // Returns the responder's ephemeral private key. 
+ respEphemeral = brontide.EphemeralGenerator(func() (*btcec.PrivateKey, error) { + e := "222222222222222222222222222222222222222222222222222" + + "2222222222222" + eBytes, err := hex.DecodeString(e) + if err != nil { + return nil, err + } + + priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), eBytes) + return priv, nil + }) +) + +// completeHandshake takes two brontide machines (initiator, responder) +// and completes the brontide handshake between them. If any part of the +// handshake fails, this function will panic. +func completeHandshake(initiator, responder *brontide.Machine) { + if err := handshake(initiator, responder); err != nil { + nilAndPanic(initiator, responder, err) + } +} + +// handshake actually completes the brontide handshake and bubbles up +// an error to the calling function. +func handshake(initiator, responder *brontide.Machine) error { + // Generate ActOne and send to the responder. + actOne, err := initiator.GenActOne() + if err != nil { + return err + } + + if err := responder.RecvActOne(actOne); err != nil { + return err + } + + // Generate ActTwo and send to initiator. + actTwo, err := responder.GenActTwo() + if err != nil { + return err + } + + if err := initiator.RecvActTwo(actTwo); err != nil { + return err + } + + // Generate ActThree and send to responder. + actThree, err := initiator.GenActThree() + if err != nil { + return err + } + + return responder.RecvActThree(actThree) +} + +// nilAndPanic first nils the initiator and responder's Curve fields and then +// panics. +func nilAndPanic(initiator, responder *brontide.Machine, err error) { + if initiator != nil { + initiator.SetCurveToNil() + } + if responder != nil { + responder.SetCurveToNil() + } + panic(fmt.Errorf("error: %v, initiator: %v, responder: %v", err, + spew.Sdump(initiator), spew.Sdump(responder))) +} + +// getBrontideMachines returns two brontide machines that use random keys +// everywhere. 
+func getBrontideMachines() (*brontide.Machine, *brontide.Machine) { + initPriv, _ := btcec.NewPrivateKey(btcec.S256()) + respPriv, _ := btcec.NewPrivateKey(btcec.S256()) + respPub := (*btcec.PublicKey)(&respPriv.PublicKey) + + initiator := brontide.NewBrontideMachine(true, initPriv, respPub) + responder := brontide.NewBrontideMachine(false, respPriv, nil) + + return initiator, responder +} + +// getStaticBrontideMachines returns two brontide machines that use static keys +// everywhere. +func getStaticBrontideMachines() (*brontide.Machine, *brontide.Machine) { + initPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), initBytes) + respPriv, respPub := btcec.PrivKeyFromBytes(btcec.S256(), respBytes) + + initiator := brontide.NewBrontideMachine( + true, initPriv, respPub, initEphemeral, + ) + responder := brontide.NewBrontideMachine( + false, respPriv, nil, respEphemeral, + ) + + return initiator, responder +} diff --git a/fuzz/brontide/random_actone.go b/fuzz/brontide/random_actone.go new file mode 100644 index 0000000000..259d7d6ad1 --- /dev/null +++ b/fuzz/brontide/random_actone.go @@ -0,0 +1,30 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "github.com/lightningnetwork/lnd/brontide" +) + +// Fuzz_random_actone is a go-fuzz harness for ActOne in the brontide +// handshake. +func Fuzz_random_actone(data []byte) int { + // Check if data is large enough. + if len(data) < brontide.ActOneSize { + return -1 + } + + // This will return brontide machines with random keys. + _, responder := getBrontideMachines() + + // Copy data into [ActOneSize]byte. + var actOne [brontide.ActOneSize]byte + copy(actOne[:], data) + + // Responder receives ActOne, should fail on the MAC check. 
+ if err := responder.RecvActOne(actOne); err == nil { + nilAndPanic(nil, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/random_actthree.go b/fuzz/brontide/random_actthree.go new file mode 100644 index 0000000000..38a3847ffb --- /dev/null +++ b/fuzz/brontide/random_actthree.go @@ -0,0 +1,50 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "github.com/lightningnetwork/lnd/brontide" +) + +// Fuzz_random_actthree is a go-fuzz harness for ActThree in the brontide +// handshake. +func Fuzz_random_actthree(data []byte) int { + // Check if data is large enough. + if len(data) < brontide.ActThreeSize { + return -1 + } + + // This will return brontide machines with random keys. + initiator, responder := getBrontideMachines() + + // Generate ActOne and send to the responder. + actOne, err := initiator.GenActOne() + if err != nil { + nilAndPanic(initiator, responder, err) + } + + // Receiving ActOne should succeed, so we panic on error. + if err := responder.RecvActOne(actOne); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Generate ActTwo - this is not sent to the initiator because nothing is + // done with the initiator after this point and it would slow down fuzzing. + // GenActTwo needs to be called to set the appropriate state in the + // responder machine. + _, err = responder.GenActTwo() + if err != nil { + nilAndPanic(initiator, responder, err) + } + + // Copy data into [ActThreeSize]byte. + var actThree [brontide.ActThreeSize]byte + copy(actThree[:], data) + + // Responder receives ActThree, should fail on the MAC check. 
+ if err := responder.RecvActThree(actThree); err == nil { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/random_acttwo.go b/fuzz/brontide/random_acttwo.go new file mode 100644 index 0000000000..1cda2f9326 --- /dev/null +++ b/fuzz/brontide/random_acttwo.go @@ -0,0 +1,39 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "github.com/lightningnetwork/lnd/brontide" +) + +// Fuzz_random_acttwo is a go-fuzz harness for ActTwo in the brontide +// handshake. +func Fuzz_random_acttwo(data []byte) int { + // Check if data is large enough. + if len(data) < brontide.ActTwoSize { + return -1 + } + + // This will return brontide machines with random keys. + initiator, _ := getBrontideMachines() + + // Generate ActOne - this isn't sent to the responder because nothing is + // done with the responder machine and this would slow down fuzzing. + // GenActOne needs to be called to set the appropriate state in the + // initiator machine. + _, err := initiator.GenActOne() + if err != nil { + nilAndPanic(initiator, nil, err) + } + + // Copy data into [ActTwoSize]byte. + var actTwo [brontide.ActTwoSize]byte + copy(actTwo[:], data) + + // Initiator receives ActTwo, should fail. + if err := initiator.RecvActTwo(actTwo); err == nil { + nilAndPanic(initiator, nil, nil) + } + + return 1 +} diff --git a/fuzz/brontide/random_init_decrypt.go b/fuzz/brontide/random_init_decrypt.go new file mode 100644 index 0000000000..3328a2b6be --- /dev/null +++ b/fuzz/brontide/random_init_decrypt.go @@ -0,0 +1,27 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" +) + +// Fuzz_random_init_decrypt is a go-fuzz harness that decrypts arbitrary data +// with the initiator. +func Fuzz_random_init_decrypt(data []byte) int { + // This will return brontide machines with random keys. + initiator, responder := getBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + // Create a reader with the byte array. 
+ r := bytes.NewReader(data) + + // Decrypt the encrypted message using ReadMessage w/ initiator machine. + if _, err := initiator.ReadMessage(r); err == nil { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/random_init_enc_dec.go b/fuzz/brontide/random_init_enc_dec.go new file mode 100644 index 0000000000..17f5583f6d --- /dev/null +++ b/fuzz/brontide/random_init_enc_dec.go @@ -0,0 +1,48 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" + "math" +) + +// Fuzz_random_init_enc_dec is a go-fuzz harness that tests round-trip +// encryption and decryption between the initiator and the responder. +func Fuzz_random_init_enc_dec(data []byte) int { + // Ensure that length of message is not greater than max allowed size. + if len(data) > math.MaxUint16 { + return 0 + } + + // This will return brontide machines with random keys. + initiator, responder := getBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + var b bytes.Buffer + + // Encrypt the message using WriteMessage w/ initiator machine. + if err := initiator.WriteMessage(data); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Flush the encrypted message w/ initiator machine. + if _, err := initiator.Flush(&b); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Decrypt the ciphertext using ReadMessage w/ responder machine. + plaintext, err := responder.ReadMessage(&b) + if err != nil { + nilAndPanic(initiator, responder, err) + } + + // Check that the decrypted message and the original message are equal. 
+ if !bytes.Equal(data, plaintext) { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/random_init_encrypt.go b/fuzz/brontide/random_init_encrypt.go new file mode 100644 index 0000000000..c041429e89 --- /dev/null +++ b/fuzz/brontide/random_init_encrypt.go @@ -0,0 +1,37 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" + "math" +) + +// Fuzz_random_init_encrypt is a go-fuzz harness that encrypts arbitrary data +// with the initiator. +func Fuzz_random_init_encrypt(data []byte) int { + // Ensure that length of message is not greater than max allowed size. + if len(data) > math.MaxUint16 { + return 0 + } + + // This will return brontide machines with random keys. + initiator, responder := getBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + var b bytes.Buffer + + // Encrypt the message using WriteMessage w/ initiator machine. + if err := initiator.WriteMessage(data); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Flush the encrypted message w/ initiator machine. + if _, err := initiator.Flush(&b); err != nil { + nilAndPanic(initiator, responder, err) + } + + return 1 +} diff --git a/fuzz/brontide/random_resp_decrypt.go b/fuzz/brontide/random_resp_decrypt.go new file mode 100644 index 0000000000..1ae40bd27f --- /dev/null +++ b/fuzz/brontide/random_resp_decrypt.go @@ -0,0 +1,27 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" +) + +// Fuzz_random_resp_decrypt is a go-fuzz harness that decrypts arbitrary data +// with the responder. +func Fuzz_random_resp_decrypt(data []byte) int { + // This will return brontide machines with random keys. + initiator, responder := getBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + // Create a reader with the byte array. + r := bytes.NewReader(data) + + // Decrypt the encrypted message using ReadMessage w/ responder machine. 
+ if _, err := responder.ReadMessage(r); err == nil { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/random_resp_enc_dec.go b/fuzz/brontide/random_resp_enc_dec.go new file mode 100644 index 0000000000..e2fad9a58b --- /dev/null +++ b/fuzz/brontide/random_resp_enc_dec.go @@ -0,0 +1,48 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" + "math" +) + +// Fuzz_random_resp_enc_dec is a go-fuzz harness that tests round-trip +// encryption and decryption between the responder and the initiator. +func Fuzz_random_resp_enc_dec(data []byte) int { + // Ensure that length of message is not greater than max allowed size. + if len(data) > math.MaxUint16 { + return 0 + } + + // This will return brontide machines with random keys. + initiator, responder := getBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + var b bytes.Buffer + + // Encrypt the message using WriteMessage w/ responder machine. + if err := responder.WriteMessage(data); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Flush the encrypted message w/ responder machine. + if _, err := responder.Flush(&b); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Decrypt the ciphertext using ReadMessage w/ initiator machine. + plaintext, err := initiator.ReadMessage(&b) + if err != nil { + nilAndPanic(initiator, responder, err) + } + + // Check that the decrypted message and the original message are equal. + if !bytes.Equal(data, plaintext) { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/random_resp_encrypt.go b/fuzz/brontide/random_resp_encrypt.go new file mode 100644 index 0000000000..691bcff50a --- /dev/null +++ b/fuzz/brontide/random_resp_encrypt.go @@ -0,0 +1,37 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" + "math" +) + +// Fuzz_random_resp_encrypt is a go-fuzz harness that encrypts arbitrary data +// with the responder. 
+func Fuzz_random_resp_encrypt(data []byte) int { + // Ensure that length of message is not greater than max allowed size. + if len(data) > math.MaxUint16 { + return 0 + } + + // This will return brontide machines with random keys. + initiator, responder := getBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + var b bytes.Buffer + + // Encrypt the message using WriteMessage w/ responder machine. + if err := responder.WriteMessage(data); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Flush the encrypted message w/ responder machine. + if _, err := responder.Flush(&b); err != nil { + nilAndPanic(initiator, responder, err) + } + + return 1 +} diff --git a/fuzz/brontide/static_actone.go b/fuzz/brontide/static_actone.go new file mode 100644 index 0000000000..7c351680f1 --- /dev/null +++ b/fuzz/brontide/static_actone.go @@ -0,0 +1,30 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "github.com/lightningnetwork/lnd/brontide" +) + +// Fuzz_static_actone is a go-fuzz harness for ActOne in the brontide +// handshake. +func Fuzz_static_actone(data []byte) int { + // Check if data is large enough. + if len(data) < brontide.ActOneSize { + return -1 + } + + // This will return brontide machines with static keys. + _, responder := getStaticBrontideMachines() + + // Copy data into [ActOneSize]byte. + var actOne [brontide.ActOneSize]byte + copy(actOne[:], data) + + // Responder receives ActOne, should fail. 
+ if err := responder.RecvActOne(actOne); err == nil { + nilAndPanic(nil, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/static_actthree.go b/fuzz/brontide/static_actthree.go new file mode 100644 index 0000000000..3f4878bfac --- /dev/null +++ b/fuzz/brontide/static_actthree.go @@ -0,0 +1,50 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "github.com/lightningnetwork/lnd/brontide" +) + +// Fuzz_static_actthree is a go-fuzz harness for ActThree in the brontide +// handshake. +func Fuzz_static_actthree(data []byte) int { + // Check if data is large enough. + if len(data) < brontide.ActThreeSize { + return -1 + } + + // This will return brontide machines with static keys. + initiator, responder := getStaticBrontideMachines() + + // Generate ActOne and send to the responder. + actOne, err := initiator.GenActOne() + if err != nil { + nilAndPanic(initiator, responder, err) + } + + // Receiving ActOne should succeed, so we panic on error. + if err := responder.RecvActOne(actOne); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Generate ActTwo - this is not sent to the initiator because nothing is + // done with the initiator after this point and it would slow down fuzzing. + // GenActTwo needs to be called to set the appropriate state in the responder + // machine. + _, err = responder.GenActTwo() + if err != nil { + nilAndPanic(initiator, responder, err) + } + + // Copy data into [ActThreeSize]byte. + var actThree [brontide.ActThreeSize]byte + copy(actThree[:], data) + + // Responder receives ActThree, should fail. 
+ if err := responder.RecvActThree(actThree); err == nil { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/static_acttwo.go b/fuzz/brontide/static_acttwo.go new file mode 100644 index 0000000000..4a2b094f7d --- /dev/null +++ b/fuzz/brontide/static_acttwo.go @@ -0,0 +1,39 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "github.com/lightningnetwork/lnd/brontide" +) + +// Fuzz_static_acttwo is a go-fuzz harness for ActTwo in the brontide +// handshake. +func Fuzz_static_acttwo(data []byte) int { + // Check if data is large enough. + if len(data) < brontide.ActTwoSize { + return -1 + } + + // This will return brontide machines with static keys. + initiator, _ := getStaticBrontideMachines() + + // Generate ActOne - this isn't sent to the responder because nothing is + // done with the responder machine and this would slow down fuzzing. + // GenActOne needs to be called to set the appropriate state in the initiator + // machine. + _, err := initiator.GenActOne() + if err != nil { + nilAndPanic(initiator, nil, err) + } + + // Copy data into [ActTwoSize]byte. + var actTwo [brontide.ActTwoSize]byte + copy(actTwo[:], data) + + // Initiator receives ActTwo, should fail. + if err := initiator.RecvActTwo(actTwo); err == nil { + nilAndPanic(initiator, nil, nil) + } + + return 1 +} diff --git a/fuzz/brontide/static_init_decrypt.go b/fuzz/brontide/static_init_decrypt.go new file mode 100644 index 0000000000..35525d203a --- /dev/null +++ b/fuzz/brontide/static_init_decrypt.go @@ -0,0 +1,27 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" +) + +// Fuzz_static_init_decrypt is a go-fuzz harness that decrypts arbitrary data +// with the initiator. +func Fuzz_static_init_decrypt(data []byte) int { + // This will return brontide machines with static keys. + initiator, responder := getStaticBrontideMachines() + + // Complete the brontide handshake. 
+ completeHandshake(initiator, responder) + + // Create a reader with the byte array. + r := bytes.NewReader(data) + + // Decrypt the encrypted message using ReadMessage w/ initiator machine. + if _, err := initiator.ReadMessage(r); err == nil { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/static_init_enc_dec.go b/fuzz/brontide/static_init_enc_dec.go new file mode 100644 index 0000000000..a333c3f4fc --- /dev/null +++ b/fuzz/brontide/static_init_enc_dec.go @@ -0,0 +1,49 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" + "math" +) + +// Fuzz_static_init_enc_dec is a go-fuzz harness that tests round-trip +// encryption and decryption +// between the initiator and the responder. +func Fuzz_static_init_enc_dec(data []byte) int { + // Ensure that length of message is not greater than max allowed size. + if len(data) > math.MaxUint16 { + return 0 + } + + // This will return brontide machines with static keys. + initiator, responder := getStaticBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + var b bytes.Buffer + + // Encrypt the message using WriteMessage w/ initiator machine. + if err := initiator.WriteMessage(data); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Flush the encrypted message w/ initiator machine. + if _, err := initiator.Flush(&b); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Decrypt the ciphertext using ReadMessage w/ responder machine. + plaintext, err := responder.ReadMessage(&b) + if err != nil { + nilAndPanic(initiator, responder, err) + } + + // Check that the decrypted message and the original message are equal. 
+ if !bytes.Equal(data, plaintext) { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/static_init_encrypt.go b/fuzz/brontide/static_init_encrypt.go new file mode 100644 index 0000000000..96040e7457 --- /dev/null +++ b/fuzz/brontide/static_init_encrypt.go @@ -0,0 +1,37 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" + "math" +) + +// Fuzz_static_init_encrypt is a go-fuzz harness that encrypts arbitrary data +// with the initiator. +func Fuzz_static_init_encrypt(data []byte) int { + // Ensure that length of message is not greater than max allowed size. + if len(data) > math.MaxUint16 { + return 0 + } + + // This will return brontide machines with static keys. + initiator, responder := getStaticBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + var b bytes.Buffer + + // Encrypt the message using WriteMessage w/ initiator machine. + if err := initiator.WriteMessage(data); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Flush the encrypted message w/ initiator machine. + if _, err := initiator.Flush(&b); err != nil { + nilAndPanic(initiator, responder, err) + } + + return 1 +} diff --git a/fuzz/brontide/static_resp_decrypt.go b/fuzz/brontide/static_resp_decrypt.go new file mode 100644 index 0000000000..fee4500b50 --- /dev/null +++ b/fuzz/brontide/static_resp_decrypt.go @@ -0,0 +1,27 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" +) + +// Fuzz_static_resp_decrypt is a go-fuzz harness that decrypts arbitrary data +// with the responder. +func Fuzz_static_resp_decrypt(data []byte) int { + // This will return brontide machines with static keys. + initiator, responder := getStaticBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + // Create a reader with the byte array. + r := bytes.NewReader(data) + + // Decrypt the encrypted message using ReadMessage w/ responder machine. 
+ if _, err := responder.ReadMessage(r); err == nil { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/static_resp_enc_dec.go b/fuzz/brontide/static_resp_enc_dec.go new file mode 100644 index 0000000000..b9a3ad3946 --- /dev/null +++ b/fuzz/brontide/static_resp_enc_dec.go @@ -0,0 +1,48 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" + "math" +) + +// Fuzz_static_resp_enc_dec is a go-fuzz harness that tests round-trip +// encryption and decryption between the responder and the initiator. +func Fuzz_static_resp_enc_dec(data []byte) int { + // Ensure that length of message is not greater than max allowed size. + if len(data) > math.MaxUint16 { + return 0 + } + + // This will return brontide machines with static keys. + initiator, responder := getStaticBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + var b bytes.Buffer + + // Encrypt the message using WriteMessage w/ responder machine. + if err := responder.WriteMessage(data); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Flush the encrypted message w/ responder machine. + if _, err := responder.Flush(&b); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Decrypt the ciphertext using ReadMessage w/ initiator machine. + plaintext, err := initiator.ReadMessage(&b) + if err != nil { + nilAndPanic(initiator, responder, err) + } + + // Check that the decrypted message and the original message are equal. 
+ if !bytes.Equal(data, plaintext) { + nilAndPanic(initiator, responder, nil) + } + + return 1 +} diff --git a/fuzz/brontide/static_resp_encrypt.go b/fuzz/brontide/static_resp_encrypt.go new file mode 100644 index 0000000000..b97a0390b5 --- /dev/null +++ b/fuzz/brontide/static_resp_encrypt.go @@ -0,0 +1,37 @@ +// +build gofuzz + +package brontidefuzz + +import ( + "bytes" + "math" +) + +// Fuzz_static_resp_encrypt is a go-fuzz harness that encrypts arbitrary data +// with the responder. +func Fuzz_static_resp_encrypt(data []byte) int { + // Ensure that length of message is not greater than max allowed size. + if len(data) > math.MaxUint16 { + return 0 + } + + // This will return brontide machines with static keys. + initiator, responder := getStaticBrontideMachines() + + // Complete the brontide handshake. + completeHandshake(initiator, responder) + + var b bytes.Buffer + + // Encrypt the message using WriteMessage w/ responder machine. + if err := responder.WriteMessage(data); err != nil { + nilAndPanic(initiator, responder, err) + } + + // Flush the encrypted message w/ responder machine. + if _, err := responder.Flush(&b); err != nil { + nilAndPanic(initiator, responder, err) + } + + return 1 +} diff --git a/fuzz/lnwire/accept_channel.go b/fuzz/lnwire/accept_channel.go new file mode 100644 index 0000000000..1f70282508 --- /dev/null +++ b/fuzz/lnwire/accept_channel.go @@ -0,0 +1,135 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "bytes" + + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_accept_channel is used by go-fuzz. +func Fuzz_accept_channel(data []byte) int { + // Prefix with MsgAcceptChannel. + data = prefixWithMsgType(data, lnwire.MsgAcceptChannel) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.AcceptChannel{} + + // We have to do this here instead of in fuzz.Harness so that + // reflect.DeepEqual isn't called. 
Because of the UpfrontShutdownScript + // encoding, the first message and second message aren't deeply equal since + // the first has a nil slice and the other has an empty slice. + + // Create a reader with the byte array. + r := bytes.NewReader(data) + + // Make sure byte array length (excluding 2 bytes for message type) is + // less than max payload size for the wire message. We check this because + // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage + // due to a large message size. + payloadLen := uint32(len(data)) - 2 + if payloadLen > emptyMsg.MaxPayloadLength(0) { + // Ignore this input - max payload constraint violated. + return -1 + } + + msg, err := lnwire.ReadMessage(r, 0) + if err != nil { + // go-fuzz generated []byte that cannot be represented as a + // wire message but we will return 0 so go-fuzz can modify the + // input. + return 0 + } + + // We will serialize the message into a new bytes buffer. + var b bytes.Buffer + if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { + // Could not serialize message into bytes buffer, panic + panic(err) + } + + // Deserialize the message from the serialized bytes buffer, and then + // assert that the original message is equal to the newly deserialized + // message. + newMsg, err := lnwire.ReadMessage(&b, 0) + if err != nil { + // Could not deserialize message from bytes buffer, panic + panic(err) + } + + // Now compare every field instead of using reflect.DeepEqual. + // For UpfrontShutdownScript, we only compare bytes. This probably takes + // up more branches than necessary, but that's fine for now. 
+ var shouldPanic bool + first := msg.(*lnwire.AcceptChannel) + second := newMsg.(*lnwire.AcceptChannel) + + if !bytes.Equal(first.PendingChannelID[:], second.PendingChannelID[:]) { + shouldPanic = true + } + + if first.DustLimit != second.DustLimit { + shouldPanic = true + } + + if first.MaxValueInFlight != second.MaxValueInFlight { + shouldPanic = true + } + + if first.ChannelReserve != second.ChannelReserve { + shouldPanic = true + } + + if first.HtlcMinimum != second.HtlcMinimum { + shouldPanic = true + } + + if first.MinAcceptDepth != second.MinAcceptDepth { + shouldPanic = true + } + + if first.CsvDelay != second.CsvDelay { + shouldPanic = true + } + + if first.MaxAcceptedHTLCs != second.MaxAcceptedHTLCs { + shouldPanic = true + } + + if !first.FundingKey.IsEqual(second.FundingKey) { + shouldPanic = true + } + + if !first.RevocationPoint.IsEqual(second.RevocationPoint) { + shouldPanic = true + } + + if !first.PaymentPoint.IsEqual(second.PaymentPoint) { + shouldPanic = true + } + + if !first.DelayedPaymentPoint.IsEqual(second.DelayedPaymentPoint) { + shouldPanic = true + } + + if !first.HtlcPoint.IsEqual(second.HtlcPoint) { + shouldPanic = true + } + + if !first.FirstCommitmentPoint.IsEqual(second.FirstCommitmentPoint) { + shouldPanic = true + } + + if !bytes.Equal(first.UpfrontShutdownScript, second.UpfrontShutdownScript) { + shouldPanic = true + } + + if shouldPanic { + panic("original message and deserialized message are not equal") + } + + // Add this input to the corpus. + return 1 +} diff --git a/fuzz/lnwire/announce_signatures.go b/fuzz/lnwire/announce_signatures.go new file mode 100644 index 0000000000..048cf5abe5 --- /dev/null +++ b/fuzz/lnwire/announce_signatures.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_announce_signatures is used by go-fuzz. +func Fuzz_announce_signatures(data []byte) int { + // Prefix with MsgAnnounceSignatures. 
+ data = prefixWithMsgType(data, lnwire.MsgAnnounceSignatures) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.AnnounceSignatures{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/channel_announcement.go b/fuzz/lnwire/channel_announcement.go new file mode 100644 index 0000000000..771df5d1ca --- /dev/null +++ b/fuzz/lnwire/channel_announcement.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_channel_announcement is used by go-fuzz. +func Fuzz_channel_announcement(data []byte) int { + // Prefix with MsgChannelAnnouncement. + data = prefixWithMsgType(data, lnwire.MsgChannelAnnouncement) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.ChannelAnnouncement{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/channel_reestablish.go b/fuzz/lnwire/channel_reestablish.go new file mode 100644 index 0000000000..08cca9a560 --- /dev/null +++ b/fuzz/lnwire/channel_reestablish.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_channel_reestablish is used by go-fuzz. +func Fuzz_channel_reestablish(data []byte) int { + // Prefix with MsgChannelReestablish. + data = prefixWithMsgType(data, lnwire.MsgChannelReestablish) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.ChannelReestablish{} + + // Pass the message into our general fuzz harness for wire messages! 
+ return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/channel_update.go b/fuzz/lnwire/channel_update.go new file mode 100644 index 0000000000..993181f7be --- /dev/null +++ b/fuzz/lnwire/channel_update.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_channel_update is used by go-fuzz. +func Fuzz_channel_update(data []byte) int { + // Prefix with MsgChannelUpdate. + data = prefixWithMsgType(data, lnwire.MsgChannelUpdate) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.ChannelUpdate{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/closing_signed.go b/fuzz/lnwire/closing_signed.go new file mode 100644 index 0000000000..b6898a7cb8 --- /dev/null +++ b/fuzz/lnwire/closing_signed.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_closing_signed is used by go-fuzz. +func Fuzz_closing_signed(data []byte) int { + // Prefix with MsgClosingSigned. + data = prefixWithMsgType(data, lnwire.MsgClosingSigned) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.ClosingSigned{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/commit_sig.go b/fuzz/lnwire/commit_sig.go new file mode 100644 index 0000000000..6f9c76ec10 --- /dev/null +++ b/fuzz/lnwire/commit_sig.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_commit_sig is used by go-fuzz. +func Fuzz_commit_sig(data []byte) int { + // Prefix with MsgCommitSig. 
+ data = prefixWithMsgType(data, lnwire.MsgCommitSig) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.CommitSig{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/error.go b/fuzz/lnwire/error.go new file mode 100644 index 0000000000..8b5dd671a1 --- /dev/null +++ b/fuzz/lnwire/error.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_error is used by go-fuzz. +func Fuzz_error(data []byte) int { + // Prefix with MsgError. + data = prefixWithMsgType(data, lnwire.MsgError) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.Error{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/funding_created.go b/fuzz/lnwire/funding_created.go new file mode 100644 index 0000000000..ffdb3390a2 --- /dev/null +++ b/fuzz/lnwire/funding_created.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_funding_created is used by go-fuzz. +func Fuzz_funding_created(data []byte) int { + // Prefix with MsgFundingCreated. + data = prefixWithMsgType(data, lnwire.MsgFundingCreated) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.FundingCreated{} + + // Pass the message into our general fuzz harness for wire messages! 
+ return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/funding_locked.go b/fuzz/lnwire/funding_locked.go new file mode 100644 index 0000000000..aa5d2c26d2 --- /dev/null +++ b/fuzz/lnwire/funding_locked.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_funding_locked is used by go-fuzz. +func Fuzz_funding_locked(data []byte) int { + // Prefix with MsgFundingLocked. + data = prefixWithMsgType(data, lnwire.MsgFundingLocked) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.FundingLocked{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/funding_signed.go b/fuzz/lnwire/funding_signed.go new file mode 100644 index 0000000000..47f2bfd061 --- /dev/null +++ b/fuzz/lnwire/funding_signed.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_funding_signed is used by go-fuzz. +func Fuzz_funding_signed(data []byte) int { + // Prefix with MsgFundingSigned. + data = prefixWithMsgType(data, lnwire.MsgFundingSigned) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.FundingSigned{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/fuzz_utils.go b/fuzz/lnwire/fuzz_utils.go new file mode 100644 index 0000000000..767f602abb --- /dev/null +++ b/fuzz/lnwire/fuzz_utils.go @@ -0,0 +1,72 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "bytes" + "encoding/binary" + "reflect" + + "github.com/lightningnetwork/lnd/lnwire" +) + +// prefixWithMsgType takes []byte and adds a wire protocol prefix +// to make the []byte into an actual message to be used in fuzzing. 
+func prefixWithMsgType(data []byte, prefix lnwire.MessageType) []byte { + var prefixBytes [2]byte + binary.BigEndian.PutUint16(prefixBytes[:], uint16(prefix)) + data = append(prefixBytes[:], data...) + return data +} + +// harness performs the actual fuzz testing of the appropriate wire message. +// This function will check that the passed-in message passes wire length checks, +// is a valid message once deserialized, and passes a sequence of serialization +// and deserialization checks. Returns an int that determines whether the input +// is unique or not. +func harness(data []byte, emptyMsg lnwire.Message) int { + // Create a reader with the byte array. + r := bytes.NewReader(data) + + // Make sure byte array length (excluding 2 bytes for message type) is + // less than max payload size for the wire message. We check this because + // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage + // due to a large message size. + payloadLen := uint32(len(data)) - 2 + if payloadLen > emptyMsg.MaxPayloadLength(0) { + // Ignore this input - max payload constraint violated. + return -1 + } + + msg, err := lnwire.ReadMessage(r, 0) + if err != nil { + // go-fuzz generated []byte that cannot be represented as a + // wire message but we will return 0 so go-fuzz can modify the + // input. + return 0 + } + + // We will serialize the message into a new bytes buffer. + var b bytes.Buffer + if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { + // Could not serialize message into bytes buffer, panic + panic(err) + } + + // Deserialize the message from the serialized bytes buffer, and then + // assert that the original message is equal to the newly deserialized + // message. + newMsg, err := lnwire.ReadMessage(&b, 0) + if err != nil { + // Could not deserialize message from bytes buffer, panic + panic(err) + } + + if !reflect.DeepEqual(msg, newMsg) { + // Deserialized message and original message are not deeply equal. 
+ panic("original message and deserialized message are not deeply equal") + } + + // Add this input to the corpus. + return 1 +} diff --git a/fuzz/lnwire/gossip_timestamp_range.go b/fuzz/lnwire/gossip_timestamp_range.go new file mode 100644 index 0000000000..9e5e4f11aa --- /dev/null +++ b/fuzz/lnwire/gossip_timestamp_range.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_gossip_timestamp_range is used by go-fuzz. +func Fuzz_gossip_timestamp_range(data []byte) int { + // Prefix with MsgGossipTimestampRange. + data = prefixWithMsgType(data, lnwire.MsgGossipTimestampRange) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.GossipTimestampRange{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/init.go b/fuzz/lnwire/init.go new file mode 100644 index 0000000000..b2be1aba3e --- /dev/null +++ b/fuzz/lnwire/init.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_init is used by go-fuzz. +func Fuzz_init(data []byte) int { + // Prefix with MsgInit. + data = prefixWithMsgType(data, lnwire.MsgInit) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.Init{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/node_announcement.go b/fuzz/lnwire/node_announcement.go new file mode 100644 index 0000000000..76eabe351c --- /dev/null +++ b/fuzz/lnwire/node_announcement.go @@ -0,0 +1,112 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "bytes" + "reflect" + + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_node_announcement is used by go-fuzz. 
+func Fuzz_node_announcement(data []byte) int { + // Prefix with MsgNodeAnnouncement. + data = prefixWithMsgType(data, lnwire.MsgNodeAnnouncement) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.NodeAnnouncement{} + + // We have to do this here instead of in fuzz.Harness so that + // reflect.DeepEqual isn't called. Address (de)serialization messes up + // the fuzzing assertions. + + // Create a reader with the byte array. + r := bytes.NewReader(data) + + // Make sure byte array length (excluding 2 bytes for message type) is + // less than max payload size for the wire message. We check this because + // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage + // due to a large message size. + payloadLen := uint32(len(data)) - 2 + if payloadLen > emptyMsg.MaxPayloadLength(0) { + // Ignore this input - max payload constraint violated. + return -1 + } + + msg, err := lnwire.ReadMessage(r, 0) + if err != nil { + // go-fuzz generated []byte that cannot be represented as a + // wire message but we will return 0 so go-fuzz can modify the + // input. + return 0 + } + + // We will serialize the message into a new bytes buffer. + var b bytes.Buffer + if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { + // Could not serialize message into bytes buffer, panic + panic(err) + } + + // Deserialize the message from the serialized bytes buffer, and then + // assert that the original message is equal to the newly deserialized + // message. + newMsg, err := lnwire.ReadMessage(&b, 0) + if err != nil { + // Could not deserialize message from bytes buffer, panic + panic(err) + } + + // Now compare every field instead of using reflect.DeepEqual for the + // Addresses field. 
+ var shouldPanic bool + first := msg.(*lnwire.NodeAnnouncement) + second := newMsg.(*lnwire.NodeAnnouncement) + if !bytes.Equal(first.Signature[:], second.Signature[:]) { + shouldPanic = true + } + + if !reflect.DeepEqual(first.Features, second.Features) { + shouldPanic = true + } + + if first.Timestamp != second.Timestamp { + shouldPanic = true + } + + if !bytes.Equal(first.NodeID[:], second.NodeID[:]) { + shouldPanic = true + } + + if !reflect.DeepEqual(first.RGBColor, second.RGBColor) { + shouldPanic = true + } + + if !bytes.Equal(first.Alias[:], second.Alias[:]) { + shouldPanic = true + } + + if len(first.Addresses) != len(second.Addresses) { + shouldPanic = true + } + + for i := range first.Addresses { + if first.Addresses[i].String() != second.Addresses[i].String() { + shouldPanic = true + break + } + } + + if !reflect.DeepEqual(first.ExtraOpaqueData, second.ExtraOpaqueData) { + shouldPanic = true + } + + if shouldPanic { + panic("original message and deserialized message are not equal") + } + + // Add this input to the corpus. + return 1 +} diff --git a/fuzz/lnwire/open_channel.go b/fuzz/lnwire/open_channel.go new file mode 100644 index 0000000000..ddd268cc28 --- /dev/null +++ b/fuzz/lnwire/open_channel.go @@ -0,0 +1,151 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "bytes" + + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_open_channel is used by go-fuzz. +func Fuzz_open_channel(data []byte) int { + // Prefix with MsgOpenChannel. + data = prefixWithMsgType(data, lnwire.MsgOpenChannel) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.OpenChannel{} + + // We have to do this here instead of in fuzz.Harness so that + // reflect.DeepEqual isn't called. Because of the UpfrontShutdownScript + // encoding, the first message and second message aren't deeply equal since + // the first has a nil slice and the other has an empty slice. 
+ + // Create a reader with the byte array. + r := bytes.NewReader(data) + + // Make sure byte array length (excluding 2 bytes for message type) is + // less than max payload size for the wire message. We check this because + // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage + // due to a large message size. + payloadLen := uint32(len(data)) - 2 + if payloadLen > emptyMsg.MaxPayloadLength(0) { + // Ignore this input - max payload constraint violated. + return -1 + } + + msg, err := lnwire.ReadMessage(r, 0) + if err != nil { + // go-fuzz generated []byte that cannot be represented as a + // wire message but we will return 0 so go-fuzz can modify the + // input. + return 0 + } + + // We will serialize the message into a new bytes buffer. + var b bytes.Buffer + if _, err := lnwire.WriteMessage(&b, msg, 0); err != nil { + // Could not serialize message into bytes buffer, panic + panic(err) + } + + // Deserialize the message from the serialized bytes buffer, and then + // assert that the original message is equal to the newly deserialized + // message. + newMsg, err := lnwire.ReadMessage(&b, 0) + if err != nil { + // Could not deserialize message from bytes buffer, panic + panic(err) + } + + // Now compare every field instead of using reflect.DeepEqual. + // For UpfrontShutdownScript, we only compare bytes. This probably takes + // up more branches than necessary, but that's fine for now. 
+ var shouldPanic bool + first := msg.(*lnwire.OpenChannel) + second := newMsg.(*lnwire.OpenChannel) + + if !first.ChainHash.IsEqual(&second.ChainHash) { + shouldPanic = true + } + + if !bytes.Equal(first.PendingChannelID[:], second.PendingChannelID[:]) { + shouldPanic = true + } + + if first.FundingAmount != second.FundingAmount { + shouldPanic = true + } + + if first.PushAmount != second.PushAmount { + shouldPanic = true + } + + if first.DustLimit != second.DustLimit { + shouldPanic = true + } + + if first.MaxValueInFlight != second.MaxValueInFlight { + shouldPanic = true + } + + if first.ChannelReserve != second.ChannelReserve { + shouldPanic = true + } + + if first.HtlcMinimum != second.HtlcMinimum { + shouldPanic = true + } + + if first.FeePerKiloWeight != second.FeePerKiloWeight { + shouldPanic = true + } + + if first.CsvDelay != second.CsvDelay { + shouldPanic = true + } + + if first.MaxAcceptedHTLCs != second.MaxAcceptedHTLCs { + shouldPanic = true + } + + if !first.FundingKey.IsEqual(second.FundingKey) { + shouldPanic = true + } + + if !first.RevocationPoint.IsEqual(second.RevocationPoint) { + shouldPanic = true + } + + if !first.PaymentPoint.IsEqual(second.PaymentPoint) { + shouldPanic = true + } + + if !first.DelayedPaymentPoint.IsEqual(second.DelayedPaymentPoint) { + shouldPanic = true + } + + if !first.HtlcPoint.IsEqual(second.HtlcPoint) { + shouldPanic = true + } + + if !first.FirstCommitmentPoint.IsEqual(second.FirstCommitmentPoint) { + shouldPanic = true + } + + if first.ChannelFlags != second.ChannelFlags { + shouldPanic = true + } + + if !bytes.Equal(first.UpfrontShutdownScript, second.UpfrontShutdownScript) { + shouldPanic = true + } + + if shouldPanic { + panic("original message and deserialized message are not equal") + } + + // Add this input to the corpus. 
+ return 1 +} diff --git a/fuzz/lnwire/ping.go b/fuzz/lnwire/ping.go new file mode 100644 index 0000000000..87e893e060 --- /dev/null +++ b/fuzz/lnwire/ping.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_ping is used by go-fuzz. +func Fuzz_ping(data []byte) int { + // Prefix with MsgPing. + data = prefixWithMsgType(data, lnwire.MsgPing) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.Ping{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/pong.go b/fuzz/lnwire/pong.go new file mode 100644 index 0000000000..b51e315249 --- /dev/null +++ b/fuzz/lnwire/pong.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_pong is used by go-fuzz. +func Fuzz_pong(data []byte) int { + // Prefix with MsgPong. + data = prefixWithMsgType(data, lnwire.MsgPong) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.Pong{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/query_channel_range.go b/fuzz/lnwire/query_channel_range.go new file mode 100644 index 0000000000..38f5f4d7b6 --- /dev/null +++ b/fuzz/lnwire/query_channel_range.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_query_channel_range is used by go-fuzz. +func Fuzz_query_channel_range(data []byte) int { + // Prefix with MsgQueryChannelRange. + data = prefixWithMsgType(data, lnwire.MsgQueryChannelRange) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. 
+ emptyMsg := lnwire.QueryChannelRange{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/query_short_chan_ids.go b/fuzz/lnwire/query_short_chan_ids.go new file mode 100644 index 0000000000..b0adb87920 --- /dev/null +++ b/fuzz/lnwire/query_short_chan_ids.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_query_short_chan_ids is used by go-fuzz. +func Fuzz_query_short_chan_ids(data []byte) int { + // Prefix with MsgQueryShortChanIDs. + data = prefixWithMsgType(data, lnwire.MsgQueryShortChanIDs) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.QueryShortChanIDs{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/query_short_chan_ids_zlib.go b/fuzz/lnwire/query_short_chan_ids_zlib.go new file mode 100644 index 0000000000..7b2061d77d --- /dev/null +++ b/fuzz/lnwire/query_short_chan_ids_zlib.go @@ -0,0 +1,51 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "bytes" + "compress/zlib" + "encoding/binary" + + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_query_short_chan_ids_zlib is used by go-fuzz. +func Fuzz_query_short_chan_ids_zlib(data []byte) int { + + var buf bytes.Buffer + zlibWriter := zlib.NewWriter(&buf) + _, err := zlibWriter.Write(data) + if err != nil { + // Zlib bug? + panic(err) + } + + if err := zlibWriter.Close(); err != nil { + // Zlib bug? + panic(err) + } + + compressedPayload := buf.Bytes() + + chainhash := []byte("00000000000000000000000000000000") + numBytesInBody := len(compressedPayload) + 1 + zlibByte := []byte("\x01") + + bodyBytes := make([]byte, 2) + binary.BigEndian.PutUint16(bodyBytes, uint16(numBytesInBody)) + + payload := append(chainhash, bodyBytes...) 
+ payload = append(payload, zlibByte...) + payload = append(payload, compressedPayload...) + + // Prefix with MsgQueryShortChanIDs. + payload = prefixWithMsgType(payload, lnwire.MsgQueryShortChanIDs) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.QueryShortChanIDs{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(payload, &emptyMsg) +} diff --git a/fuzz/lnwire/reply_channel_range.go b/fuzz/lnwire/reply_channel_range.go new file mode 100644 index 0000000000..3bfdbf3623 --- /dev/null +++ b/fuzz/lnwire/reply_channel_range.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_reply_channel_range is used by go-fuzz. +func Fuzz_reply_channel_range(data []byte) int { + // Prefix with MsgReplyChannelRange. + data = prefixWithMsgType(data, lnwire.MsgReplyChannelRange) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.ReplyChannelRange{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/reply_channel_range_zlib.go b/fuzz/lnwire/reply_channel_range_zlib.go new file mode 100644 index 0000000000..944d1fa1af --- /dev/null +++ b/fuzz/lnwire/reply_channel_range_zlib.go @@ -0,0 +1,59 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "bytes" + "compress/zlib" + "encoding/binary" + + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_reply_channel_range_zlib is used by go-fuzz. +func Fuzz_reply_channel_range_zlib(data []byte) int { + + var buf bytes.Buffer + zlibWriter := zlib.NewWriter(&buf) + _, err := zlibWriter.Write(data) + if err != nil { + // Zlib bug? + panic(err) + } + + if err := zlibWriter.Close(); err != nil { + // Zlib bug? 
+ panic(err) + } + + compressedPayload := buf.Bytes() + + // Initialize some []byte vars which will prefix our payload + chainhash := []byte("00000000000000000000000000000000") + firstBlockHeight := []byte("\x00\x00\x00\x00") + numBlocks := []byte("\x00\x00\x00\x00") + completeByte := []byte("\x00") + + numBytesInBody := len(compressedPayload) + 1 + zlibByte := []byte("\x01") + + bodyBytes := make([]byte, 2) + binary.BigEndian.PutUint16(bodyBytes, uint16(numBytesInBody)) + + payload := append(chainhash, firstBlockHeight...) + payload = append(payload, numBlocks...) + payload = append(payload, completeByte...) + payload = append(payload, bodyBytes...) + payload = append(payload, zlibByte...) + payload = append(payload, compressedPayload...) + + // Prefix with MsgReplyChannelRange. + payload = prefixWithMsgType(payload, lnwire.MsgReplyChannelRange) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.ReplyChannelRange{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(payload, &emptyMsg) +} diff --git a/fuzz/lnwire/reply_short_chan_ids_end.go b/fuzz/lnwire/reply_short_chan_ids_end.go new file mode 100644 index 0000000000..c09282d9a5 --- /dev/null +++ b/fuzz/lnwire/reply_short_chan_ids_end.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_reply_short_chan_ids_end is used by go-fuzz. +func Fuzz_reply_short_chan_ids_end(data []byte) int { + // Prefix with MsgReplyShortChanIDsEnd. + data = prefixWithMsgType(data, lnwire.MsgReplyShortChanIDsEnd) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.ReplyShortChanIDsEnd{} + + // Pass the message into our general fuzz harness for wire messages! 
+ return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/revoke_and_ack.go b/fuzz/lnwire/revoke_and_ack.go new file mode 100644 index 0000000000..23951c356e --- /dev/null +++ b/fuzz/lnwire/revoke_and_ack.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_revoke_and_ack is used by go-fuzz. +func Fuzz_revoke_and_ack(data []byte) int { + // Prefix with MsgRevokeAndAck. + data = prefixWithMsgType(data, lnwire.MsgRevokeAndAck) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.RevokeAndAck{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/shutdown.go b/fuzz/lnwire/shutdown.go new file mode 100644 index 0000000000..1ffd86606e --- /dev/null +++ b/fuzz/lnwire/shutdown.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_shutdown is used by go-fuzz. +func Fuzz_shutdown(data []byte) int { + // Prefix with MsgShutdown. + data = prefixWithMsgType(data, lnwire.MsgShutdown) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.Shutdown{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/update_add_htlc.go b/fuzz/lnwire/update_add_htlc.go new file mode 100644 index 0000000000..570de7418f --- /dev/null +++ b/fuzz/lnwire/update_add_htlc.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_update_add_htlc is used by go-fuzz. +func Fuzz_update_add_htlc(data []byte) int { + // Prefix with MsgUpdateAddHTLC. 
+ data = prefixWithMsgType(data, lnwire.MsgUpdateAddHTLC) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.UpdateAddHTLC{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/update_fail_htlc.go b/fuzz/lnwire/update_fail_htlc.go new file mode 100644 index 0000000000..4ef9ab88d7 --- /dev/null +++ b/fuzz/lnwire/update_fail_htlc.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_update_fail_htlc is used by go-fuzz. +func Fuzz_update_fail_htlc(data []byte) int { + // Prefix with MsgUpdateFailHTLC. + data = prefixWithMsgType(data, lnwire.MsgUpdateFailHTLC) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.UpdateFailHTLC{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/update_fail_malformed_htlc.go b/fuzz/lnwire/update_fail_malformed_htlc.go new file mode 100644 index 0000000000..0a0d45a6f5 --- /dev/null +++ b/fuzz/lnwire/update_fail_malformed_htlc.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_update_fail_malformed_htlc is used by go-fuzz. +func Fuzz_update_fail_malformed_htlc(data []byte) int { + // Prefix with MsgUpdateFailMalformedHTLC. + data = prefixWithMsgType(data, lnwire.MsgUpdateFailMalformedHTLC) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.UpdateFailMalformedHTLC{} + + // Pass the message into our general fuzz harness for wire messages! 
+ return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/update_fee.go b/fuzz/lnwire/update_fee.go new file mode 100644 index 0000000000..bb82c5c19f --- /dev/null +++ b/fuzz/lnwire/update_fee.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_update_fee is used by go-fuzz. +func Fuzz_update_fee(data []byte) int { + // Prefix with MsgUpdateFee. + data = prefixWithMsgType(data, lnwire.MsgUpdateFee) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.UpdateFee{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/lnwire/update_fulfill_htlc.go b/fuzz/lnwire/update_fulfill_htlc.go new file mode 100644 index 0000000000..de28dfbe1a --- /dev/null +++ b/fuzz/lnwire/update_fulfill_htlc.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package lnwirefuzz + +import ( + "github.com/lightningnetwork/lnd/lnwire" +) + +// Fuzz_update_fulfill_htlc is used by go-fuzz. +func Fuzz_update_fulfill_htlc(data []byte) int { + // Prefix with MsgUpdateFulfillHTLC. + data = prefixWithMsgType(data, lnwire.MsgUpdateFulfillHTLC) + + // Create an empty message so that the FuzzHarness func can check + // if the max payload constraint is violated. + emptyMsg := lnwire.UpdateFulfillHTLC{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/wtwire/create_session.go b/fuzz/wtwire/create_session.go new file mode 100644 index 0000000000..df521bf407 --- /dev/null +++ b/fuzz/wtwire/create_session.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package wtwirefuzz + +import ( + "github.com/lightningnetwork/lnd/watchtower/wtwire" +) + +// Fuzz_create_session is used by go-fuzz. +func Fuzz_create_session(data []byte) int { + // Prefix with MsgCreateSession. 
+ data = prefixWithMsgType(data, wtwire.MsgCreateSession) + + // Create an empty message so that the FuzzHarness func can check if the + // max payload constraint is violated. + emptyMsg := wtwire.CreateSession{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/wtwire/create_session_reply.go b/fuzz/wtwire/create_session_reply.go new file mode 100644 index 0000000000..a0e07f85f0 --- /dev/null +++ b/fuzz/wtwire/create_session_reply.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package wtwirefuzz + +import ( + "github.com/lightningnetwork/lnd/watchtower/wtwire" +) + +// Fuzz_create_session_reply is used by go-fuzz. +func Fuzz_create_session_reply(data []byte) int { + // Prefix with MsgCreateSessionReply. + data = prefixWithMsgType(data, wtwire.MsgCreateSessionReply) + + // Create an empty message so that the FuzzHarness func can check if the + // max payload constraint is violated. + emptyMsg := wtwire.CreateSessionReply{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/wtwire/delete_session.go b/fuzz/wtwire/delete_session.go new file mode 100644 index 0000000000..2cb56225cf --- /dev/null +++ b/fuzz/wtwire/delete_session.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package wtwirefuzz + +import ( + "github.com/lightningnetwork/lnd/watchtower/wtwire" +) + +// Fuzz_delete_session is used by go-fuzz. +func Fuzz_delete_session(data []byte) int { + // Prefix with MsgDeleteSession. + data = prefixWithMsgType(data, wtwire.MsgDeleteSession) + + // Create an empty message so that the FuzzHarness func can check if the + // max payload constraint is violated. + emptyMsg := wtwire.DeleteSession{} + + // Pass the message into our general fuzz harness for wire messages! 
+ return harness(data, &emptyMsg) +} diff --git a/fuzz/wtwire/delete_session_reply.go b/fuzz/wtwire/delete_session_reply.go new file mode 100644 index 0000000000..0f0360f1a4 --- /dev/null +++ b/fuzz/wtwire/delete_session_reply.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package wtwirefuzz + +import ( + "github.com/lightningnetwork/lnd/watchtower/wtwire" +) + +// Fuzz_delete_session_reply is used by go-fuzz. +func Fuzz_delete_session_reply(data []byte) int { + // Prefix with MsgDeleteSessionReply. + data = prefixWithMsgType(data, wtwire.MsgDeleteSessionReply) + + // Create an empty message so that the FuzzHarness func can check if the + // max payload constraint is violated. + emptyMsg := wtwire.DeleteSessionReply{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/wtwire/error.go b/fuzz/wtwire/error.go new file mode 100644 index 0000000000..f0f5fae231 --- /dev/null +++ b/fuzz/wtwire/error.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package wtwirefuzz + +import ( + "github.com/lightningnetwork/lnd/watchtower/wtwire" +) + +// Fuzz_error is used by go-fuzz. +func Fuzz_error(data []byte) int { + // Prefix with MsgError. + data = prefixWithMsgType(data, wtwire.MsgError) + + // Create an empty message so that the FuzzHarness func can check if the + // max payload constraint is violated. + emptyMsg := wtwire.Error{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/wtwire/fuzz_utils.go b/fuzz/wtwire/fuzz_utils.go new file mode 100644 index 0000000000..8c8ee89b82 --- /dev/null +++ b/fuzz/wtwire/fuzz_utils.go @@ -0,0 +1,75 @@ +// +build gofuzz + +package wtwirefuzz + +import ( + "bytes" + "encoding/binary" + "fmt" + "reflect" + + "github.com/lightningnetwork/lnd/watchtower/wtwire" +) + +// prefixWithMsgType takes []byte and adds a wire protocol prefix +// to make the []byte into an actual message to be used in fuzzing. 
+func prefixWithMsgType(data []byte, prefix wtwire.MessageType) []byte { + var prefixBytes [2]byte + binary.BigEndian.PutUint16(prefixBytes[:], uint16(prefix)) + data = append(prefixBytes[:], data...) + return data +} + +// harness performs the actual fuzz testing of the appropriate wire message. +// This function will check that the passed-in message passes wire length checks, +// is a valid message once deserialized, and passes a sequence of serialization +// and deserialization checks. Returns an int that determines whether the input +// is unique or not. +func harness(data []byte, emptyMsg wtwire.Message) int { + // Create a reader with the byte array. + r := bytes.NewReader(data) + + // Make sure byte array length (excluding 2 bytes for message type) is + // less than max payload size for the wire message. We check this because + // otherwise `go-fuzz` will keep creating inputs that crash on ReadMessage + // due to a large message size. + payloadLen := uint32(len(data)) - 2 + if payloadLen > emptyMsg.MaxPayloadLength(0) { + // Ignore this input - max payload constraint violated. + return -1 + } + + msg, err := wtwire.ReadMessage(r, 0) + if err != nil { + // go-fuzz generated []byte that cannot be represented as a + // wire message but we will return 0 so go-fuzz can modify the + // input. + return 0 + } + + // We will serialize the message into a new bytes buffer. + var b bytes.Buffer + if _, err := wtwire.WriteMessage(&b, msg, 0); err != nil { + // Could not serialize message into bytes buffer, panic. + panic(err) + } + + // Deserialize the message from the serialized bytes buffer, and then + // assert that the original message is equal to the newly deserialized + // message. + newMsg, err := wtwire.ReadMessage(&b, 0) + if err != nil { + // Could not deserialize message from bytes buffer, panic. + panic(err) + } + + if !reflect.DeepEqual(msg, newMsg) { + // Deserialized message and original message are not + // deeply equal. 
+ panic(fmt.Errorf("deserialized message and original message " + + "are not deeply equal.")) + } + + // Add this input to the corpus. + return 1 +} diff --git a/fuzz/wtwire/init.go b/fuzz/wtwire/init.go new file mode 100644 index 0000000000..31112b8f25 --- /dev/null +++ b/fuzz/wtwire/init.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package wtwirefuzz + +import ( + "github.com/lightningnetwork/lnd/watchtower/wtwire" +) + +// Fuzz_init is used by go-fuzz. +func Fuzz_init(data []byte) int { + // Prefix with MsgInit. + data = prefixWithMsgType(data, wtwire.MsgInit) + + // Create an empty message so that the FuzzHarness func can check if the + // max payload constraint is violated. + emptyMsg := wtwire.Init{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/wtwire/state_update.go b/fuzz/wtwire/state_update.go new file mode 100644 index 0000000000..5d13568ef5 --- /dev/null +++ b/fuzz/wtwire/state_update.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package wtwirefuzz + +import ( + "github.com/lightningnetwork/lnd/watchtower/wtwire" +) + +// Fuzz_state_update is used by go-fuzz. +func Fuzz_state_update(data []byte) int { + // Prefix with MsgStateUpdate. + data = prefixWithMsgType(data, wtwire.MsgStateUpdate) + + // Create an empty message so that the FuzzHarness func can check if the + // max payload constraint is violated. + emptyMsg := wtwire.StateUpdate{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/fuzz/wtwire/state_update_reply.go b/fuzz/wtwire/state_update_reply.go new file mode 100644 index 0000000000..fb7a3bbb85 --- /dev/null +++ b/fuzz/wtwire/state_update_reply.go @@ -0,0 +1,20 @@ +// +build gofuzz + +package wtwirefuzz + +import ( + "github.com/lightningnetwork/lnd/watchtower/wtwire" +) + +// Fuzz_state_update_reply is used by go-fuzz. 
+func Fuzz_state_update_reply(data []byte) int { + // Prefix with MsgStateUpdateReply. + data = prefixWithMsgType(data, wtwire.MsgStateUpdateReply) + + // Create an empty message so that the FuzzHarness func can check if the + // max payload constraint is violated. + emptyMsg := wtwire.StateUpdateReply{} + + // Pass the message into our general fuzz harness for wire messages! + return harness(data, &emptyMsg) +} diff --git a/go.mod b/go.mod index 56e988147b..9e78322e5f 100644 --- a/go.mod +++ b/go.mod @@ -5,24 +5,26 @@ require ( github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e // indirect github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82 github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2 - github.com/btcsuite/btcd v0.20.0-beta + github.com/btcsuite/btcd v0.20.1-beta github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f - github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d - github.com/btcsuite/btcwallet v0.10.0 + github.com/btcsuite/btcutil v1.0.2 + github.com/btcsuite/btcutil/psbt v1.0.2 + github.com/btcsuite/btcwallet v0.11.1-0.20200403222202-ada7ca077ebb github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0 github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 - github.com/btcsuite/btcwallet/walletdb v1.1.0 + github.com/btcsuite/btcwallet/walletdb v1.3.1 github.com/btcsuite/btcwallet/wtxmgr v1.0.0 github.com/btcsuite/fastsha256 v0.0.0-20160815193821-637e65642941 - github.com/coreos/bbolt v1.3.3 github.com/davecgh/go-spew v1.1.1 github.com/go-errors/errors v1.0.1 + github.com/go-openapi/strfmt v0.19.5 // indirect github.com/golang/protobuf v1.3.1 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v0.0.0-20170724004829-f2862b476edc + github.com/grpc-ecosystem/grpc-gateway v1.8.6 github.com/jackpal/gateway v1.0.5 github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad + github.com/jedib0t/go-pretty 
v4.3.0+incompatible github.com/jessevdk/go-flags v1.4.0 github.com/jrick/logrotate v1.0.0 github.com/juju/clock v0.0.0-20190205081909-9c5c9712527c // indirect @@ -33,21 +35,25 @@ require ( github.com/juju/utils v0.0.0-20180820210520-bf9cc5bdd62d // indirect github.com/juju/version v0.0.0-20180108022336-b64dbd566305 // indirect github.com/kkdai/bstream v0.0.0-20181106074824-b3251f7901ec - github.com/lightninglabs/neutrino v0.10.0 - github.com/lightningnetwork/lightning-onion v0.0.0-20190909101754-850081b08b6a - github.com/lightningnetwork/lnd/queue v1.0.1 + github.com/lightninglabs/neutrino v0.11.1-0.20200316235139-bffc52e8f200 + github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d + github.com/lightningnetwork/lightning-onion v1.0.1 + github.com/lightningnetwork/lnd/cert v1.0.2 + github.com/lightningnetwork/lnd/queue v1.0.3 github.com/lightningnetwork/lnd/ticker v1.0.0 github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 + github.com/mattn/go-runewidth v0.0.9 // indirect github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8 github.com/prometheus/client_golang v0.9.3 github.com/rogpeppe/fastuuid v1.2.0 // indirect github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 github.com/urfave/cli v1.18.0 - golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 + golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 + golang.org/x/sys v0.0.0-20200116001909-b77594299b42 // indirect golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 - google.golang.org/grpc v1.18.0 + google.golang.org/grpc v1.19.0 gopkg.in/errgo.v1 v1.0.1 // indirect gopkg.in/macaroon-bakery.v2 v2.0.1 gopkg.in/macaroon.v2 v2.0.0 @@ -58,17 +64,24 @@ replace github.com/lightningnetwork/lnd/ticker => ./ticker replace github.com/lightningnetwork/lnd/queue => ./queue +replace github.com/lightningnetwork/lnd/cert => ./cert + replace 
git.schwanenlied.me/yawning/bsaes.git => github.com/Yawning/bsaes v0.0.0-20180720073208-c0276d75487e +// Pin this version that we know works explicitly, even though the +// btcsuite/btcutil package requests a newer version. +replace golang.org/x/crypto => golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 + go 1.12 replace ( - github.com/btcsuite/btcd => github.com/Groestlcoin/grsd v0.20.0-grs - github.com/btcsuite/btcutil => github.com/Groestlcoin/grsutil v0.5.0-grsd-0-8 - github.com/btcsuite/btcwallet => github.com/Groestlcoin/grswallet v0.10.0-grs + github.com/btcsuite/btcd => github.com/Groestlcoin/grsd v0.20.1-grs + github.com/btcsuite/btcutil => github.com/Groestlcoin/grsutil v1.0.2-grs + github.com/btcsuite/btcutil/psbt => github.com/Groestlcoin/grsutil/psbt v1.0.2-grs + github.com/btcsuite/btcwallet => github.com/Groestlcoin/grswallet v0.11.1-pre-grs github.com/btcsuite/btcwallet/wallet/txauthor => github.com/Groestlcoin/grswallet/wallet/txauthor v1.0.0-grs github.com/btcsuite/btcwallet/wallet/txrules => github.com/Groestlcoin/grswallet/wallet/txrules v1.0.0-grs - github.com/btcsuite/btcwallet/walletdb => github.com/Groestlcoin/grswallet/walletdb v1.1.0-grs + github.com/btcsuite/btcwallet/walletdb => github.com/Groestlcoin/grswallet/walletdb v1.3.1-grs github.com/btcsuite/btcwallet/wtxmgr => github.com/Groestlcoin/grswallet/wtxmgr v1.0.0-grs - github.com/lightninglabs/neutrino => github.com/Groestlcoin/neutrino v0.10.0-grs + github.com/lightninglabs/neutrino => github.com/Groestlcoin/neutrino v0.11.1-pre-grs ) diff --git a/go.sum b/go.sum index 7f405519fa..6854ec316a 100644 --- a/go.sum +++ b/go.sum @@ -1,22 +1,25 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Groestlcoin/go-groestl-hash v0.0.0-20181012171753-790653ac190c h1:8bYNmjELeCj7DEh/dN7zFzkJ0upK3GkbOC/0u1HMQ5s= github.com/Groestlcoin/go-groestl-hash 
v0.0.0-20181012171753-790653ac190c/go.mod h1:DwgC62sAn4RgH4L+O8REgcE7f0XplHPNeRYFy+ffy1M= -github.com/Groestlcoin/grsd v0.20.0-grs h1:62SH80xlNfbI8JVdoXAkNotjUi84TDb7g0WfOhufnV8= -github.com/Groestlcoin/grsd v0.20.0-grs/go.mod h1:c381FUxkRl24Sh2eyxzDF9t/T+dRSXIc7znyVN/sqjA= -github.com/Groestlcoin/grsutil v0.5.0-grsd-0-8 h1:lsMvgN0zaQV3K/k+nMnBfITaOOST0THirPiYzDDaTnE= -github.com/Groestlcoin/grsutil v0.5.0-grsd-0-8/go.mod h1:aQToonSeP4iLeZgNeLPI+xxNs3FadOdksM/FsWo8/Tg= -github.com/Groestlcoin/grswallet v0.10.0-grs h1:JBSjZwAcaWCPfGCvLLSZlzVcTENkprBGcnbzBq8VUM0= -github.com/Groestlcoin/grswallet v0.10.0-grs/go.mod h1:d7E6AYiktRO/kllx8cNpfKdGahxAfUDYobCO+7AxlYg= +github.com/Groestlcoin/grsd v0.20.1-grs h1:5H+I5pL+wu8hIN/9p90LEdVyrSOyI66p3Le9ZBBleA8= +github.com/Groestlcoin/grsd v0.20.1-grs/go.mod h1:c381FUxkRl24Sh2eyxzDF9t/T+dRSXIc7znyVN/sqjA= +github.com/Groestlcoin/grsutil v1.0.2-grs h1:JMoUoOZa9NM0D7HhgGTsr1aMs9is7K0UszgPXS7qP64= +github.com/Groestlcoin/grsutil v1.0.2-grs/go.mod h1:vdqvEcrjhki+/sHoqS82ZWBnKwnDB99XJb5XbT9OKcE= +github.com/Groestlcoin/grsutil/psbt v1.0.2-grs h1:tG4wIt7S4Q2eyrcARd3rOL5pRGsVV+DHm15HqWODgy0= +github.com/Groestlcoin/grsutil/psbt v1.0.2-grs/go.mod h1:j6GeRs7be6EA7HsQFHOvaYj0Vngj1Dp9seG4/jwWd3Q= +github.com/Groestlcoin/grswallet v0.11.1-pre-grs h1:NbEPQHI/Z8qAQOQ4Uc0fyPetJYtIo3X6nZ5M/+Ik2oY= +github.com/Groestlcoin/grswallet v0.11.1-pre-grs/go.mod h1:faMs/G3108mwN1IRvGJoobZ2rHhnnNW5Ks1up/LrQLw= github.com/Groestlcoin/grswallet/wallet/txauthor v1.0.0-grs h1:50aYMve1/irj5TGzj9LWOg4wx9cgzVys33omMIuzJLM= github.com/Groestlcoin/grswallet/wallet/txauthor v1.0.0-grs/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU= github.com/Groestlcoin/grswallet/wallet/txrules v1.0.0-grs h1:hoo6dthWgwcxNHw1rxKlfRMgOR7qy2bd+YU6aCYFKwU= github.com/Groestlcoin/grswallet/wallet/txrules v1.0.0-grs/go.mod h1:UwQE78yCerZ313EXZwEiu3jNAtfXj2n2+c8RWiE/WNA= -github.com/Groestlcoin/grswallet/walletdb v1.1.0-grs h1:ctnvryAN0YEV83XceFOB2br+l1DCz2o/gToE0gVDNXk= 
-github.com/Groestlcoin/grswallet/walletdb v1.1.0-grs/go.mod h1:bZTy9RyYZh9fLnSua+/CD48TJtYJSHjjYcSaszuxCCk= +github.com/Groestlcoin/grswallet/walletdb v1.3.1-grs h1:fICpXhiqBGgoFS/pfp0bTKJ/7/JvjRnbGa9yqAk/Tr8= +github.com/Groestlcoin/grswallet/walletdb v1.3.1-grs/go.mod h1:9cwc1Yyg4uvd4ZdfdoMnALji+V9gfWSMfxEdLdR5Vwc= github.com/Groestlcoin/grswallet/wtxmgr v1.0.0-grs h1:cwOaJgZoZkgd2lC2RnCxzkmghq99bgZk1YfZA/pHqms= github.com/Groestlcoin/grswallet/wtxmgr v1.0.0-grs/go.mod h1:vc4gBprll6BP0UJ+AIGDaySoc7MdAmZf8kelfNb8CFY= -github.com/Groestlcoin/neutrino v0.10.0-grs h1:JN48n+HiNVbnvcrHIenFBvilEAuk9s26NewXB8LokO0= -github.com/Groestlcoin/neutrino v0.10.0-grs/go.mod h1:z6GAyip6cldMeycslPgnpZD+trKOnCSHEVCIXbgkhLo= +github.com/Groestlcoin/neutrino v0.11.1-pre-grs h1:hqQddrLyIJgMT2HyeR3OjnOZRb9n2m3W7zuTnrOoXHA= +github.com/Groestlcoin/neutrino v0.11.1-pre-grs/go.mod h1:J8dVN8Kk8ROxA6vog2+VreMt3dJs5SEDIQuw4N9PXqA= github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e h1:n+DcnTNkQnHlwpsrHoQtkrJIO7CBx029fw6oR4vIob4= github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e/go.mod h1:Bdzq+51GR4/0DIhaICZEOm+OHvXGwwB2trKZ8B4Y6eQ= github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82 h1:MG93+PZYs9PyEsj/n5/haQu2gK0h4tUtSy9ejtMwWa0= @@ -32,31 +35,36 @@ github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8 h1:mOg8/RgDSHTQ1R0IR+LMDuW4TDShPv+JzYHuR4GLoNA= github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3 h1:A/EVblehb75cUgXA5njHPn0kLAsykn6mJGz7rnmW5W0= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.0-beta h1:DnZGUjFbRkpytojHWwy6nfUSA7vFrzWXDLpFNzt74ZA= -github.com/btcsuite/btcd v0.20.0-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcwallet v0.10.0 h1:fFZncfYJ7VByePTGttzJc3qfCyDzU95ucZYk0M912lU= -github.com/btcsuite/btcwallet v0.10.0/go.mod h1:4TqBEuceheGNdeLNrelliLHJzmXauMM2vtWfuy1pFiM= +github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= +github.com/btcsuite/btcutil/psbt v1.0.2 h1:gCVY3KxdoEVU7Q6TjusPO+GANIwVgr9yTLqM+a6CZr8= +github.com/btcsuite/btcutil/psbt 
v1.0.2/go.mod h1:LVveMu4VaNSkIRTZu2+ut0HDBRuYjqGocxDMNS1KuGQ= +github.com/btcsuite/btcwallet v0.11.1-0.20200403222202-ada7ca077ebb h1:kkq2SSCy+OrC7GVZLIqutoHVR2yW4SJQdX70jtmuLDI= +github.com/btcsuite/btcwallet v0.11.1-0.20200403222202-ada7ca077ebb/go.mod h1:9fJNm1aXi4q9P5Nk23mmqppCy1Le3f2/JMWj9UXKkCc= github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0 h1:KGHMW5sd7yDdDMkCZ/JpP0KltolFsQcB973brBnfj4c= github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU= github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 h1:2VsfS0sBedcM5KmDzRMT3+b6xobqWveZGvjb+jFez5w= github.com/btcsuite/btcwallet/wallet/txrules v1.0.0/go.mod h1:UwQE78yCerZ313EXZwEiu3jNAtfXj2n2+c8RWiE/WNA= github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0 h1:6DxkcoMnCPY4E9cUDPB5tbuuf40SmmMkSQkoE8vCT+s= github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0/go.mod h1:pauEU8UuMFiThe5PB3EO+gO5kx87Me5NvdQDsTuq6cs= -github.com/btcsuite/btcwallet/walletdb v1.0.0 h1:mheT7vCWK5EP6rZzhxsQ7ms9+yX4VE8bwiJctECBeNw= github.com/btcsuite/btcwallet/walletdb v1.0.0/go.mod h1:bZTy9RyYZh9fLnSua+/CD48TJtYJSHjjYcSaszuxCCk= -github.com/btcsuite/btcwallet/walletdb v1.1.0 h1:JHAL7wZ8pX4SULabeAv/wPO9sseRWMGzE80lfVmRw6Y= -github.com/btcsuite/btcwallet/walletdb v1.1.0/go.mod h1:bZTy9RyYZh9fLnSua+/CD48TJtYJSHjjYcSaszuxCCk= +github.com/btcsuite/btcwallet/walletdb v1.2.0 h1:E0+M4jHOToAvGWZ27ew5AaDAHDi6fUiXkjUJUnoEOD0= +github.com/btcsuite/btcwallet/walletdb v1.2.0/go.mod h1:9cwc1Yyg4uvd4ZdfdoMnALji+V9gfWSMfxEdLdR5Vwc= +github.com/btcsuite/btcwallet/walletdb v1.3.1 h1:lW1Ac3F1jJY4K11P+YQtRNcP5jFk27ASfrV7C6mvRU0= +github.com/btcsuite/btcwallet/walletdb v1.3.1/go.mod h1:9cwc1Yyg4uvd4ZdfdoMnALji+V9gfWSMfxEdLdR5Vwc= github.com/btcsuite/btcwallet/wtxmgr v1.0.0 h1:aIHgViEmZmZfe0tQQqF1xyd2qBqFWxX5vZXkkbjtbeA= github.com/btcsuite/btcwallet/wtxmgr v1.0.0/go.mod h1:vc4gBprll6BP0UJ+AIGDaySoc7MdAmZf8kelfNb8CFY= github.com/btcsuite/fastsha256 v0.0.0-20160815193821-637e65642941 
h1:kij1x2aL7VE6gtx8KMIt8PGPgI5GV9LgtHFG5KaEMPY= @@ -80,6 +88,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -87,11 +96,17 @@ github.com/frankban/quicktest v1.2.2 h1:xfmOhhoH5fGPgbEAlhLpJH9p0z/0Qizio9osmvn9 github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM= +github.com/go-openapi/strfmt 
v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -103,18 +118,24 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42 h1:q3pnF5JFBNRz8sRD+IRj7Y6DMyYGTNqnZ9axTbSfoNI= github.com/google/go-cmp v0.2.1-0.20190312032427-6f77996f0c42/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v0.0.0-20170724004829-f2862b476edc h1:3NXdOHZ1YlN6SGP3FPbn4k73O2MeEp065abehRwGFxI= -github.com/grpc-ecosystem/grpc-gateway v0.0.0-20170724004829-f2862b476edc/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.8.6 h1:XvND7+MPP7Jp+JpqSZ7naSl5nVZf6k0LbL1V3EKh0zc= +github.com/grpc-ecosystem/grpc-gateway v1.8.6/go.mod 
h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad h1:heFfj7z0pGsNCekUlsFhO2jstxO4b5iQ665LjwM5mDc= github.com/jackpal/go-nat-pmp v0.0.0-20170405195558-28a68d0c24ad/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jedib0t/go-pretty v4.3.0+incompatible h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo= +github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -146,19 +167,26 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lightninglabs/gozmq v0.0.0-20190710231225-cea2a031735d h1:tt8hwvxl6fksSfchjBGaWu+pnWJQfG1OWiCM20qOSAE= -github.com/lightninglabs/gozmq v0.0.0-20190710231225-cea2a031735d/go.mod h1:vxmQPeIQxPf6Jf9rM8R+B4rKBqLA2AjttNxkFBL2Plk= -github.com/lightninglabs/neutrino v0.10.0 h1:yWVy2cOCCXbKFdpYCE9vD1fWRJDd9FtGXhUws4l9RkU= -github.com/lightninglabs/neutrino v0.10.0/go.mod h1:C3KhCMk1Mcx3j8v0qRVWM1Ow6rIJSvSPnUAq00ZNAfk= -github.com/lightningnetwork/lightning-onion v0.0.0-20190909101754-850081b08b6a 
h1:GoWPN4i4jTKRxhVNh9a2vvBBO1Y2seiJB+SopUYoKyo= -github.com/lightningnetwork/lightning-onion v0.0.0-20190909101754-850081b08b6a/go.mod h1:rigfi6Af/KqsF7Za0hOgcyq2PNH4AN70AaMRxcJkff4= +github.com/lightninglabs/gozmq v0.0.0-20191113021534-d20a764486bf h1:HZKvJUHlcXI/f/O0Avg7t8sqkPo78HFzjmeYFl6DPnc= +github.com/lightninglabs/gozmq v0.0.0-20191113021534-d20a764486bf/go.mod h1:vxmQPeIQxPf6Jf9rM8R+B4rKBqLA2AjttNxkFBL2Plk= +github.com/lightninglabs/neutrino v0.11.0/go.mod h1:CuhF0iuzg9Sp2HO6ZgXgayviFTn1QHdSTJlMncK80wg= +github.com/lightninglabs/neutrino v0.11.1-0.20200316235139-bffc52e8f200 h1:j4iZ1XlUAPQmW6oSzMcJGILYsRHNs+4O3Gk+2Ms5Dww= +github.com/lightninglabs/neutrino v0.11.1-0.20200316235139-bffc52e8f200/go.mod h1:MlZmoKa7CJP3eR1s5yB7Rm5aSyadpKkxqAwLQmog7N0= +github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d h1:QWD/5MPnaZfUVP7P8wLa4M8Td2DI7XXHXt2vhVtUgGI= +github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d/go.mod h1:KDb67YMzoh4eudnzClmvs2FbiLG9vxISmLApUkCa4uI= +github.com/lightningnetwork/lightning-onion v1.0.1 h1:qChGgS5+aPxFeR6JiUsGvanei1bn6WJpYbvosw/1604= +github.com/lightningnetwork/lightning-onion v1.0.1/go.mod h1:rigfi6Af/KqsF7Za0hOgcyq2PNH4AN70AaMRxcJkff4= github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796 h1:sjOGyegMIhvgfq5oaue6Td+hxZuf3tDC8lAPrFldqFw= github.com/ltcsuite/ltcd v0.0.0-20190101042124-f37f8bf35796/go.mod h1:3p7ZTf9V1sNPI5H8P3NkTFF4LuwMdPl2DodF60qAKqY= github.com/ltcsuite/ltcutil v0.0.0-20181217130922-17f3b04680b6/go.mod h1:8Vg/LTOO0KYa/vlHWJ6XZAevPQThGH5sufO0Hrou/lA= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8 h1:PRMAcldsl4mXKJeRNB/KVNz6TlbS6hk2Rs42PqgU3Ws= github.com/miekg/dns v0.0.0-20171125082028-79bfde677fa8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -183,21 +211,28 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= github.com/stretchr/testify 
v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 h1:tcJ6OjwOMvExLlzrAVZute09ocAGa7KqOON60++Gz4E= github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02/go.mod h1:tHlrkM198S068ZqfrO6S8HsoJq2bF3ETfTL+kt4tInY= github.com/urfave/cli v1.18.0 h1:m9MfmZWX7bwr9kUcs/Asr95j0IVXzGNNc+/5ku2m26Q= github.com/urfave/cli v1.18.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.mongodb.org/mongo-driver v1.0.3 h1:GKoji1ld3tw2aC+GX1wbr/J2fX13yNacEYoJ8Nhr0yU= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -208,12 +243,14 @@ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80= golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -223,12 +260,15 @@ golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7v golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 h1:mBVYJnbrXLA/ZCBTCe7PtEgAUP+1bg92qTaFoPHdz+8= @@ -236,6 +276,8 @@ google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/ google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= @@ -250,9 +292,12 @@ gopkg.in/macaroon.v2 v2.0.0 h1:LVWycAfeJBUjCIqfR9gqlo7I8vmiXRr51YEOZ1suop8= gopkg.in/macaroon.v2 v2.0.0/go.mod h1:+I6LnTMkm/uV5ew/0nsulNjL16SK4+C8yDmRUzHR17I= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 
h1:VpOs+IwYnYBaFnrNAeB8UUWtL3vEUnzSCL1nVjPhqrw= gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/htlcswitch/circuit_map.go b/htlcswitch/circuit_map.go index fa91bfcd79..7711f2474c 100644 --- a/htlcswitch/circuit_map.go +++ b/htlcswitch/circuit_map.go @@ -5,10 +5,10 @@ import ( "fmt" "sync" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/lnwire" ) @@ -213,13 +213,13 @@ func NewCircuitMap(cfg *CircuitMapConfig) (CircuitMap, error) { // initBuckets ensures that the primary buckets used by the circuit are // initialized so that we can assume their existence after startup. 
func (cm *circuitMap) initBuckets() error { - return cm.cfg.DB.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(circuitKeystoneKey) + return kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) error { + _, err := tx.CreateTopLevelBucket(circuitKeystoneKey) if err != nil { return err } - _, err = tx.CreateBucketIfNotExists(circuitAddKey) + _, err = tx.CreateTopLevelBucket(circuitAddKey) return err }) } @@ -238,10 +238,10 @@ func (cm *circuitMap) restoreMemState() error { pending = make(map[CircuitKey]*PaymentCircuit) ) - if err := cm.cfg.DB.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) error { // Restore any of the circuits persisted in the circuit bucket // back into memory. - circuitBkt := tx.Bucket(circuitAddKey) + circuitBkt := tx.ReadWriteBucket(circuitAddKey) if circuitBkt == nil { return ErrCorruptedCircuitMap } @@ -262,7 +262,7 @@ func (cm *circuitMap) restoreMemState() error { // Furthermore, load the keystone bucket and resurrect the // keystones used in any open circuits. - keystoneBkt := tx.Bucket(circuitKeystoneKey) + keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey) if keystoneBkt == nil { return ErrCorruptedCircuitMap } @@ -463,8 +463,8 @@ func (cm *circuitMap) TrimOpenCircuits(chanID lnwire.ShortChannelID, return nil } - return cm.cfg.DB.Update(func(tx *bbolt.Tx) error { - keystoneBkt := tx.Bucket(circuitKeystoneKey) + return kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) error { + keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey) if keystoneBkt == nil { return ErrCorruptedCircuitMap } @@ -616,8 +616,8 @@ func (cm *circuitMap) CommitCircuits(circuits ...*PaymentCircuit) ( // Write the entire batch of circuits to the persistent circuit bucket // using bolt's Batch write. This method must be called from multiple, // distinct goroutines to have any impact on performance. 
- err := cm.cfg.DB.Batch(func(tx *bbolt.Tx) error { - circuitBkt := tx.Bucket(circuitAddKey) + err := kvdb.Batch(cm.cfg.DB.Backend, func(tx kvdb.RwTx) error { + circuitBkt := tx.ReadWriteBucket(circuitAddKey) if circuitBkt == nil { return ErrCorruptedCircuitMap } @@ -706,10 +706,10 @@ func (cm *circuitMap) OpenCircuits(keystones ...Keystone) error { } cm.mtx.RUnlock() - err := cm.cfg.DB.Update(func(tx *bbolt.Tx) error { + err := kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) error { // Now, load the circuit bucket to which we will write the // already serialized circuit. - keystoneBkt := tx.Bucket(circuitKeystoneKey) + keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey) if keystoneBkt == nil { return ErrCorruptedCircuitMap } @@ -847,13 +847,13 @@ func (cm *circuitMap) DeleteCircuits(inKeys ...CircuitKey) error { } cm.mtx.Unlock() - err := cm.cfg.DB.Batch(func(tx *bbolt.Tx) error { + err := kvdb.Batch(cm.cfg.DB.Backend, func(tx kvdb.RwTx) error { for _, circuit := range removedCircuits { // If this htlc made it to an outgoing link, load the // keystone bucket from which we will remove the // outgoing circuit key. if circuit.HasKeystone() { - keystoneBkt := tx.Bucket(circuitKeystoneKey) + keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey) if keystoneBkt == nil { return ErrCorruptedCircuitMap } @@ -868,7 +868,7 @@ func (cm *circuitMap) DeleteCircuits(inKeys ...CircuitKey) error { // Remove the circuit itself based on the incoming // circuit key. 
- circuitBkt := tx.Bucket(circuitAddKey) + circuitBkt := tx.ReadWriteBucket(circuitAddKey) if circuitBkt == nil { return ErrCorruptedCircuitMap } diff --git a/htlcswitch/decayedlog.go b/htlcswitch/decayedlog.go index 6b3c62b56e..3a60e11228 100644 --- a/htlcswitch/decayedlog.go +++ b/htlcswitch/decayedlog.go @@ -8,9 +8,9 @@ import ( "sync" "sync/atomic" - "github.com/coreos/bbolt" sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) const ( @@ -56,7 +56,7 @@ type DecayedLog struct { dbPath string - db *bbolt.DB + db kvdb.Backend notifier chainntnfs.ChainNotifier @@ -92,7 +92,10 @@ func (d *DecayedLog) Start() error { // Open the boltdb for use. var err error - if d.db, err = bbolt.Open(d.dbPath, dbPermissions, nil); err != nil { + d.db, err = kvdb.Create( + kvdb.BoltBackendName, d.dbPath, true, + ) + if err != nil { return fmt.Errorf("Could not open boltdb: %v", err) } @@ -119,13 +122,13 @@ func (d *DecayedLog) Start() error { // initBuckets initializes the primary buckets used by the decayed log, namely // the shared hash bucket, and batch replay func (d *DecayedLog) initBuckets() error { - return d.db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(sharedHashBucket) + return kvdb.Update(d.db, func(tx kvdb.RwTx) error { + _, err := tx.CreateTopLevelBucket(sharedHashBucket) if err != nil { return ErrDecayedLogInit } - _, err = tx.CreateBucketIfNotExists(batchReplayBucket) + _, err = tx.CreateTopLevelBucket(batchReplayBucket) if err != nil { return ErrDecayedLogInit } @@ -196,11 +199,11 @@ func (d *DecayedLog) garbageCollector(epochClient *chainntnfs.BlockEpochEvent) { func (d *DecayedLog) gcExpiredHashes(height uint32) (uint32, error) { var numExpiredHashes uint32 - err := d.db.Batch(func(tx *bbolt.Tx) error { + err := kvdb.Batch(d.db, func(tx kvdb.RwTx) error { numExpiredHashes = 0 // Grab the shared hash bucket - sharedHashes := 
tx.Bucket(sharedHashBucket) + sharedHashes := tx.ReadWriteBucket(sharedHashBucket) if sharedHashes == nil { return fmt.Errorf("sharedHashBucket " + "is nil") @@ -246,8 +249,8 @@ func (d *DecayedLog) gcExpiredHashes(height uint32) (uint32, error) { // Delete removes a key-pair from the // sharedHashBucket. func (d *DecayedLog) Delete(hash *sphinx.HashPrefix) error { - return d.db.Batch(func(tx *bbolt.Tx) error { - sharedHashes := tx.Bucket(sharedHashBucket) + return kvdb.Batch(d.db, func(tx kvdb.RwTx) error { + sharedHashes := tx.ReadWriteBucket(sharedHashBucket) if sharedHashes == nil { return ErrDecayedLogCorrupted } @@ -261,10 +264,10 @@ func (d *DecayedLog) Delete(hash *sphinx.HashPrefix) error { func (d *DecayedLog) Get(hash *sphinx.HashPrefix) (uint32, error) { var value uint32 - err := d.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(d.db, func(tx kvdb.ReadTx) error { // Grab the shared hash bucket which stores the mapping from // truncated sha-256 hashes of shared secrets to CLTV's. - sharedHashes := tx.Bucket(sharedHashBucket) + sharedHashes := tx.ReadBucket(sharedHashBucket) if sharedHashes == nil { return fmt.Errorf("sharedHashes is nil, could " + "not retrieve CLTV value") @@ -294,8 +297,8 @@ func (d *DecayedLog) Put(hash *sphinx.HashPrefix, cltv uint32) error { var scratch [4]byte binary.BigEndian.PutUint32(scratch[:], cltv) - return d.db.Batch(func(tx *bbolt.Tx) error { - sharedHashes := tx.Bucket(sharedHashBucket) + return kvdb.Batch(d.db, func(tx kvdb.RwTx) error { + sharedHashes := tx.ReadWriteBucket(sharedHashBucket) if sharedHashes == nil { return ErrDecayedLogCorrupted } @@ -327,8 +330,8 @@ func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) { // to generate the complete replay set. If this batch was previously // processed, the replay set will be deserialized from disk. 
var replays *sphinx.ReplaySet - if err := d.db.Batch(func(tx *bbolt.Tx) error { - sharedHashes := tx.Bucket(sharedHashBucket) + if err := kvdb.Batch(d.db, func(tx kvdb.RwTx) error { + sharedHashes := tx.ReadWriteBucket(sharedHashBucket) if sharedHashes == nil { return ErrDecayedLogCorrupted } @@ -336,7 +339,7 @@ func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) { // Load the batch replay bucket, which will be used to either // retrieve the result of previously processing this batch, or // to write the result of this operation. - batchReplayBkt := tx.Bucket(batchReplayBucket) + batchReplayBkt := tx.ReadWriteBucket(batchReplayBucket) if batchReplayBkt == nil { return ErrDecayedLogCorrupted } diff --git a/htlcswitch/failure.go b/htlcswitch/failure.go index 4526e3560d..373263381f 100644 --- a/htlcswitch/failure.go +++ b/htlcswitch/failure.go @@ -9,6 +9,76 @@ import ( "github.com/lightningnetwork/lnd/lnwire" ) +// ClearTextError is an interface which is implemented by errors that occur +// when we know the underlying wire failure message. These errors are the +// opposite to opaque errors which are onion-encrypted blobs only understandable +// to the initiating node. ClearTextErrors are used when we fail a htlc at our +// node, or one of our initiated payments failed and we can decrypt the onion +// encrypted error fully. +type ClearTextError interface { + error + + // WireMessage extracts a valid wire failure message from an internal + // error which may contain additional metadata (which should not be + // exposed to the network). This value may be nil in the case where + // an unknown wire error is returned by one of our peers. + WireMessage() lnwire.FailureMessage +} + +// LinkError is an implementation of the ClearTextError interface which +// represents failures that occur on our incoming or outgoing link. +type LinkError struct { + // msg returns the wire failure associated with the error. 
+ // This value should *not* be nil, because we should always + // know the failure type for failures which occur at our own + // node. + msg lnwire.FailureMessage + + // FailureDetail enriches the wire error with additional information. + FailureDetail +} + +// NewLinkError returns a LinkError with the failure message provided. +// The failure message provided should *not* be nil, because we should +// always know the failure type for failures which occur at our own node. +func NewLinkError(msg lnwire.FailureMessage) *LinkError { + return &LinkError{msg: msg} +} + +// NewDetailedLinkError returns a link error that enriches a wire message with +// a failure detail. +func NewDetailedLinkError(msg lnwire.FailureMessage, + detail FailureDetail) *LinkError { + + return &LinkError{ + msg: msg, + FailureDetail: detail, + } +} + +// WireMessage extracts a valid wire failure message from an internal +// error which may contain additional metadata (which should not be +// exposed to the network). This value should never be nil for LinkErrors, +// because we are the ones failing the htlc. +// +// Note this is part of the ClearTextError interface. +func (l *LinkError) WireMessage() lnwire.FailureMessage { + return l.msg +} + +// Error returns the string representation of a link error. +// +// Note this is part of the ClearTextError interface. +func (l *LinkError) Error() string { + // If the link error has no failure detail, return the wire message's + // error. + if l.FailureDetail == nil { + return l.msg.Error() + } + + return l.FailureDetail.FailureString() +} + // ForwardingError wraps an lnwire.FailureMessage in a struct that also // includes the source of the error. type ForwardingError struct { @@ -18,22 +88,49 @@ type ForwardingError struct { // zero is the self node. FailureSourceIdx int - // ExtraMsg is an additional error message that callers can provide in - // order to provide context specific error details. 
- ExtraMsg string + // msg is the wire message associated with the error. This value may + // be nil in the case where we fail to decode failure message sent by + // a peer. + msg lnwire.FailureMessage +} - lnwire.FailureMessage +// WireMessage extracts a valid wire failure message from an internal +// error which may contain additional metadata (which should not be +// exposed to the network). This value may be nil in the case where +// an unknown wire error is returned by one of our peers. +// +// Note this is part of the ClearTextError interface. +func (f *ForwardingError) WireMessage() lnwire.FailureMessage { + return f.msg } // Error implements the built-in error interface. We use this method to allow // the switch or any callers to insert additional context to the error message // returned. func (f *ForwardingError) Error() string { - if f.ExtraMsg == "" { - return fmt.Sprintf("%v", f.FailureMessage) + return fmt.Sprintf( + "%v@%v", f.msg, f.FailureSourceIdx, + ) +} + +// NewForwardingError creates a new payment error which wraps a wire error +// with additional metadata. +func NewForwardingError(failure lnwire.FailureMessage, + index int) *ForwardingError { + + return &ForwardingError{ + FailureSourceIdx: index, + msg: failure, } +} - return fmt.Sprintf("%v: %v", f.FailureMessage, f.ExtraMsg) +// NewUnknownForwardingError returns a forwarding error which has a nil failure +// message. This constructor should only be used in the case where we cannot +// decode the failure we have received from a peer. 
+func NewUnknownForwardingError(index int) *ForwardingError { + return &ForwardingError{ + FailureSourceIdx: index, + } } // ErrorDecrypter is an interface that is used to decrypt the onion encrypted @@ -90,15 +187,10 @@ func (s *SphinxErrorDecrypter) DecryptError(reason lnwire.OpaqueReason) ( r := bytes.NewReader(failure.Message) failureMsg, err := lnwire.DecodeFailure(r, 0) if err != nil { - return &ForwardingError{ - FailureSourceIdx: failure.SenderIdx, - }, nil + return NewUnknownForwardingError(failure.SenderIdx), nil } - return &ForwardingError{ - FailureSourceIdx: failure.SenderIdx, - FailureMessage: failureMsg, - }, nil + return NewForwardingError(failureMsg, failure.SenderIdx), nil } // A compile time check to ensure ErrorDecrypter implements the Deobfuscator diff --git a/htlcswitch/failure_detail.go b/htlcswitch/failure_detail.go new file mode 100644 index 0000000000..341688d124 --- /dev/null +++ b/htlcswitch/failure_detail.go @@ -0,0 +1,97 @@ +package htlcswitch + +// FailureDetail is an interface implemented by failures that occur on +// our incoming or outgoing link, or within the switch itself. +type FailureDetail interface { + // FailureString returns the string representation of a failure + // detail. + FailureString() string +} + +// OutgoingFailure is an enum which is used to enrich failures which occur in +// the switch or on our outgoing link with additional metadata. +type OutgoingFailure int + +const ( + // OutgoingFailureNone is returned when the wire message contains + // sufficient information. + OutgoingFailureNone OutgoingFailure = iota + + // OutgoingFailureDecodeError indicates that we could not decode the + // failure reason provided for a failed payment. + OutgoingFailureDecodeError + + // OutgoingFailureLinkNotEligible indicates that a routing attempt was + // made over a link that is not eligible for routing. 
+ OutgoingFailureLinkNotEligible + + // OutgoingFailureOnChainTimeout indicates that a payment had to be + // timed out on chain before it got past the first hop by us or the + // remote party. + OutgoingFailureOnChainTimeout + + // OutgoingFailureHTLCExceedsMax is returned when a htlc exceeds our + // policy's maximum htlc amount. + OutgoingFailureHTLCExceedsMax + + // OutgoingFailureInsufficientBalance is returned when we cannot route a + // htlc due to insufficient outgoing capacity. + OutgoingFailureInsufficientBalance + + // OutgoingFailureCircularRoute is returned when an attempt is made + // to forward a htlc through our node which arrives and leaves on the + // same channel. + OutgoingFailureCircularRoute + + // OutgoingFailureIncompleteForward is returned when we cancel an incomplete + // forward. + OutgoingFailureIncompleteForward + + // OutgoingFailureDownstreamHtlcAdd is returned when we fail to add a + // downstream htlc to our outgoing link. + OutgoingFailureDownstreamHtlcAdd + + // OutgoingFailureForwardsDisabled is returned when the switch is + // configured to disallow forwards. + OutgoingFailureForwardsDisabled +) + +// FailureString returns the string representation of a failure detail. +// +// Note: it is part of the FailureDetail interface. 
+func (fd OutgoingFailure) FailureString() string { + switch fd { + case OutgoingFailureNone: + return "no failure detail" + + case OutgoingFailureDecodeError: + return "could not decode wire failure" + + case OutgoingFailureLinkNotEligible: + return "link not eligible" + + case OutgoingFailureOnChainTimeout: + return "payment was resolved on-chain, then canceled back" + + case OutgoingFailureHTLCExceedsMax: + return "htlc exceeds maximum policy amount" + + case OutgoingFailureInsufficientBalance: + return "insufficient bandwidth to route htlc" + + case OutgoingFailureCircularRoute: + return "same incoming and outgoing channel" + + case OutgoingFailureIncompleteForward: + return "failed after detecting incomplete forward" + + case OutgoingFailureDownstreamHtlcAdd: + return "could not add downstream htlc" + + case OutgoingFailureForwardsDisabled: + return "node configured to disallow forwards" + + default: + return "unknown failure detail" + } +} diff --git a/htlcswitch/hop/iterator.go b/htlcswitch/hop/iterator.go index 062ca87949..5c8afed2bd 100644 --- a/htlcswitch/hop/iterator.go +++ b/htlcswitch/hop/iterator.go @@ -16,15 +16,13 @@ import ( // interpret the forwarding information encoded within the HTLC packet, and hop // to encode the forwarding information for the _next_ hop. type Iterator interface { - // ForwardingInstructions returns the set of fields that detail exactly - // _how_ this hop should forward the HTLC to the next hop. - // Additionally, the information encoded within the returned - // ForwardingInfo is to be used by each hop to authenticate the - // information given to it by the prior hop. - ForwardingInstructions() (ForwardingInfo, error) - - // ExtraOnionBlob returns the additional EOB data (if available). - ExtraOnionBlob() []byte + // HopPayload returns the set of fields that detail exactly _how_ this + // hop should forward the HTLC to the next hop. 
Additionally, the + // information encoded within the returned ForwardingInfo is to be used + // by each hop to authenticate the information given to it by the prior + // hop. The payload will also contain any additional TLV fields provided + // by the sender. + HopPayload() (*Payload, error) // EncodeNextHop encodes the onion packet destined for the next hop // into the passed io.Writer. @@ -72,50 +70,35 @@ func (r *sphinxHopIterator) EncodeNextHop(w io.Writer) error { return r.processedPacket.NextPacket.Encode(w) } -// ForwardingInstructions returns the set of fields that detail exactly _how_ -// this hop should forward the HTLC to the next hop. Additionally, the -// information encoded within the returned ForwardingInfo is to be used by each -// hop to authenticate the information given to it by the prior hop. +// HopPayload returns the set of fields that detail exactly _how_ this hop +// should forward the HTLC to the next hop. Additionally, the information +// encoded within the returned ForwardingInfo is to be used by each hop to +// authenticate the information given to it by the prior hop. The payload will +// also contain any additional TLV fields provided by the sender. // // NOTE: Part of the HopIterator interface. -func (r *sphinxHopIterator) ForwardingInstructions() (ForwardingInfo, error) { +func (r *sphinxHopIterator) HopPayload() (*Payload, error) { switch r.processedPacket.Payload.Type { + // If this is the legacy payload, then we'll extract the information // directly from the pre-populated ForwardingInstructions field. case sphinx.PayloadLegacy: fwdInst := r.processedPacket.ForwardingInstructions - p := NewLegacyPayload(fwdInst) - - return p.ForwardingInfo(), nil + return NewLegacyPayload(fwdInst), nil // Otherwise, if this is the TLV payload, then we'll make a new stream // to decode only what we need to make routing decisions. 
case sphinx.PayloadTLV: - p, err := NewPayloadFromReader(bytes.NewReader( + return NewPayloadFromReader(bytes.NewReader( r.processedPacket.Payload.Payload, )) - if err != nil { - return ForwardingInfo{}, err - } - - return p.ForwardingInfo(), nil default: - return ForwardingInfo{}, fmt.Errorf("unknown "+ - "sphinx payload type: %v", + return nil, fmt.Errorf("unknown sphinx payload type: %v", r.processedPacket.Payload.Type) } } -// ExtraOnionBlob returns the additional EOB data (if available). -func (r *sphinxHopIterator) ExtraOnionBlob() []byte { - if r.processedPacket.Payload.Type == sphinx.PayloadLegacy { - return nil - } - - return r.processedPacket.Payload.Payload -} - // ExtractErrorEncrypter decodes and returns the ErrorEncrypter for this hop, // along with a failure code to signal if the decoding was successful. The // ErrorEncrypter is used to encrypt errors back to the sender in the event that @@ -201,6 +184,30 @@ func (p *OnionProcessor) DecodeHopIterator(r io.Reader, rHash []byte, return makeSphinxHopIterator(onionPkt, sphinxPacket), lnwire.CodeNone } +// ReconstructHopIterator attempts to decode a valid sphinx packet from the passed io.Reader +// instance using the rHash as the associated data when checking the relevant +// MACs during the decoding process. +func (p *OnionProcessor) ReconstructHopIterator(r io.Reader, rHash []byte) ( + Iterator, error) { + + onionPkt := &sphinx.OnionPacket{} + if err := onionPkt.Decode(r); err != nil { + return nil, err + } + + // Attempt to process the Sphinx packet. We include the payment hash of + // the HTLC as it's authenticated within the Sphinx packet itself as + // associated data in order to thwart attempts a replay attacks. In the + // case of a replay, an attacker is *forced* to use the same payment + // hash twice, thereby losing their money entirely. 
+ sphinxPacket, err := p.router.ReconstructOnionPacket(onionPkt, rHash) + if err != nil { + return nil, err + } + + return makeSphinxHopIterator(onionPkt, sphinxPacket), nil +} + // DecodeHopIteratorRequest encapsulates all date necessary to process an onion // packet, perform sphinx replay detection, and schedule the entry for garbage // collection. diff --git a/htlcswitch/hop/iterator_test.go b/htlcswitch/hop/iterator_test.go index 822e8794b6..20c5632b21 100644 --- a/htlcswitch/hop/iterator_test.go +++ b/htlcswitch/hop/iterator_test.go @@ -85,12 +85,13 @@ func TestSphinxHopIteratorForwardingInstructions(t *testing.T) { for i, testCase := range testCases { iterator.processedPacket = testCase.sphinxPacket - fwdInfo, err := iterator.ForwardingInstructions() + pld, err := iterator.HopPayload() if err != nil { t.Fatalf("#%v: unable to extract forwarding "+ "instructions: %v", i, err) } + fwdInfo := pld.ForwardingInfo() if fwdInfo != testCase.expectedFwdInfo { t.Fatalf("#%v: wrong fwding info: expected %v, got %v", i, spew.Sdump(testCase.expectedFwdInfo), diff --git a/htlcswitch/hop/payload.go b/htlcswitch/hop/payload.go index 2fd1cd860c..d8aafd9ca0 100644 --- a/htlcswitch/hop/payload.go +++ b/htlcswitch/hop/payload.go @@ -5,21 +5,56 @@ import ( "fmt" "io" - "github.com/lightningnetwork/lightning-onion" + sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/tlv" ) +// PayloadViolation is an enum encapsulating the possible invalid payload +// violations that can occur when processing or validating a payload. +type PayloadViolation byte + +const ( + // OmittedViolation indicates that a type was expected to be found the + // payload but was absent. + OmittedViolation PayloadViolation = iota + + // IncludedViolation indicates that a type was expected to be omitted + // from the payload but was present. 
+ IncludedViolation + + // RequiredViolation indicates that an unknown even type was found in + // the payload that we could not process. + RequiredViolation +) + +// String returns a human-readable description of the violation as a verb. +func (v PayloadViolation) String() string { + switch v { + case OmittedViolation: + return "omitted" + + case IncludedViolation: + return "included" + + case RequiredViolation: + return "required" + + default: + return "unknown violation" + } +} + // ErrInvalidPayload is an error returned when a parsed onion payload either // included or omitted incorrect records for a particular hop type. type ErrInvalidPayload struct { // Type the record's type that cause the violation. Type tlv.Type - // Ommitted if true, signals that the sender did not include the record. - // Otherwise, the sender included the record when it shouldn't have. - Omitted bool + // Violation is an enum indicating the type of violation detected in + // processing Type. + Violation PayloadViolation // FinalHop if true, indicates that the violation is for the final hop // in the route (identified by next hop id), otherwise the violation is @@ -33,13 +68,9 @@ func (e ErrInvalidPayload) Error() string { if e.FinalHop { hopType = "final" } - violation := "included" - if e.Omitted { - violation = "omitted" - } - return fmt.Sprintf("onion payload for %s hop %s record with type %d", - hopType, violation, e.Type) + return fmt.Sprintf("onion payload for %s hop %v record with type %d", + hopType, e.Violation, e.Type) } // Payload encapsulates all information delivered to a hop in an onion payload. @@ -50,6 +81,14 @@ type Payload struct { // FwdInfo holds the basic parameters required for HTLC forwarding, e.g. // amount, cltv, and next hop. FwdInfo ForwardingInfo + + // MPP holds the info provided in an option_mpp record when parsed from + // a TLV onion payload. 
+ MPP *record.MPP + + // customRecords are user-defined records in the custom type range that + // were included in the payload. + customRecords record.CustomSet } // NewLegacyPayload builds a Payload from the amount, cltv, and next hop @@ -64,6 +103,7 @@ func NewLegacyPayload(f *sphinx.HopData) *Payload { AmountToForward: lnwire.MilliSatoshi(f.ForwardAmount), OutgoingCTLV: f.OutgoingCltv, }, + customRecords: make(record.CustomSet), } } @@ -74,12 +114,14 @@ func NewPayloadFromReader(r io.Reader) (*Payload, error) { cid uint64 amt uint64 cltv uint32 + mpp = &record.MPP{} ) tlvStream, err := tlv.NewStream( record.NewAmtToFwdRecord(&amt), record.NewLockTimeRecord(&cltv), record.NewNextHopIDRecord(&cid), + mpp.Record(), ) if err != nil { return nil, err @@ -90,15 +132,33 @@ func NewPayloadFromReader(r io.Reader) (*Payload, error) { return nil, err } - nextHop := lnwire.NewShortChanIDFromInt(cid) - // Validate whether the sender properly included or omitted tlv records // in accordance with BOLT 04. + nextHop := lnwire.NewShortChanIDFromInt(cid) err = ValidateParsedPayloadTypes(parsedTypes, nextHop) if err != nil { return nil, err } + // Check for violation of the rules for mandatory fields. + violatingType := getMinRequiredViolation(parsedTypes) + if violatingType != nil { + return nil, ErrInvalidPayload{ + Type: *violatingType, + Violation: RequiredViolation, + FinalHop: nextHop == Exit, + } + } + + // If no MPP field was parsed, set the MPP field on the resulting + // payload to nil. + if _, ok := parsedTypes[record.MPPOnionType]; !ok { + mpp = nil + } + + // Filter out the custom records. 
+ customRecords := NewCustomRecords(parsedTypes) + return &Payload{ FwdInfo: ForwardingInfo{ Network: BitcoinNetwork, @@ -106,6 +166,8 @@ func NewPayloadFromReader(r io.Reader) (*Payload, error) { AmountToForward: lnwire.MilliSatoshi(amt), OutgoingCTLV: cltv, }, + MPP: mpp, + customRecords: customRecords, }, nil } @@ -115,11 +177,24 @@ func (h *Payload) ForwardingInfo() ForwardingInfo { return h.FwdInfo } +// NewCustomRecords filters the types parsed from the tlv stream for custom +// records. +func NewCustomRecords(parsedTypes tlv.TypeMap) record.CustomSet { + customRecords := make(record.CustomSet) + for t, parseResult := range parsedTypes { + if parseResult == nil || t < record.CustomTypeStart { + continue + } + customRecords[uint64(t)] = parseResult + } + return customRecords +} + // ValidateParsedPayloadTypes checks the types parsed from a hop payload to // ensure that the proper fields are either included or omitted. The finalHop // boolean should be true if the payload was parsed for an exit hop. The // requirements for this method are described in BOLT 04. -func ValidateParsedPayloadTypes(parsedTypes tlv.TypeSet, +func ValidateParsedPayloadTypes(parsedTypes tlv.TypeMap, nextHop lnwire.ShortChannelID) error { isFinalHop := nextHop == Exit @@ -127,23 +202,24 @@ func ValidateParsedPayloadTypes(parsedTypes tlv.TypeSet, _, hasAmt := parsedTypes[record.AmtOnionType] _, hasLockTime := parsedTypes[record.LockTimeOnionType] _, hasNextHop := parsedTypes[record.NextHopOnionType] + _, hasMPP := parsedTypes[record.MPPOnionType] switch { // All hops must include an amount to forward. case !hasAmt: return ErrInvalidPayload{ - Type: record.AmtOnionType, - Omitted: true, - FinalHop: isFinalHop, + Type: record.AmtOnionType, + Violation: OmittedViolation, + FinalHop: isFinalHop, } // All hops must include a cltv expiry. 
case !hasLockTime: return ErrInvalidPayload{ - Type: record.LockTimeOnionType, - Omitted: true, - FinalHop: isFinalHop, + Type: record.LockTimeOnionType, + Violation: OmittedViolation, + FinalHop: isFinalHop, } // The exit hop should omit the next hop id. If nextHop != Exit, the @@ -151,11 +227,65 @@ func ValidateParsedPayloadTypes(parsedTypes tlv.TypeSet, // inclusion at intermediate hops directly. case isFinalHop && hasNextHop: return ErrInvalidPayload{ - Type: record.NextHopOnionType, - Omitted: false, - FinalHop: true, + Type: record.NextHopOnionType, + Violation: IncludedViolation, + FinalHop: true, + } + + // Intermediate nodes should never receive MPP fields. + case !isFinalHop && hasMPP: + return ErrInvalidPayload{ + Type: record.MPPOnionType, + Violation: IncludedViolation, + FinalHop: isFinalHop, } } return nil } + +// MultiPath returns the record corresponding the option_mpp parsed from the +// onion payload. +func (h *Payload) MultiPath() *record.MPP { + return h.MPP +} + +// CustomRecords returns the custom tlv type records that were parsed from the +// payload. +func (h *Payload) CustomRecords() record.CustomSet { + return h.customRecords +} + +// getMinRequiredViolation checks for unrecognized required (even) fields in the +// standard range and returns the lowest required type. Always returning the +// lowest required type allows a failure message to be deterministic. +func getMinRequiredViolation(set tlv.TypeMap) *tlv.Type { + var ( + requiredViolation bool + minRequiredViolationType tlv.Type + ) + for t, parseResult := range set { + // If a type is even but not known to us, we cannot process the + // payload. We are required to understand a field that we don't + // support. + // + // We always accept custom fields, because a higher level + // application may understand them. 
+ if parseResult == nil || t%2 != 0 || + t >= record.CustomTypeStart { + + continue + } + + if !requiredViolation || t < minRequiredViolationType { + minRequiredViolationType = t + } + requiredViolation = true + } + + if requiredViolation { + return &minRequiredViolationType + } + + return nil +} diff --git a/htlcswitch/hop/payload_test.go b/htlcswitch/hop/payload_test.go index 0c9342d55b..b0a92534f4 100644 --- a/htlcswitch/hop/payload_test.go +++ b/htlcswitch/hop/payload_test.go @@ -6,23 +6,36 @@ import ( "testing" "github.com/lightningnetwork/lnd/htlcswitch/hop" + "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/record" ) type decodePayloadTest struct { - name string - payload []byte - expErr error + name string + payload []byte + expErr error + expCustomRecords map[uint64][]byte + shouldHaveMPP bool } var decodePayloadTests = []decodePayloadTest{ + { + name: "final hop valid", + payload: []byte{0x02, 0x00, 0x04, 0x00}, + }, + { + name: "intermediate hop valid", + payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + }, { name: "final hop no amount", payload: []byte{0x04, 0x00}, expErr: hop.ErrInvalidPayload{ - Type: record.AmtOnionType, - Omitted: true, - FinalHop: true, + Type: record.AmtOnionType, + Violation: hop.OmittedViolation, + FinalHop: true, }, }, { @@ -31,18 +44,18 @@ var decodePayloadTests = []decodePayloadTest{ 0x00, 0x00, 0x00, 0x00, }, expErr: hop.ErrInvalidPayload{ - Type: record.AmtOnionType, - Omitted: true, - FinalHop: false, + Type: record.AmtOnionType, + Violation: hop.OmittedViolation, + FinalHop: false, }, }, { name: "final hop no expiry", payload: []byte{0x02, 0x00}, expErr: hop.ErrInvalidPayload{ - Type: record.LockTimeOnionType, - Omitted: true, - FinalHop: true, + Type: record.LockTimeOnionType, + Violation: hop.OmittedViolation, + FinalHop: true, }, }, { @@ -51,9 +64,9 @@ var decodePayloadTests = []decodePayloadTest{ 0x00, 0x00, 0x00, 0x00, }, expErr: 
hop.ErrInvalidPayload{ - Type: record.LockTimeOnionType, - Omitted: true, - FinalHop: false, + Type: record.LockTimeOnionType, + Violation: hop.OmittedViolation, + FinalHop: false, }, }, { @@ -62,11 +75,125 @@ var decodePayloadTests = []decodePayloadTest{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }, expErr: hop.ErrInvalidPayload{ - Type: record.NextHopOnionType, - Omitted: false, - FinalHop: true, + Type: record.NextHopOnionType, + Violation: hop.IncludedViolation, + FinalHop: true, + }, + }, + { + name: "required type after omitted hop id", + payload: []byte{0x02, 0x00, 0x04, 0x00, 0x0a, 0x00}, + expErr: hop.ErrInvalidPayload{ + Type: 10, + Violation: hop.RequiredViolation, + FinalHop: true, + }, + }, + { + name: "required type after included hop id", + payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, + }, + expErr: hop.ErrInvalidPayload{ + Type: 10, + Violation: hop.RequiredViolation, + FinalHop: false, + }, + }, + { + name: "required type zero final hop", + payload: []byte{0x00, 0x00, 0x02, 0x00, 0x04, 0x00}, + expErr: hop.ErrInvalidPayload{ + Type: 0, + Violation: hop.RequiredViolation, + FinalHop: true, + }, + }, + { + name: "required type zero final hop zero sid", + payload: []byte{0x00, 0x00, 0x02, 0x00, 0x04, 0x00, 0x06, 0x08, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + expErr: hop.ErrInvalidPayload{ + Type: record.NextHopOnionType, + Violation: hop.IncludedViolation, + FinalHop: true, + }, + }, + { + name: "required type zero intermediate hop", + payload: []byte{0x00, 0x00, 0x02, 0x00, 0x04, 0x00, 0x06, 0x08, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + expErr: hop.ErrInvalidPayload{ + Type: 0, + Violation: hop.RequiredViolation, + FinalHop: false, + }, + }, + { + name: "required type in custom range", + payload: []byte{0x02, 0x00, 0x04, 0x00, + 0xfe, 0x00, 0x01, 0x00, 0x00, 0x02, 0x10, 0x11, + }, + expCustomRecords: map[uint64][]byte{ + 65536: {0x10, 0x11}, + }, + }, + { + 
name: "valid intermediate hop", + payload: []byte{0x02, 0x00, 0x04, 0x00, 0x06, 0x08, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + expErr: nil, + }, + { + name: "valid final hop", + payload: []byte{0x02, 0x00, 0x04, 0x00}, + expErr: nil, + }, + { + name: "intermediate hop with mpp", + payload: []byte{ + // amount + 0x02, 0x00, + // cltv + 0x04, 0x00, + // next hop id + 0x06, 0x08, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // mpp + 0x08, 0x21, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x08, + }, + expErr: hop.ErrInvalidPayload{ + Type: record.MPPOnionType, + Violation: hop.IncludedViolation, + FinalHop: false, }, }, + { + name: "final hop with mpp", + payload: []byte{ + // amount + 0x02, 0x00, + // cltv + 0x04, 0x00, + // mpp + 0x08, 0x21, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x08, + }, + expErr: nil, + shouldHaveMPP: true, + }, } // TestDecodeHopPayloadRecordValidation asserts that parsing the payloads in the @@ -81,9 +208,47 @@ func TestDecodeHopPayloadRecordValidation(t *testing.T) { } func testDecodeHopPayloadValidation(t *testing.T, test decodePayloadTest) { - _, err := hop.NewPayloadFromReader(bytes.NewReader(test.payload)) + var ( + testTotalMsat = lnwire.MilliSatoshi(8) + testAddr = [32]byte{ + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + } + ) + + p, err := hop.NewPayloadFromReader(bytes.NewReader(test.payload)) if !reflect.DeepEqual(test.expErr, err) { t.Fatalf("expected error mismatch, want: %v, got: %v", test.expErr, err) } + if err != nil { + return + } + + // 
Assert MPP fields if we expect them. + if test.shouldHaveMPP { + if p.MPP == nil { + t.Fatalf("payload should have MPP record") + } + if p.MPP.TotalMsat() != testTotalMsat { + t.Fatalf("invalid total msat") + } + if p.MPP.PaymentAddr() != testAddr { + t.Fatalf("invalid payment addr") + } + } else if p.MPP != nil { + t.Fatalf("unexpected MPP payload") + } + + // Convert expected nil map to empty map, because we always expect an + // initiated map from the payload. + expCustomRecords := make(record.CustomSet) + if test.expCustomRecords != nil { + expCustomRecords = test.expCustomRecords + } + if !reflect.DeepEqual(expCustomRecords, p.CustomRecords()) { + t.Fatalf("invalid custom records") + } } diff --git a/htlcswitch/htlcnotifier.go b/htlcswitch/htlcnotifier.go new file mode 100644 index 0000000000..25953e6566 --- /dev/null +++ b/htlcswitch/htlcnotifier.go @@ -0,0 +1,429 @@ +package htlcswitch + +import ( + "fmt" + "strings" + "sync" + "time" + + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/htlcswitch/hop" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/subscribe" +) + +// HtlcNotifier notifies clients of htlc forwards, failures and settles for +// htlcs that the switch handles. It takes subscriptions for its events and +// notifies them when htlc events occur. These are served on a best-effort +// basis; events are not persisted, delivery is not guaranteed (in the event +// of a crash in the switch, forward events may be lost) and some events may +// be replayed upon restart. Events consumed from this package should be +// de-duplicated by the htlc's unique combination of incoming+outgoing circuit +// and not relied upon for critical operations. +// +// The htlc notifier sends the following kinds of events: +// Forwarding Event: +// - Represents a htlc which is forwarded onward from our node. +// - Present for htlc forwards through our node and local sends. 
+// +// Link Failure Event: +// - Indicates that a htlc has failed on our incoming or outgoing link, +// with an incoming boolean which indicates where the failure occurred. +// - Incoming link failures are present for failed attempts to pay one of +// our invoices (insufficient amount or mpp timeout, for example) and for +// forwards that we cannot decode to forward onwards. +// - Outgoing link failures are present for forwards or local payments that +// do not meet our outgoing link's policy (insufficient fees, for example) +// and when we fail to forward the payment on (insufficient outgoing +// capacity, or an unknown outgoing link). +// +// Forwarding Failure Event: +// - Forwarding failures indicate that a htlc we forwarded has failed at +// another node down the route. +// - Present for local sends and htlc forwards which fail after they left +// our node. +// +// Settle event: +// - Settle events are present when a htlc which we added is settled through +// the release of a preimage. +// - Present for local receives, and successful local sends or forwards. +// +// Each htlc is identified by its incoming and outgoing circuit key. Htlcs, +// and their subsequent settles or fails, can be identified by the combination +// of incoming and outgoing circuits. Note that receives to our node will +// have a zero outgoing circuit key because the htlc terminates at our +// node, and sends from our node will have a zero incoming circuit key because +// the send originates at our node. +type HtlcNotifier struct { + started sync.Once + stopped sync.Once + + // now returns the current time, it is set in the htlcnotifier to allow + // for timestamp mocking in tests. + now func() time.Time + + ntfnServer *subscribe.Server +} + +// NewHtlcNotifier creates a new HtlcNotifier which gets htlc forwarded, +// failed and settled events from links our node has established with peers +// and sends notifications to subscribing clients. 
+func NewHtlcNotifier(now func() time.Time) *HtlcNotifier { + return &HtlcNotifier{ + now: now, + ntfnServer: subscribe.NewServer(), + } +} + +// Start starts the HtlcNotifier and all goroutines it needs to consume +// events and provide subscriptions to clients. +func (h *HtlcNotifier) Start() error { + var err error + h.started.Do(func() { + log.Trace("HtlcNotifier starting") + err = h.ntfnServer.Start() + }) + return err +} + +// Stop signals the notifier for a graceful shutdown. +func (h *HtlcNotifier) Stop() { + h.stopped.Do(func() { + if err := h.ntfnServer.Stop(); err != nil { + log.Warnf("error stopping htlc notifier: %v", err) + } + }) +} + +// SubscribeHtlcEvents returns a subscribe.Client that will receive updates +// any time the server is made aware of a new event. +func (h *HtlcNotifier) SubscribeHtlcEvents() (*subscribe.Client, error) { + return h.ntfnServer.Subscribe() +} + +// HtlcKey uniquely identifies the htlc. +type HtlcKey struct { + // IncomingCircuit is the channel an htlc id of the incoming htlc. + IncomingCircuit channeldb.CircuitKey + + // OutgoingCircuit is the channel and htlc id of the outgoing htlc. + OutgoingCircuit channeldb.CircuitKey +} + +// String returns a string representation of a htlc key. +func (k HtlcKey) String() string { + switch { + case k.IncomingCircuit.ChanID == hop.Source: + return k.OutgoingCircuit.String() + + case k.OutgoingCircuit.ChanID == hop.Exit: + return k.IncomingCircuit.String() + + default: + return fmt.Sprintf("%v -> %v", k.IncomingCircuit, + k.OutgoingCircuit) + } +} + +// HtlcInfo provides the details of a htlc that our node has processed. For +// forwards, incoming and outgoing values are set, whereas sends and receives +// will only have outgoing or incoming details set. +type HtlcInfo struct { + // IncomingTimelock is the time lock of the htlc on our incoming + // channel. + IncomingTimeLock uint32 + + // OutgoingTimelock is the time lock the htlc on our outgoing channel. 
+ OutgoingTimeLock uint32 + + // IncomingAmt is the amount of the htlc on our incoming channel. + IncomingAmt lnwire.MilliSatoshi + + // OutgoingAmt is the amount of the htlc on our outgoing channel. + OutgoingAmt lnwire.MilliSatoshi +} + +// String returns a string representation of a htlc. +func (h HtlcInfo) String() string { + var details []string + + // If the incoming information is not zero, as is the case for a send, + // we include the incoming amount and timelock. + if h.IncomingAmt != 0 || h.IncomingTimeLock != 0 { + str := fmt.Sprintf("incoming amount: %v, "+ + "incoming timelock: %v", h.IncomingAmt, + h.IncomingTimeLock) + + details = append(details, str) + } + + // If the outgoing information is not zero, as is the case for a + // receive, we include the outgoing amount and timelock. + if h.OutgoingAmt != 0 || h.OutgoingTimeLock != 0 { + str := fmt.Sprintf("outgoing amount: %v, "+ + "outgoing timelock: %v", h.OutgoingAmt, + h.OutgoingTimeLock) + + details = append(details, str) + } + + return strings.Join(details, ", ") +} + +// HtlcEventType represents the type of event that a htlc was part of. +type HtlcEventType int + +const ( + // HtlcEventTypeSend represents a htlc that was part of a send from + // our node. + HtlcEventTypeSend HtlcEventType = iota + + // HtlcEventTypeReceive represents a htlc that was part of a receive + // to our node. + HtlcEventTypeReceive + + // HtlcEventTypeForward represents a htlc that was forwarded through + // our node. + HtlcEventTypeForward +) + +// String returns a string representation of a htlc event type. +func (h HtlcEventType) String() string { + switch h { + case HtlcEventTypeSend: + return "send" + + case HtlcEventTypeReceive: + return "receive" + + case HtlcEventTypeForward: + return "forward" + + default: + return "unknown" + } +} + +// ForwardingEvent represents a htlc that was forwarded onwards from our node. 
+// Sends which originate from our node will report forward events with zero +// incoming circuits in their htlc key. +type ForwardingEvent struct { + // HtlcKey uniquely identifies the htlc, and can be used to match the + // forwarding event with subsequent settle/fail events. + HtlcKey + + // HtlcInfo contains details about the htlc. + HtlcInfo + + // HtlcEventType classifies the event as part of a local send or + // receive, or as part of a forward. + HtlcEventType + + // Timestamp is the time when this htlc was forwarded. + Timestamp time.Time +} + +// LinkFailEvent describes a htlc that failed on our incoming or outgoing +// link. The incoming bool is true for failures on incoming links, and false +// for failures on outgoing links. The failure reason is provided by a lnwire +// failure message which is enriched with a failure detail in the cases where +// the wire failure message does not contain full information about the +// failure. +type LinkFailEvent struct { + // HtlcKey uniquely identifies the htlc. + HtlcKey + + // HtlcInfo contains details about the htlc. + HtlcInfo + + // HtlcEventType classifies the event as part of a local send or + // receive, or as part of a forward. + HtlcEventType + + // LinkError is the reason that we failed the htlc. + LinkError *LinkError + + // Incoming is true if the htlc was failed on an incoming link. + // If it failed on the outgoing link, it is false. + Incoming bool + + // Timestamp is the time when the link failure occurred. + Timestamp time.Time +} + +// ForwardingFailEvent represents a htlc failure which occurred down the line +// after we forwarded a htlc onwards. An error is not included in this event +// because errors returned down the route are encrypted. HtlcInfo is not +// reliably available for forwarding failures, so it is omitted. These events +// should be matched with their corresponding forward event to obtain this +// information. 
+type ForwardingFailEvent struct { + // HtlcKey uniquely identifies the htlc, and can be used to match the + // htlc with its corresponding forwarding event. + HtlcKey + + // HtlcEventType classifies the event as part of a local send or + // receive, or as part of a forward. + HtlcEventType + + // Timestamp is the time when the forwarding failure was received. + Timestamp time.Time +} + +// SettleEvent represents a htlc that was settled. HtlcInfo is not reliably +// available for forwarding failures, so it is omitted. These events should +// be matched with corresponding forward events or invoices (for receives) +// to obtain additional information about the htlc. +type SettleEvent struct { + // HtlcKey uniquely identifies the htlc, and can be used to match + // forwards with their corresponding forwarding event. + HtlcKey + + // HtlcEventType classifies the event as part of a local send or + // receive, or as part of a forward. + HtlcEventType + + // Timestamp is the time when this htlc was settled. + Timestamp time.Time +} + +// NotifyForwardingEvent notifies the HtlcNotifier than a htlc has been +// forwarded. +// +// Note this is part of the htlcNotifier interface. +func (h *HtlcNotifier) NotifyForwardingEvent(key HtlcKey, info HtlcInfo, + eventType HtlcEventType) { + + event := &ForwardingEvent{ + HtlcKey: key, + HtlcInfo: info, + HtlcEventType: eventType, + Timestamp: h.now(), + } + + log.Tracef("Notifying forward event: %v over %v, %v", eventType, key, + info) + + if err := h.ntfnServer.SendUpdate(event); err != nil { + log.Warnf("Unable to send forwarding event: %v", err) + } +} + +// NotifyLinkFailEvent notifies that a htlc has failed on our incoming +// or outgoing link. +// +// Note this is part of the htlcNotifier interface. 
+func (h *HtlcNotifier) NotifyLinkFailEvent(key HtlcKey, info HtlcInfo, + eventType HtlcEventType, linkErr *LinkError, incoming bool) { + + event := &LinkFailEvent{ + HtlcKey: key, + HtlcInfo: info, + HtlcEventType: eventType, + LinkError: linkErr, + Incoming: incoming, + Timestamp: h.now(), + } + + log.Tracef("Notifying link failure event: %v over %v, %v", eventType, + key, info) + + if err := h.ntfnServer.SendUpdate(event); err != nil { + log.Warnf("Unable to send link fail event: %v", err) + } +} + +// NotifyForwardingFailEvent notifies the HtlcNotifier that a htlc we +// forwarded has failed down the line. +// +// Note this is part of the htlcNotifier interface. +func (h *HtlcNotifier) NotifyForwardingFailEvent(key HtlcKey, + eventType HtlcEventType) { + + event := &ForwardingFailEvent{ + HtlcKey: key, + HtlcEventType: eventType, + Timestamp: h.now(), + } + + log.Tracef("Notifying forwarding failure event: %v over %v", eventType, + key) + + if err := h.ntfnServer.SendUpdate(event); err != nil { + log.Warnf("Unable to send forwarding fail event: %v", err) + } +} + +// NotifySettleEvent notifies the HtlcNotifier that a htlc that we committed +// to as part of a forward or a receive to our node has been settled. +// +// Note this is part of the htlcNotifier interface. +func (h *HtlcNotifier) NotifySettleEvent(key HtlcKey, eventType HtlcEventType) { + event := &SettleEvent{ + HtlcKey: key, + HtlcEventType: eventType, + Timestamp: h.now(), + } + + log.Tracef("Notifying settle event: %v over %v", eventType, key) + + if err := h.ntfnServer.SendUpdate(event); err != nil { + log.Warnf("Unable to send settle event: %v", err) + } +} + +// newHtlc key returns a htlc key for the packet provided. If the packet +// has a zero incoming channel ID, the packet is for one of our own sends, +// which has the payment id stashed in the incoming htlc id. 
If this is the +// case, we replace the incoming htlc id with zero so that the notifier +// consistently reports zero circuit keys for events that terminate or +// originate at our node. +func newHtlcKey(pkt *htlcPacket) HtlcKey { + htlcKey := HtlcKey{ + IncomingCircuit: channeldb.CircuitKey{ + ChanID: pkt.incomingChanID, + HtlcID: pkt.incomingHTLCID, + }, + OutgoingCircuit: CircuitKey{ + ChanID: pkt.outgoingChanID, + HtlcID: pkt.outgoingHTLCID, + }, + } + + // If the packet has a zero incoming channel ID, it is a send that was + // initiated at our node. If this is the case, our internal pid is in + // the incoming htlc ID, so we overwrite it with 0 for notification + // purposes. + if pkt.incomingChanID == hop.Source { + htlcKey.IncomingCircuit.HtlcID = 0 + } + + return htlcKey +} + +// newHtlcInfo returns HtlcInfo for the packet provided. +func newHtlcInfo(pkt *htlcPacket) HtlcInfo { + return HtlcInfo{ + IncomingTimeLock: pkt.incomingTimeout, + OutgoingTimeLock: pkt.outgoingTimeout, + IncomingAmt: pkt.incomingAmount, + OutgoingAmt: pkt.amount, + } +} + +// getEventType returns the htlc type based on the fields set in the htlc +// packet. Sends that originate at our node have the source (zero) incoming +// channel ID. Receives to our node have the exit (zero) outgoing channel ID +// and forwards have both fields set. 
+func getEventType(pkt *htlcPacket) HtlcEventType { + switch { + case pkt.incomingChanID == hop.Source: + return HtlcEventTypeSend + + case pkt.outgoingChanID == hop.Exit: + return HtlcEventTypeReceive + + default: + return HtlcEventTypeForward + } +} diff --git a/htlcswitch/interfaces.go b/htlcswitch/interfaces.go index 5e16279586..b28d137f5a 100644 --- a/htlcswitch/interfaces.go +++ b/htlcswitch/interfaces.go @@ -27,7 +27,7 @@ type InvoiceDatabase interface { NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi, expiry uint32, currentHeight int32, circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - eob []byte) (*invoices.HodlEvent, error) + payload invoices.Payload) (invoices.HtlcResolution, error) // CancelInvoice attempts to cancel the invoice corresponding to the // passed payment hash. @@ -36,7 +36,7 @@ type InvoiceDatabase interface { // SettleHodlInvoice settles a hold invoice. SettleHodlInvoice(preimage lntypes.Preimage) error - // HodlUnsubscribeAll unsubscribes from all hodl events. + // HodlUnsubscribeAll unsubscribes from all htlc resolutions. HodlUnsubscribeAll(subscriber chan<- interface{}) } @@ -100,23 +100,23 @@ type ChannelLink interface { // policy to govern if it an incoming HTLC should be forwarded or not. UpdateForwardingPolicy(ForwardingPolicy) - // HtlcSatifiesPolicy should return a nil error if the passed HTLC - // details satisfy the current forwarding policy fo the target link. - // Otherwise, a valid protocol failure message should be returned in - // order to signal to the source of the HTLC, the policy consistency + // CheckHtlcForward should return a nil error if the passed HTLC details + // satisfy the current forwarding policy fo the target link. Otherwise, + // a LinkError with a valid protocol failure message should be returned + // in order to signal to the source of the HTLC, the policy consistency // issue. 
- HtlcSatifiesPolicy(payHash [32]byte, incomingAmt lnwire.MilliSatoshi, + CheckHtlcForward(payHash [32]byte, incomingAmt lnwire.MilliSatoshi, amtToForward lnwire.MilliSatoshi, incomingTimeout, outgoingTimeout uint32, - heightNow uint32) lnwire.FailureMessage + heightNow uint32) *LinkError - // HtlcSatifiesPolicyLocal should return a nil error if the passed HTLC - // details satisfy the current channel policy. Otherwise, a valid - // protocol failure message should be returned in order to signal the - // violation. This call is intended to be used for locally initiated + // CheckHtlcTransit should return a nil error if the passed HTLC details + // satisfy the current channel policy. Otherwise, a LinkError with a + // valid protocol failure message should be returned in order to signal + // the violation. This call is intended to be used for locally initiated // payments for which there is no corresponding incoming htlc. - HtlcSatifiesPolicyLocal(payHash [32]byte, amt lnwire.MilliSatoshi, - timeout uint32, heightNow uint32) lnwire.FailureMessage + CheckHtlcTransit(payHash [32]byte, amt lnwire.MilliSatoshi, + timeout uint32, heightNow uint32) *LinkError // Bandwidth returns the amount of milli-satoshis which current link // might pass through channel link. The value returned from this method @@ -180,3 +180,29 @@ type TowerClient interface { // isTweakless should be true. BackupState(*lnwire.ChannelID, *lnwallet.BreachRetribution, bool) error } + +// htlcNotifier is an interface which represents the input side of the +// HtlcNotifier which htlc events are piped through. This interface is intended +// to allow for mocking of the htlcNotifier in tests, so is unexported because +// it is not needed outside of the htlcSwitch package. +type htlcNotifier interface { + // NotifyForwardingEvent notifies the HtlcNotifier than a htlc has been + // forwarded. 
+ NotifyForwardingEvent(key HtlcKey, info HtlcInfo, + eventType HtlcEventType) + + // NotifyIncomingLinkFailEvent notifies that a htlc has failed on our + // incoming link. It takes an isReceive bool to differentiate between + // our node's receives and forwards. + NotifyLinkFailEvent(key HtlcKey, info HtlcInfo, + eventType HtlcEventType, linkErr *LinkError, incoming bool) + + // NotifyForwardingFailEvent notifies the HtlcNotifier that a htlc we + // forwarded has failed down the line. + NotifyForwardingFailEvent(key HtlcKey, eventType HtlcEventType) + + // NotifySettleEvent notifies the HtlcNotifier that a htlc that we + // committed to as part of a forward or a receive to our node has been + // settled. + NotifySettleEvent(key HtlcKey, eventType HtlcEventType) +} diff --git a/htlcswitch/link.go b/htlcswitch/link.go index 7d37604a47..3290573a24 100644 --- a/htlcswitch/link.go +++ b/htlcswitch/link.go @@ -11,17 +11,19 @@ import ( "time" "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btclog" "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" + "github.com/lightningnetwork/lnd/build" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/htlcswitch/hodl" "github.com/lightningnetwork/lnd/htlcswitch/hop" - "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/invoices" "github.com/lightningnetwork/lnd/lnpeer" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/queue" "github.com/lightningnetwork/lnd/ticker" @@ -68,10 +70,8 @@ const ( // the error possibly carrying along a ChannelUpdate message that includes the // latest policy. type ForwardingPolicy struct { - // MinHTLC is the smallest HTLC that is to be forwarded. 
This is - // set when a channel is first opened, and will be static for the - // lifetime of the channel. - MinHTLC lnwire.MilliSatoshi + // MinHTLC is the smallest HTLC that is to be forwarded. + MinHTLCOut lnwire.MilliSatoshi // MaxHTLC is the largest HTLC that is to be forwarded. MaxHTLC lnwire.MilliSatoshi @@ -197,7 +197,7 @@ type ChannelLinkConfig struct { // FeeEstimator is an instance of a live fee estimator which will be // used to dynamically regulate the current fee of the commitment // transaction to ensure timely confirmation. - FeeEstimator lnwallet.FeeEstimator + FeeEstimator chainfee.Estimator // hodl.Mask is a bitvector composed of hodl.Flags, specifying breakpoints // for HTLC forwarding internal to the switch. @@ -223,6 +223,11 @@ type ChannelLinkConfig struct { // syncing. FwdPkgGCTicker ticker.Ticker + // PendingCommitTicker is a ticker that allows the link to determine if + // a locally initiated commitment dance gets stuck waiting for the + // remote party to revoke. + PendingCommitTicker ticker.Ticker + // BatchSize is the max size of a batch of updates done to the link // before we do a state update. BatchSize uint32 @@ -254,9 +259,9 @@ type ChannelLinkConfig struct { // configured set of watchtowers. TowerClient TowerClient - // MaxCltvExpiry is the maximum outgoing timelock that the link should - // accept for a forwarded HTLC. The value is relative to the current - // block height. + // MaxOutgoingCltvExpiry is the maximum outgoing timelock that the link + // should accept for a forwarded HTLC. The value is relative to the + // current block height. MaxOutgoingCltvExpiry uint32 // MaxFeeAllocation is the highest allocation we'll allow a channel's @@ -264,6 +269,10 @@ type ChannelLinkConfig struct { // initiator of the channel. MaxFeeAllocation float64 + // NotifyActiveLink allows the link to tell the ChannelNotifier when a + // link is first started. 
+ NotifyActiveLink func(wire.OutPoint) + // NotifyActiveChannel allows the link to tell the ChannelNotifier when // channels becomes active. NotifyActiveChannel func(wire.OutPoint) @@ -271,6 +280,10 @@ type ChannelLinkConfig struct { // NotifyInactiveChannel allows the switch to tell the ChannelNotifier // when channels become inactive. NotifyInactiveChannel func(wire.OutPoint) + + // HtlcNotifier is an instance of a htlcNotifier which we will pipe htlc + // events through. + HtlcNotifier htlcNotifier } // channelLink is the service which drives a channel's commitment update @@ -288,15 +301,6 @@ type channelLink struct { // sure we don't process any more updates. failed bool - // batchCounter is the number of updates which we received from remote - // side, but not include in commitment transaction yet and plus the - // current number of settles that have been sent, but not yet committed - // to the commitment. - // - // TODO(andrew.shvv) remove after we add additional BatchNumber() - // method in state machine. - batchCounter uint32 - // keystoneBatch represents a volatile list of keystones that must be // written before attempting to sign the next commitment txn. These // represent all the HTLC's forwarded to the link from the switch. Once @@ -327,10 +331,6 @@ type channelLink struct { // which may affect behaviour of the service. cfg ChannelLinkConfig - // overflowQueue is used to store the htlc add updates which haven't - // been processed because of the commitment transaction overflow. - overflowQueue *packetQueue - // mailBox is the main interface between the outside world and the // link. All incoming messages will be sent over this mailBox. Messages // include new updates from our connected peer, and new packets to be @@ -350,14 +350,6 @@ type channelLink struct { // sub-systems with the latest set of active HTLC's on our channel. 
htlcUpdates chan *contractcourt.ContractUpdate - // logCommitTimer is a timer which is sent upon if we go an interval - // without receiving/sending a commitment update. It's role is to - // ensure both chains converge to identical state in a timely manner. - // - // TODO(roasbeef): timer should be >> then RTT - logCommitTimer *time.Timer - logCommitTick <-chan time.Time - // updateFeeTimer is the timer responsible for updating the link's // commitment fee every time it fires. updateFeeTimer *time.Timer @@ -378,6 +370,9 @@ type channelLink struct { // resolving those htlcs when we receive a message on hodlQueue. hodlMap map[channeldb.CircuitKey]hodlHtlc + // log is a link-specific logging instance. + log btclog.Logger + wg sync.WaitGroup quit chan struct{} } @@ -393,17 +388,18 @@ type hodlHtlc struct { func NewChannelLink(cfg ChannelLinkConfig, channel *lnwallet.LightningChannel) ChannelLink { + logPrefix := fmt.Sprintf("ChannelLink(%v):", channel.ShortChanID()) + return &channelLink{ cfg: cfg, channel: channel, shortChanID: channel.ShortChanID(), // TODO(roasbeef): just do reserve here? 
- logCommitTimer: time.NewTimer(300 * time.Millisecond), - overflowQueue: newPacketQueue(input.MaxHTLCNumber / 2), - htlcUpdates: make(chan *contractcourt.ContractUpdate), - hodlMap: make(map[channeldb.CircuitKey]hodlHtlc), - hodlQueue: queue.NewConcurrentQueue(10), - quit: make(chan struct{}), + htlcUpdates: make(chan *contractcourt.ContractUpdate), + hodlMap: make(map[channeldb.CircuitKey]hodlHtlc), + hodlQueue: queue.NewConcurrentQueue(10), + log: build.NewPrefixLog(logPrefix, log), + quit: make(chan struct{}), } } @@ -418,15 +414,20 @@ var _ ChannelLink = (*channelLink)(nil) func (l *channelLink) Start() error { if !atomic.CompareAndSwapInt32(&l.started, 0, 1) { err := errors.Errorf("channel link(%v): already started", l) - log.Warn(err) + l.log.Warn("already started") return err } - log.Infof("ChannelLink(%v) is starting", l) + l.log.Info("starting") // If the config supplied watchtower client, ensure the channel is // registered before trying to use it during operation. - if l.cfg.TowerClient != nil { + // TODO(halseth): support anchor types for watchtower. + state := l.channel.State() + if l.cfg.TowerClient != nil && state.ChanType.HasAnchors() { + l.log.Warnf("Skipping tower registration for anchor " + + "channel type") + } else if l.cfg.TowerClient != nil && !state.ChanType.HasAnchors() { err := l.cfg.TowerClient.RegisterChannel(l.ChanID()) if err != nil { return err @@ -434,7 +435,6 @@ func (l *channelLink) Start() error { } l.mailBox.ResetMessages() - l.overflowQueue.Start() l.hodlQueue.Start() // Before launching the htlcManager messages, revert any circuits that @@ -475,8 +475,7 @@ func (l *channelLink) Start() error { err := l.cfg.UpdateContractSignals(signals) if err != nil { - log.Errorf("Unable to update signals for "+ - "ChannelLink(%v)", l) + l.log.Errorf("unable to update signals") } }() } @@ -495,14 +494,14 @@ func (l *channelLink) Start() error { // NOTE: Part of the ChannelLink interface. 
func (l *channelLink) Stop() { if !atomic.CompareAndSwapInt32(&l.shutdown, 0, 1) { - log.Warnf("channel link(%v): already stopped", l) + l.log.Warn("already stopped") return } - log.Infof("ChannelLink(%v) is stopping", l) + l.log.Info("stopping") - // As the link is stopping, we are no longer interested in hodl events - // coming from the invoice registry. + // As the link is stopping, we are no longer interested in htlc + // resolutions coming from the invoice registry. l.cfg.Registry.HodlUnsubscribeAll(l.hodlQueue.ChanIn()) if l.cfg.ChainEvents.Cancel != nil { @@ -510,12 +509,18 @@ func (l *channelLink) Stop() { } l.updateFeeTimer.Stop() - l.overflowQueue.Stop() l.hodlQueue.Stop() close(l.quit) l.wg.Wait() + // Now that the htlcManager has completely exited, reset the packet + // courier. This allows the mailbox to revaluate any lingering Adds that + // were delivered but didn't make it on a commitment to be failed back + // if the link is offline for an extended period of time. The error is + // ignored since it can only fail when the daemon is exiting. + _ = l.mailBox.ResetPackets() + // As a final precaution, we will attempt to flush any uncommitted // preimages to the preimage cache. The preimages should be re-delivered // after channel reestablishment, however this adds an extra layer of @@ -524,7 +529,7 @@ func (l *channelLink) Stop() { // we had learned them at some point. err := l.cfg.PreimageCache.AddPreimages(l.uncommittedPreimages...) if err != nil { - log.Errorf("Unable to add preimages=%v to cache: %v", + l.log.Errorf("unable to add preimages=%v to cache: %v", l.uncommittedPreimages, err) } } @@ -563,7 +568,7 @@ func (l *channelLink) markReestablished() { // chain in a timely manner. The returned value is expressed in fee-per-kw, as // this is the native rate used when computing the fee for commitment // transactions, and the second-level HTLC transactions. 
-func (l *channelLink) sampleNetworkFee() (lnwallet.SatPerKWeight, error) { +func (l *channelLink) sampleNetworkFee() (chainfee.SatPerKWeight, error) { // We'll first query for the sat/kw recommended to be confirmed within 3 // blocks. feePerKw, err := l.cfg.FeeEstimator.EstimateFeePerKW(3) @@ -571,8 +576,8 @@ func (l *channelLink) sampleNetworkFee() (lnwallet.SatPerKWeight, error) { return 0, err } - log.Debugf("ChannelLink(%v): sampled fee rate for 3 block conf: %v "+ - "sat/kw", l, int64(feePerKw)) + l.log.Debugf("sampled fee rate for 3 block conf: %v sat/kw", + int64(feePerKw)) return feePerKw, nil } @@ -580,7 +585,7 @@ func (l *channelLink) sampleNetworkFee() (lnwallet.SatPerKWeight, error) { // shouldAdjustCommitFee returns true if we should update our commitment fee to // match that of the network fee. We'll only update our commitment fee if the // network fee is +/- 10% to our network fee. -func shouldAdjustCommitFee(netFee, chanFee lnwallet.SatPerKWeight) bool { +func shouldAdjustCommitFee(netFee, chanFee chainfee.SatPerKWeight) bool { switch { // If the network fee is greater than the commitment fee, then we'll // switch to it if it's at least 10% greater than the commit fee. @@ -598,13 +603,25 @@ func shouldAdjustCommitFee(netFee, chanFee lnwallet.SatPerKWeight) bool { } } +// createFailureWithUpdate retrieves this link's last channel update message and +// passes it into the callback. It expects a fully populated failure message. +func (l *channelLink) createFailureWithUpdate( + cb func(update *lnwire.ChannelUpdate) lnwire.FailureMessage) lnwire.FailureMessage { + + update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID()) + if err != nil { + return &lnwire.FailTemporaryNodeFailure{} + } + + return cb(update) +} + // syncChanState attempts to synchronize channel states with the remote party. // This method is to be called upon reconnection after the initial funding // flow. 
We'll compare out commitment chains with the remote party, and re-send // either a danging commit signature, a revocation, or both. func (l *channelLink) syncChanStates() error { - log.Infof("Attempting to re-resynchronize ChannelPoint(%v)", - l.channel.ChannelPoint()) + l.log.Info("attempting to re-resynchronize") // First, we'll generate our ChanSync message to send to the other // side. Based on this message, the remote party will decide if they @@ -618,7 +635,7 @@ func (l *channelLink) syncChanStates() error { if err := l.cfg.Peer.SendMessage(true, localChanSyncMsg); err != nil { return fmt.Errorf("Unable to send chan sync message for "+ - "ChannelPoint(%v)", l.channel.ChannelPoint()) + "ChannelPoint(%v): %v", l.channel.ChannelPoint(), err) } var msgsToReSend []lnwire.Message @@ -643,9 +660,7 @@ func (l *channelLink) syncChanStates() error { localChanSyncMsg.NextLocalCommitHeight == 1 && !l.channel.IsPending() { - log.Infof("ChannelPoint(%v): resending "+ - "FundingLocked message to peer", - l.channel.ChannelPoint()) + l.log.Infof("resending FundingLocked message to peer") nextRevocation, err := l.channel.NextRevocationKey() if err != nil { @@ -664,8 +679,7 @@ func (l *channelLink) syncChanStates() error { } // In any case, we'll then process their ChanSync message. 
- log.Infof("Received re-establishment message from remote side "+ - "for channel(%v)", l.channel.ChannelPoint()) + l.log.Info("received re-establishment message from remote side") var ( openedCircuits []CircuitKey @@ -694,9 +708,8 @@ func (l *channelLink) syncChanStates() error { } if len(msgsToReSend) > 0 { - log.Infof("Sending %v updates to synchronize the "+ - "state for ChannelPoint(%v)", len(msgsToReSend), - l.channel.ChannelPoint()) + l.log.Infof("sending %v updates to synchronize the "+ + "state", len(msgsToReSend)) } // If we have any messages to retransmit, we'll do so @@ -724,21 +737,17 @@ func (l *channelLink) resolveFwdPkgs() error { return err } - l.debugf("loaded %d fwd pks", len(fwdPkgs)) + l.log.Debugf("loaded %d fwd pks", len(fwdPkgs)) - var needUpdate bool for _, fwdPkg := range fwdPkgs { - hasUpdate, err := l.resolveFwdPkg(fwdPkg) - if err != nil { + if err := l.resolveFwdPkg(fwdPkg); err != nil { return err } - - needUpdate = needUpdate || hasUpdate } // If any of our reprocessing steps require an update to the commitment // txn, we initiate a state transition to capture all relevant changes. - if needUpdate { + if l.channel.PendingLocalUpdateCount() > 0 { return l.updateCommitTx() } @@ -748,17 +757,17 @@ func (l *channelLink) resolveFwdPkgs() error { // resolveFwdPkg interprets the FwdState of the provided package, either // reprocesses any outstanding htlcs in the package, or performs garbage // collection on the package. -func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) (bool, error) { +func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) error { // Remove any completed packages to clear up space. 
if fwdPkg.State == channeldb.FwdStateCompleted { - l.debugf("removing completed fwd pkg for height=%d", + l.log.Debugf("removing completed fwd pkg for height=%d", fwdPkg.Height) err := l.channel.RemoveFwdPkg(fwdPkg.Height) if err != nil { - l.errorf("unable to remove fwd pkg for height=%d: %v", - fwdPkg.Height, err) - return false, err + l.log.Errorf("unable to remove fwd pkg for height=%d: "+ + "%v", fwdPkg.Height, err) + return err } } @@ -775,9 +784,9 @@ func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) (bool, error) { fwdPkg.Source, fwdPkg.Height, fwdPkg.SettleFails, ) if err != nil { - l.errorf("Unable to process remote log updates: %v", + l.log.Errorf("unable to process remote log updates: %v", err) - return false, err + return err } l.processRemoteSettleFails(fwdPkg, settleFails) } @@ -786,28 +795,27 @@ func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) (bool, error) { // downstream logic is able to filter out any duplicates, but we must // shove the entire, original set of adds down the pipeline so that the // batch of adds presented to the sphinx router does not ever change. - var needUpdate bool if !fwdPkg.AckFilter.IsFull() { adds, err := lnwallet.PayDescsFromRemoteLogUpdates( fwdPkg.Source, fwdPkg.Height, fwdPkg.Adds, ) if err != nil { - l.errorf("Unable to process remote log updates: %v", + l.log.Errorf("unable to process remote log updates: %v", err) - return false, err + return err } - needUpdate = l.processRemoteAdds(fwdPkg, adds) + l.processRemoteAdds(fwdPkg, adds) // If the link failed during processing the adds, we must // return to ensure we won't attempted to update the state // further. 
if l.failed { - return false, fmt.Errorf("link failed while " + + return fmt.Errorf("link failed while " + "processing remote adds") } } - return needUpdate, nil + return nil } // fwdPkgGarbager periodically reads all forwarding packages from disk and @@ -827,7 +835,8 @@ func (l *channelLink) fwdPkgGarbager() { case <-l.cfg.FwdPkgGCTicker.Ticks(): fwdPkgs, err := l.channel.LoadFwdPkgs() if err != nil { - l.warnf("unable to load fwdpkgs for gc: %v", err) + l.log.Warnf("unable to load fwdpkgs for gc: %v", + err) continue } @@ -839,7 +848,7 @@ func (l *channelLink) fwdPkgGarbager() { err = l.channel.RemoveFwdPkg(fwdPkg.Height) if err != nil { - l.warnf("unable to remove fwd pkg "+ + l.log.Warnf("unable to remove fwd pkg "+ "for height=%d: %v", fwdPkg.Height, err) } @@ -864,11 +873,14 @@ func (l *channelLink) htlcManager() { defer func() { l.cfg.BatchTicker.Stop() l.wg.Done() - log.Infof("ChannelLink(%v) has exited", l) + l.log.Infof("exited") }() - log.Infof("HTLC manager for ChannelPoint(%v) started, "+ - "bandwidth=%v", l.channel.ChannelPoint(), l.Bandwidth()) + l.log.Infof("HTLC manager started, bandwidth=%v", l.Bandwidth()) + + // Notify any clients that the link is now in the switch via an + // ActiveLinkEvent. + l.cfg.NotifyActiveLink(*l.ChannelPoint()) // TODO(roasbeef): need to call wipe chan whenever D/C? 
@@ -879,14 +891,14 @@ func (l *channelLink) htlcManager() { if l.cfg.SyncStates { err := l.syncChanStates() if err != nil { - log.Warnf("Error when syncing channel states: %v", err) + l.log.Warnf("error when syncing channel states: %v", err) errDataLoss, localDataLoss := err.(*lnwallet.ErrCommitSyncLocalDataLoss) switch { case err == ErrLinkShuttingDown: - log.Debugf("unable to sync channel states, " + + l.log.Debugf("unable to sync channel states, " + "link is shutting down") return @@ -934,7 +946,7 @@ func (l *channelLink) htlcManager() { errDataLoss.CommitPoint, ) if err != nil { - log.Errorf("Unable to mark channel "+ + l.log.Errorf("unable to mark channel "+ "data loss: %v", err) } @@ -945,7 +957,7 @@ func (l *channelLink) htlcManager() { // cases where this error is returned? case err == lnwallet.ErrCannotSyncCommitChains: if err := l.channel.MarkBorked(); err != nil { - log.Errorf("Unable to mark channel "+ + l.log.Errorf("unable to mark channel "+ "borked: %v", err) } @@ -1003,20 +1015,22 @@ func (l *channelLink) htlcManager() { go l.fwdPkgGarbager() } -out: for { // We must always check if we failed at some point processing // the last update before processing the next. if l.failed { - l.errorf("link failed, exiting htlcManager") - break out + l.log.Errorf("link failed, exiting htlcManager") + return } - // If the previous event resulted in a non-empty - // batch, reinstate the batch ticker so that it can be - // cleared. - if l.batchCounter > 0 { + // If the previous event resulted in a non-empty batch, resume + // the batch ticker so that it can be cleared. Otherwise pause + // the ticker to prevent waking up the htlcManager while the + // batch is empty. + if l.channel.PendingLocalUpdateCount() > 0 { l.cfg.BatchTicker.Resume() + } else { + l.cfg.BatchTicker.Pause() } select { @@ -1036,7 +1050,8 @@ out: // blocks. 
netFee, err := l.sampleNetworkFee() if err != nil { - log.Errorf("unable to sample network fee: %v", err) + l.log.Errorf("unable to sample network fee: %v", + err) continue } @@ -1045,7 +1060,7 @@ out: // fee rate to our max fee allocation. commitFee := l.channel.CommitFeeRate() maxFee := l.channel.MaxFeeRate(l.cfg.MaxFeeAllocation) - newCommitFee := lnwallet.SatPerKWeight( + newCommitFee := chainfee.SatPerKWeight( math.Min(float64(netFee), float64(maxFee)), ) if !shouldAdjustCommitFee(newCommitFee, commitFee) { @@ -1055,7 +1070,8 @@ out: // If we do, then we'll send a new UpdateFee message to // the remote party, to be locked in with a new update. if err := l.updateChannelFee(newCommitFee); err != nil { - log.Errorf("unable to update fee rate: %v", err) + l.log.Errorf("unable to update fee rate: %v", + err) continue } @@ -1066,87 +1082,37 @@ out: // // TODO(roasbeef): add force closure? also breach? case <-l.cfg.ChainEvents.RemoteUnilateralClosure: - log.Warnf("Remote peer has closed ChannelPoint(%v) on-chain", - l.channel.ChannelPoint()) + l.log.Warnf("remote peer has closed on-chain") // TODO(roasbeef): remove all together go func() { chanPoint := l.channel.ChannelPoint() - err := l.cfg.Peer.WipeChannel(chanPoint) - if err != nil { - log.Errorf("unable to wipe channel %v", err) - } + l.cfg.Peer.WipeChannel(chanPoint) }() - break out - - case <-l.logCommitTick: - // If we haven't sent or received a new commitment - // update in some time, check to see if we have any - // pending updates we need to commit due to our - // commitment chains being desynchronized. - if l.channel.FullySynced() { - continue - } - - if err := l.updateCommitTx(); err != nil { - l.fail(LinkFailureError{code: ErrInternalError}, - "unable to update commitment: %v", err) - break out - } + return case <-l.cfg.BatchTicker.Ticks(): - // If the current batch is empty, then we have no work - // here. We also disable the batch ticker from waking up - // the htlcManager while the batch is empty. 
- if l.batchCounter == 0 { - l.cfg.BatchTicker.Pause() - continue - } - - // Otherwise, attempt to extend the remote commitment - // chain including all the currently pending entries. - // If the send was unsuccessful, then abandon the - // update, waiting for the revocation window to open - // up. + // Attempt to extend the remote commitment chain + // including all the currently pending entries. If the + // send was unsuccessful, then abandon the update, + // waiting for the revocation window to open up. if err := l.updateCommitTx(); err != nil { l.fail(LinkFailureError{code: ErrInternalError}, "unable to update commitment: %v", err) - break out + return } - // A packet that previously overflowed the commitment - // transaction is now eligible for processing once again. So - // we'll attempt to re-process the packet in order to allow it - // to continue propagating within the network. - case packet := <-l.overflowQueue.outgoingPkts: - msg := packet.htlc.(*lnwire.UpdateAddHTLC) - log.Tracef("Reprocessing downstream add update "+ - "with payment hash(%x)", msg.PaymentHash[:]) - - l.handleDownStreamPkt(packet, true) + case <-l.cfg.PendingCommitTicker.Ticks(): + l.fail(LinkFailureError{code: ErrRemoteUnresponsive}, + "unable to complete dance") + return // A message from the switch was just received. This indicates // that the link is an intermediate hop in a multi-hop HTLC // circuit. case pkt := <-l.downstream: - // If we have non empty processing queue then we'll add - // this to the overflow rather than processing it - // directly. Once an active HTLC is either settled or - // failed, then we'll free up a new slot. 
- htlc, ok := pkt.htlc.(*lnwire.UpdateAddHTLC) - if ok && l.overflowQueue.Length() != 0 { - log.Infof("Downstream htlc add update with "+ - "payment hash(%x) have been added to "+ - "reprocessing queue, batch_size=%v", - htlc.PaymentHash[:], - l.batchCounter) - - l.overflowQueue.AddPkt(pkt) - continue - } - - l.handleDownStreamPkt(pkt, false) + l.handleDownstreamPkt(pkt) // A message from the connected peer was just received. This // indicates that we have a new incoming HTLC, either directly @@ -1154,43 +1120,45 @@ out: case msg := <-l.upstream: l.handleUpstreamMsg(msg) - // A hodl event is received. This means that we now have a + // A htlc resolution is received. This means that we now have a // resolution for a previously accepted htlc. case hodlItem := <-l.hodlQueue.ChanOut(): - hodlEvent := hodlItem.(invoices.HodlEvent) - err := l.processHodlQueue(hodlEvent) + htlcResolution := hodlItem.(invoices.HtlcResolution) + err := l.processHodlQueue(htlcResolution) if err != nil { l.fail(LinkFailureError{code: ErrInternalError}, fmt.Sprintf("process hodl queue: %v", err.Error()), ) - break out + return } case <-l.quit: - break out + return } } } -// processHodlQueue processes a received hodl event and continues reading from -// the hodl queue until no more events remain. When this function returns -// without an error, the commit tx should be updated. -func (l *channelLink) processHodlQueue(firstHodlEvent invoices.HodlEvent) error { +// processHodlQueue processes a received htlc resolution and continues reading +// from the hodl queue until no more resolutions remain. When this function +// returns without an error, the commit tx should be updated. +func (l *channelLink) processHodlQueue( + firstResolution invoices.HtlcResolution) error { + // Try to read all waiting resolution messages, so that they can all be // processed in a single commitment tx update. 
- hodlEvent := firstHodlEvent + htlcResolution := firstResolution loop: for { // Lookup all hodl htlcs that can be failed or settled with this event. // The hodl htlc must be present in the map. - circuitKey := hodlEvent.CircuitKey + circuitKey := htlcResolution.CircuitKey() hodlHtlc, ok := l.hodlMap[circuitKey] if !ok { return fmt.Errorf("hodl htlc not found: %v", circuitKey) } - if err := l.processHodlEvent(hodlEvent, hodlHtlc); err != nil { + if err := l.processHtlcResolution(htlcResolution, hodlHtlc); err != nil { return err } @@ -1199,7 +1167,7 @@ loop: select { case item := <-l.hodlQueue.ChanOut(): - hodlEvent = item.(invoices.HodlEvent) + htlcResolution = item.(invoices.HtlcResolution) default: break loop } @@ -1213,38 +1181,70 @@ loop: return nil } -// processHodlEvent applies a received hodl event to the provided htlc. When -// this function returns without an error, the commit tx should be updated. -func (l *channelLink) processHodlEvent(hodlEvent invoices.HodlEvent, +// processHtlcResolution applies a received htlc resolution to the provided +// htlc. When this function returns without an error, the commit tx should be +// updated. +func (l *channelLink) processHtlcResolution(resolution invoices.HtlcResolution, htlc hodlHtlc) error { - l.batchCounter++ + circuitKey := resolution.CircuitKey() - circuitKey := hodlEvent.CircuitKey + // Determine required action for the resolution based on the type of + // resolution we have received. + switch res := resolution.(type) { + // Settle htlcs that returned a settle resolution using the preimage + // in the resolution. + case *invoices.HtlcSettleResolution: + l.log.Debugf("received settle resolution for %v "+ + "with outcome: %v", circuitKey, res.Outcome) - // Determine required action for the resolution. 
- if hodlEvent.Preimage != nil {
- l.debugf("Received hodl settle event for %v", circuitKey)
+ return l.settleHTLC(res.Preimage, htlc.pd)

- return l.settleHTLC(
- *hodlEvent.Preimage, htlc.pd.HtlcIndex,
- htlc.pd.SourceRef,
+ // For htlc failures, we get the relevant failure message based
+ // on the failure resolution and then fail the htlc.
+ case *invoices.HtlcFailResolution:
+ l.log.Debugf("received cancel resolution for "+
+ "%v with outcome: %v", circuitKey, res.Outcome)
+
+ // Get the lnwire failure message based on the resolution
+ // result.
+ failure := getResolutionFailure(res, htlc.pd.Amount)
+
+ l.sendHTLCError(
+ htlc.pd, failure, htlc.obfuscator, true,
)
+ return nil
+
+ // Fail if we do not get a settle or fail resolution, since we
+ // are only expecting to handle settles and fails.
+ default:
+ return fmt.Errorf("unknown htlc resolution type: %T",
+ resolution)
}
+}

- l.debugf("Received hodl cancel event for %v", circuitKey)
+// getResolutionFailure returns the wire message that a htlc resolution should
+// be failed with.
+func getResolutionFailure(resolution *invoices.HtlcFailResolution,
+ amount lnwire.MilliSatoshi) *LinkError {

- // In case of a cancel, always return
- // incorrect_or_unknown_payment_details in order to avoid leaking info.
- failure := lnwire.NewFailIncorrectDetails(
- htlc.pd.Amount, uint32(hodlEvent.AcceptHeight),
- )
+ // If the resolution has been resolved as part of a MPP timeout,
+ // we need to fail the htlc with lnwire.FailMppTimeout.
+ if resolution.Outcome == invoices.ResultMppTimeout {
+ return NewDetailedLinkError(
+ &lnwire.FailMPPTimeout{}, resolution.Outcome,
+ )
+ }

- l.sendHTLCError(
- htlc.pd.HtlcIndex, failure, htlc.obfuscator,
- htlc.pd.SourceRef,
+ // If the htlc is not a MPP timeout, we fail it with
+ // FailIncorrectDetails. This error is sent for invoice payment
+ // failures such as underpayment/ expiry too soon and hodl invoices
+ // (which return FailIncorrectDetails to avoid leaking information).
+ incorrectDetails := lnwire.NewFailIncorrectDetails( + amount, uint32(resolution.AcceptHeight), ) - return nil + + return NewDetailedLinkError(incorrectDetails, resolution.Outcome) } // randomFeeUpdateTimeout returns a random timeout between the bounds defined @@ -1256,13 +1256,13 @@ func (l *channelLink) randomFeeUpdateTimeout() time.Duration { return time.Duration(prand.Int63n(upper-lower) + lower) } -// handleDownStreamPkt processes an HTLC packet sent from the downstream HTLC +// handleDownstreamPkt processes an HTLC packet sent from the downstream HTLC // Switch. Possible messages sent by the switch include requests to forward new // HTLCs, timeout previously cleared HTLCs, and finally to settle currently // cleared HTLCs with the upstream peer. // // TODO(roasbeef): add sync ntfn to ensure switch always has consistent view? -func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { +func (l *channelLink) handleDownstreamPkt(pkt *htlcPacket) { var isSettle bool switch htlc := pkt.htlc.(type) { case *lnwire.UpdateAddHTLC: @@ -1270,7 +1270,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { // arbitrary delays between the switch adding an ADD to the // mailbox, and the HTLC being added to the commitment state. if l.cfg.HodlMask.Active(hodl.AddOutgoing) { - l.warnf(hodl.AddOutgoing.Warning()) + l.log.Warnf(hodl.AddOutgoing.Warning()) l.mailBox.AckPacket(pkt.inKey()) return } @@ -1282,103 +1282,35 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { openCircuitRef := pkt.inKey() index, err := l.channel.AddHTLC(htlc, &openCircuitRef) if err != nil { - switch err { - - // The channels spare bandwidth is fully allocated, so - // we'll put this HTLC into the overflow queue. 
- case lnwallet.ErrMaxHTLCNumber: - l.infof("Downstream htlc add update with "+ - "payment hash(%x) have been added to "+ - "reprocessing queue, batch: %v", - htlc.PaymentHash[:], - l.batchCounter) - - l.overflowQueue.AddPkt(pkt) - return - - // The HTLC was unable to be added to the state - // machine, as a result, we'll signal the switch to - // cancel the pending payment. - default: - l.warnf("Unable to handle downstream add HTLC: %v", err) - - var ( - localFailure = false - reason lnwire.OpaqueReason - ) - - var failure lnwire.FailureMessage - update, err := l.cfg.FetchLastChannelUpdate( - l.ShortChanID(), - ) - if err != nil { - failure = &lnwire.FailTemporaryNodeFailure{} - } else { - failure = lnwire.NewTemporaryChannelFailure( - update, - ) - } - - // Encrypt the error back to the source unless - // the payment was generated locally. - if pkt.obfuscator == nil { - var b bytes.Buffer - err := lnwire.EncodeFailure(&b, failure, 0) - if err != nil { - l.errorf("unable to encode failure: %v", err) - l.mailBox.AckPacket(pkt.inKey()) - return - } - reason = lnwire.OpaqueReason(b.Bytes()) - localFailure = true - } else { - var err error - reason, err = pkt.obfuscator.EncryptFirstHop(failure) - if err != nil { - l.errorf("unable to obfuscate error: %v", err) - l.mailBox.AckPacket(pkt.inKey()) - return - } - } - - failPkt := &htlcPacket{ - incomingChanID: pkt.incomingChanID, - incomingHTLCID: pkt.incomingHTLCID, - circuit: pkt.circuit, - sourceRef: pkt.sourceRef, - hasSource: true, - localFailure: localFailure, - htlc: &lnwire.UpdateFailHTLC{ - Reason: reason, - }, - } - - go l.forwardBatch(failPkt) + // The HTLC was unable to be added to the state machine, + // as a result, we'll signal the switch to cancel the + // pending payment. + l.log.Warnf("Unable to handle downstream add HTLC: %v", + err) - // Remove this packet from the link's mailbox, - // this prevents it from being reprocessed if - // the link restarts and resets it mailbox. 
If
- // this response doesn't make it back to the
- // originating link, it will be rejected upon
- // attempting to reforward the Add to the
- // switch, since the circuit was never fully
- // opened, and the forwarding package shows it
- // as unacknowledged.
- l.mailBox.AckPacket(pkt.inKey())
+ // Remove this packet from the link's mailbox, this
+ // prevents it from being reprocessed if the link
+ // restarts and resets its mailbox. If this response
+ // doesn't make it back to the originating link, it will
+ // be rejected upon attempting to reforward the Add to
+ // the switch, since the circuit was never fully opened,
+ // and the forwarding package shows it as
+ // unacknowledged.
+ l.mailBox.FailAdd(pkt)
- return
- }
+ return
}
- l.tracef("Received downstream htlc: payment_hash=%x, "+
- "local_log_index=%v, batch_size=%v",
- htlc.PaymentHash[:], index, l.batchCounter+1)
+ l.log.Tracef("received downstream htlc: payment_hash=%x, "+
+ "local_log_index=%v, pend_updates=%v",
+ htlc.PaymentHash[:], index,
+ l.channel.PendingLocalUpdateCount())
pkt.outgoingChanID = l.ShortChanID()
pkt.outgoingHTLCID = index
htlc.ID = index
- l.debugf("Queueing keystone of ADD open circuit: %s->%s",
+ l.log.Debugf("queueing keystone of ADD open circuit: %s->%s",
pkt.inKey(), pkt.outKey())
l.openedCircuits = append(l.openedCircuits, pkt.inKey())
@@ -1386,13 +1318,25 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
l.cfg.Peer.SendMessage(false, htlc)
+ // Send a forward event notification to htlcNotifier.
+ l.cfg.HtlcNotifier.NotifyForwardingEvent( + newHtlcKey(pkt), + HtlcInfo{ + IncomingTimeLock: pkt.incomingTimeout, + IncomingAmt: pkt.incomingAmount, + OutgoingTimeLock: htlc.Expiry, + OutgoingAmt: htlc.Amount, + }, + getEventType(pkt), + ) + case *lnwire.UpdateFulfillHTLC: // If hodl.SettleOutgoing mode is active, we exit early to // simulate arbitrary delays between the switch adding the // SETTLE to the mailbox, and the HTLC being added to the // commitment state. if l.cfg.HodlMask.Active(hodl.SettleOutgoing) { - l.warnf(hodl.SettleOutgoing.Warning()) + l.log.Warnf(hodl.SettleOutgoing.Warning()) l.mailBox.AckPacket(pkt.inKey()) return } @@ -1409,7 +1353,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { &inKey, ) if err != nil { - l.errorf("unable to settle incoming HTLC for "+ + l.log.Errorf("unable to settle incoming HTLC for "+ "circuit-key=%v: %v", inKey, err) // If the HTLC index for Settle response was not known @@ -1428,8 +1372,8 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { return } - l.debugf("Queueing removal of SETTLE closed circuit: %s->%s", - pkt.inKey(), pkt.outKey()) + l.log.Debugf("queueing removal of SETTLE closed circuit: "+ + "%s->%s", pkt.inKey(), pkt.outKey()) l.closedCircuits = append(l.closedCircuits, pkt.inKey()) @@ -1444,13 +1388,19 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { l.cfg.Peer.SendMessage(false, htlc) isSettle = true + // Send a settle event notification to htlcNotifier. + l.cfg.HtlcNotifier.NotifySettleEvent( + newHtlcKey(pkt), + getEventType(pkt), + ) + case *lnwire.UpdateFailHTLC: // If hodl.FailOutgoing mode is active, we exit early to // simulate arbitrary delays between the switch adding a FAIL to // the mailbox, and the HTLC being added to the commitment // state. 
if l.cfg.HodlMask.Active(hodl.FailOutgoing) { - l.warnf(hodl.FailOutgoing.Warning()) + l.log.Warnf(hodl.FailOutgoing.Warning()) l.mailBox.AckPacket(pkt.inKey()) return } @@ -1466,7 +1416,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { &inKey, ) if err != nil { - l.errorf("unable to cancel incoming HTLC for "+ + l.log.Errorf("unable to cancel incoming HTLC for "+ "circuit-key=%v: %v", inKey, err) // If the HTLC index for Fail response was not known to @@ -1485,7 +1435,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { return } - l.debugf("Queueing removal of FAIL closed circuit: %s->%s", + l.log.Debugf("queueing removal of FAIL closed circuit: %s->%s", pkt.inKey(), pkt.outKey()) l.closedCircuits = append(l.closedCircuits, pkt.inKey()) @@ -1497,17 +1447,35 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { htlc.ChanID = l.ChanID() htlc.ID = pkt.incomingHTLCID - // Finally, we send the HTLC message to the peer which - // initially created the HTLC. + // We send the HTLC message to the peer which initially created + // the HTLC. l.cfg.Peer.SendMessage(false, htlc) isSettle = true - } - l.batchCounter++ + // If the packet does not have a link failure set, it failed + // further down the route so we notify a forwarding failure. + // Otherwise, we notify a link failure because it failed at our + // node. + if pkt.linkFailure != nil { + l.cfg.HtlcNotifier.NotifyLinkFailEvent( + newHtlcKey(pkt), + newHtlcInfo(pkt), + getEventType(pkt), + pkt.linkFailure, + false, + ) + } else { + l.cfg.HtlcNotifier.NotifyForwardingFailEvent( + newHtlcKey(pkt), getEventType(pkt), + ) + } + } // If this newly added update exceeds the min batch size for adds, or // this is a settle request, then initiate an update. 
- if l.batchCounter >= l.cfg.BatchSize || isSettle { + if l.channel.PendingLocalUpdateCount() >= uint64(l.cfg.BatchSize) || + isSettle { + if err := l.updateCommitTx(); err != nil { l.fail(LinkFailureError{code: ErrInternalError}, "unable to update commitment: %v", err) @@ -1523,13 +1491,13 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) { func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) { inKey := pkt.inKey() - l.debugf("Cleaning up spurious response for incoming circuit-key=%v", - inKey) + l.log.Debugf("cleaning up spurious response for incoming "+ + "circuit-key=%v", inKey) // If the htlc packet doesn't have a source reference, it is unsafe to // proceed, as skipping this ack may cause the htlc to be reforwarded. if pkt.sourceRef == nil { - l.errorf("uanble to cleanup response for incoming "+ + l.log.Errorf("uanble to cleanup response for incoming "+ "circuit-key=%v, does not contain source reference", inKey) return @@ -1540,7 +1508,7 @@ func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) { // of the incoming HTLC belonging to this link. err := l.channel.AckAddHtlcs(*pkt.sourceRef) if err != nil { - l.errorf("unable to ack AddRef for incoming "+ + l.log.Errorf("unable to ack AddRef for incoming "+ "circuit-key=%v: %v", inKey, err) // If this operation failed, it is unsafe to attempt removal of @@ -1563,19 +1531,19 @@ func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) { if pkt.destRef != nil { err := l.channel.AckSettleFails(*pkt.destRef) if err != nil { - l.errorf("unable to ack SettleFailRef "+ + l.log.Errorf("unable to ack SettleFailRef "+ "for incoming circuit-key=%v: %v", inKey, err) } } - l.debugf("Deleting circuit for incoming circuit-key=%x", inKey) + l.log.Debugf("deleting circuit for incoming circuit-key=%x", inKey) // With all known references acked, we can now safely delete the circuit // from the switch's circuit map, as the state is no longer needed. 
err = l.cfg.Circuits.DeleteCircuits(inKey) if err != nil { - l.errorf("unable to delete circuit for "+ + l.log.Errorf("unable to delete circuit for "+ "circuit-key=%v: %v", inKey, err) } } @@ -1597,7 +1565,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { return } - l.tracef("Receive upstream htlc with payment hash(%x), "+ + l.log.Tracef("receive upstream htlc with payment hash(%x), "+ "assigning index: %v", msg.PaymentHash[:], index) case *lnwire.UpdateFulfillHTLC: @@ -1650,7 +1618,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { OnionSHA256: msg.ShaOnionBlob, } default: - log.Warnf("Unexpected failure code received in "+ + l.log.Warnf("unexpected failure code received in "+ "UpdateFailMailformedHTLC: %v", msg.FailureCode) // We don't just pass back the error we received from @@ -1671,7 +1639,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { // form. var b bytes.Buffer if err := lnwire.EncodeFailure(&b, failure, 0); err != nil { - l.errorf("unable to encode malformed error: %v", err) + l.log.Errorf("unable to encode malformed error: %v", err) return } @@ -1758,7 +1726,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { // state. nextRevocation, currentHtlcs, err := l.channel.RevokeCurrentCommitment() if err != nil { - log.Errorf("unable to revoke commitment: %v", err) + l.log.Errorf("unable to revoke commitment: %v", err) return } l.cfg.Peer.SendMessage(false, nextRevocation) @@ -1775,25 +1743,10 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { return } - // As we've just received a commitment signature, we'll - // re-start the log commit timer to wake up the main processing - // loop to check if we need to send a commitment signature as - // we owe one. - // - // TODO(roasbeef): instead after revocation? 
- if !l.logCommitTimer.Stop() { - select { - case <-l.logCommitTimer.C: - default: - } - } - l.logCommitTimer.Reset(300 * time.Millisecond) - l.logCommitTick = l.logCommitTimer.C - // If both commitment chains are fully synced from our PoV, // then we don't need to reply with a signature as both sides // already have a commitment with the latest accepted. - if l.channel.FullySynced() { + if !l.channel.OweCommitment(true) { return } @@ -1834,8 +1787,12 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { // If we have a tower client, we'll proceed in backing up the // state that was just revoked. - if l.cfg.TowerClient != nil { - state := l.channel.State() + // TODO(halseth): support anchor types for watchtower. + state := l.channel.State() + if l.cfg.TowerClient != nil && state.ChanType.HasAnchors() { + l.log.Warnf("Skipping tower backup for anchor " + + "channel type") + } else if l.cfg.TowerClient != nil && !state.ChanType.HasAnchors() { breachInfo, err := lnwallet.NewBreachRetribution( state, state.RemoteCommitment.CommitHeight-1, 0, ) @@ -1846,11 +1803,9 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { } chanType := l.channel.State().ChanType - isTweakless := chanType == channeldb.SingleFunderTweakless - chanID := l.ChanID() err = l.cfg.TowerClient.BackupState( - &chanID, breachInfo, isTweakless, + &chanID, breachInfo, chanType.IsTweakless(), ) if err != nil { l.fail(LinkFailureError{code: ErrInternalError}, @@ -1860,7 +1815,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { } l.processRemoteSettleFails(fwdPkg, settleFails) - needUpdate := l.processRemoteAdds(fwdPkg, adds) + l.processRemoteAdds(fwdPkg, adds) // If the link failed during processing the adds, we must // return to ensure we won't attempted to update the state @@ -1869,7 +1824,14 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { return } - if needUpdate { + // The revocation window opened up. 
If there are pending local + // updates, try to update the commit tx. Pending updates could + // already have been present because of a previously failed + // update to the commit tx or freshly added in by + // processRemoteAdds. Also in case there are no local updates, + // but there are still remote updates that are not in the remote + // commit tx yet, send out an update. + if l.channel.OweCommitment(true) { if err := l.updateCommitTx(); err != nil { l.fail(LinkFailureError{code: ErrInternalError}, "unable to update commitment: %v", err) @@ -1880,7 +1842,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { case *lnwire.UpdateFee: // We received fee update from peer. If we are the initiator we // will fail the channel, if not we will apply the update. - fee := lnwallet.SatPerKWeight(msg.FeePerKw) + fee := chainfee.SatPerKWeight(msg.FeePerKw) if err := l.channel.ReceiveUpdateFee(fee); err != nil { l.fail(LinkFailureError{code: ErrInvalidUpdate}, "error receiving fee update: %v", err) @@ -1894,8 +1856,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) { "ChannelPoint(%v): received error from peer: %v", l.channel.ChannelPoint(), msg.Error()) default: - log.Warnf("ChannelPoint(%v): received unknown message of type %T", - l.channel.ChannelPoint(), msg) + l.log.Warnf("received unknown message of type %T", msg) } } @@ -1919,7 +1880,7 @@ func (l *channelLink) ackDownStreamPackets() error { continue } - l.debugf("removing Add packet %s from mailbox", inKey) + l.log.Debugf("removing Add packet %s from mailbox", inKey) l.mailBox.AckPacket(inKey) } @@ -1933,7 +1894,7 @@ func (l *channelLink) ackDownStreamPackets() error { // Successful deletion. 
default: - l.errorf("unable to delete %d circuits: %v", + l.log.Errorf("unable to delete %d circuits: %v", len(l.closedCircuits), err) return err } @@ -1944,7 +1905,8 @@ func (l *channelLink) ackDownStreamPackets() error { // the circuits must have been removed at some point, so it is now safe // to un-queue the corresponding Settle/Fails. for _, inKey := range l.closedCircuits { - l.debugf("removing Fail/Settle packet %s from mailbox", inKey) + l.log.Debugf("removing Fail/Settle packet %s from mailbox", + inKey) l.mailBox.AckPacket(inKey) } @@ -1976,15 +1938,18 @@ func (l *channelLink) updateCommitTx() error { // permits testing of either the switch or link's ability to trim // circuits that have been opened, but unsuccessfully committed. if l.cfg.HodlMask.Active(hodl.Commit) { - l.warnf(hodl.Commit.Warning()) + l.log.Warnf(hodl.Commit.Warning()) return nil } theirCommitSig, htlcSigs, pendingHTLCs, err := l.channel.SignNextCommitment() if err == lnwallet.ErrNoWindow { - l.tracef("revocation window exhausted, unable to send: %v, "+ - "dangling_opens=%v, dangling_closes%v", - l.batchCounter, newLogClosure(func() string { + l.cfg.PendingCommitTicker.Resume() + + l.log.Tracef("revocation window exhausted, unable to send: "+ + "%v, pend_updates=%v, dangling_closes%v", + l.channel.PendingLocalUpdateCount(), + newLogClosure(func() string { return spew.Sdump(l.openedCircuits) }), newLogClosure(func() string { @@ -1996,6 +1961,12 @@ func (l *channelLink) updateCommitTx() error { return err } + if err := l.ackDownStreamPackets(); err != nil { + return err + } + + l.cfg.PendingCommitTicker.Pause() + // The remote party now has a new pending commitment, so we'll update // the contract court to be aware of this new set (the prior old remote // pending). 
@@ -2005,11 +1976,7 @@ func (l *channelLink) updateCommitTx() error { Htlcs: pendingHTLCs, }: case <-l.quit: - return nil - } - - if err := l.ackDownStreamPackets(); err != nil { - return err + return ErrLinkShuttingDown } commitSig := &lnwire.CommitSig{ @@ -2019,21 +1986,6 @@ func (l *channelLink) updateCommitTx() error { } l.cfg.Peer.SendMessage(false, commitSig) - // We've just initiated a state transition, attempt to stop the - // logCommitTimer. If the timer already ticked, then we'll consume the - // value, dropping - if l.logCommitTimer != nil && !l.logCommitTimer.Stop() { - select { - case <-l.logCommitTimer.C: - default: - } - } - l.logCommitTick = nil - - // Finally, clear our the current batch, so we can accurately make - // further batch flushing decisions. - l.batchCounter = 0 - return nil } @@ -2077,14 +2029,14 @@ func (l *channelLink) UpdateShortChanID() (lnwire.ShortChannelID, error) { // short channel ID. err := l.channel.State().RefreshShortChanID() if err != nil { - l.errorf("unable to refresh short_chan_id for chan_id=%v: %v", - chanID, err) + l.log.Errorf("unable to refresh short_chan_id for chan_id=%v: "+ + "%v", chanID, err) return hop.Source, err } sid := l.channel.ShortChanID() - l.infof("Updating to short_chan_id=%v for chan_id=%v", sid, chanID) + l.log.Infof("updating to short_chan_id=%v for chan_id=%v", sid, chanID) l.Lock() l.shortChanID = sid @@ -2096,8 +2048,7 @@ func (l *channelLink) UpdateShortChanID() (lnwire.ShortChannelID, error) { ShortChanID: sid, }) if err != nil { - log.Errorf("Unable to update signals for "+ - "ChannelLink(%v)", l) + l.log.Errorf("unable to update signals") } }() @@ -2124,26 +2075,10 @@ func (l *channelLink) ChanID() lnwire.ChannelID { // // NOTE: Part of the ChannelLink interface. 
func (l *channelLink) Bandwidth() lnwire.MilliSatoshi { - channelBandwidth := l.channel.AvailableBalance() - overflowBandwidth := l.overflowQueue.TotalHtlcAmount() - - // To compute the total bandwidth, we'll take the current available - // bandwidth, then subtract the overflow bandwidth as we'll eventually - // also need to evaluate those HTLC's once space on the commitment - // transaction is free. - linkBandwidth := channelBandwidth - overflowBandwidth - - // If the channel reserve is greater than the total available balance - // of the link, just return 0. - reserve := lnwire.NewMSatFromSatoshis(l.channel.LocalChanReserve()) - if linkBandwidth < reserve { - return 0 - } - - // Else the amount that is available to flow through the link at this - // point is the available balance minus the reserve amount we are - // required to keep as collateral. - return linkBandwidth - reserve + // Get the balance available on the channel for new HTLCs. This takes + // the channel reserve into account so HTLCs up to this value won't + // violate it. + return l.channel.AvailableBalance() } // AttachMailBox updates the current mailbox used by this link, and hooks up @@ -2171,23 +2106,24 @@ func (l *channelLink) UpdateForwardingPolicy(newPolicy ForwardingPolicy) { l.cfg.FwrdingPolicy = newPolicy } -// HtlcSatifiesPolicy should return a nil error if the passed HTLC details -// satisfy the current forwarding policy fo the target link. Otherwise, a -// valid protocol failure message should be returned in order to signal to the -// source of the HTLC, the policy consistency issue. +// CheckHtlcForward should return a nil error if the passed HTLC details +// satisfy the current forwarding policy fo the target link. Otherwise, +// a LinkError with a valid protocol failure message should be returned +// in order to signal to the source of the HTLC, the policy consistency +// issue. // // NOTE: Part of the ChannelLink interface. 
-func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte, +func (l *channelLink) CheckHtlcForward(payHash [32]byte, incomingHtlcAmt, amtToForward lnwire.MilliSatoshi, incomingTimeout, outgoingTimeout uint32, - heightNow uint32) lnwire.FailureMessage { + heightNow uint32) *LinkError { l.RLock() policy := l.cfg.FwrdingPolicy l.RUnlock() // First check whether the outgoing htlc satisfies the channel policy. - err := l.htlcSatifiesPolicyOutgoing( + err := l.canSendHtlc( policy, payHash, amtToForward, outgoingTimeout, heightNow, ) if err != nil { @@ -2206,22 +2142,20 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte, // any case, we'll cancel this HTLC. actualFee := incomingHtlcAmt - amtToForward if incomingHtlcAmt < amtToForward || actualFee < expectedFee { - l.errorf("outgoing htlc(%x) has insufficient fee: expected %v, "+ - "got %v", payHash[:], int64(expectedFee), int64(actualFee)) + l.log.Errorf("outgoing htlc(%x) has insufficient fee: "+ + "expected %v, got %v", + payHash[:], int64(expectedFee), int64(actualFee)) // As part of the returned error, we'll send our latest routing // policy so the sending node obtains the most up to date data. - var failure lnwire.FailureMessage - update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID()) - if err != nil { - failure = &lnwire.FailTemporaryNodeFailure{} - } else { - failure = lnwire.NewFeeInsufficient( - amtToForward, *update, - ) - } - - return failure + failure := l.createFailureWithUpdate( + func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { + return lnwire.NewFeeInsufficient( + amtToForward, *upd, + ) + }, + ) + return NewLinkError(failure) } // Finally, we'll ensure that the time-lock on the outgoing HTLC meets @@ -2230,124 +2164,119 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte, // sender messed up, or an intermediate node tampered with the HTLC. 
timeDelta := policy.TimeLockDelta if incomingTimeout < outgoingTimeout+timeDelta { - l.errorf("Incoming htlc(%x) has incorrect time-lock value: "+ + l.log.Errorf("incoming htlc(%x) has incorrect time-lock value: "+ "expected at least %v block delta, got %v block delta", payHash[:], timeDelta, incomingTimeout-outgoingTimeout) // Grab the latest routing policy so the sending node is up to // date with our current policy. - var failure lnwire.FailureMessage - update, err := l.cfg.FetchLastChannelUpdate( - l.ShortChanID(), + failure := l.createFailureWithUpdate( + func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { + return lnwire.NewIncorrectCltvExpiry( + incomingTimeout, *upd, + ) + }, ) - if err != nil { - failure = lnwire.NewTemporaryChannelFailure(update) - } else { - failure = lnwire.NewIncorrectCltvExpiry( - incomingTimeout, *update, - ) - } - - return failure + return NewLinkError(failure) } return nil } -// HtlcSatifiesPolicyLocal should return a nil error if the passed HTLC details -// satisfy the current channel policy. Otherwise, a valid protocol failure -// message should be returned in order to signal the violation. This call is -// intended to be used for locally initiated payments for which there is no -// corresponding incoming htlc. -func (l *channelLink) HtlcSatifiesPolicyLocal(payHash [32]byte, +// CheckHtlcTransit should return a nil error if the passed HTLC details +// satisfy the current channel policy. Otherwise, a LinkError with a +// valid protocol failure message should be returned in order to signal +// the violation. This call is intended to be used for locally initiated +// payments for which there is no corresponding incoming htlc. 
+func (l *channelLink) CheckHtlcTransit(payHash [32]byte, amt lnwire.MilliSatoshi, timeout uint32, - heightNow uint32) lnwire.FailureMessage { + heightNow uint32) *LinkError { l.RLock() policy := l.cfg.FwrdingPolicy l.RUnlock() - return l.htlcSatifiesPolicyOutgoing( + return l.canSendHtlc( policy, payHash, amt, timeout, heightNow, ) } // htlcSatifiesPolicyOutgoing checks whether the given htlc parameters satisfy // the channel's amount and time lock constraints. -func (l *channelLink) htlcSatifiesPolicyOutgoing(policy ForwardingPolicy, +func (l *channelLink) canSendHtlc(policy ForwardingPolicy, payHash [32]byte, amt lnwire.MilliSatoshi, timeout uint32, - heightNow uint32) lnwire.FailureMessage { + heightNow uint32) *LinkError { // As our first sanity check, we'll ensure that the passed HTLC isn't // too small for the next hop. If so, then we'll cancel the HTLC // directly. - if amt < policy.MinHTLC { - l.errorf("outgoing htlc(%x) is too small: min_htlc=%v, "+ - "htlc_value=%v", payHash[:], policy.MinHTLC, + if amt < policy.MinHTLCOut { + l.log.Errorf("outgoing htlc(%x) is too small: min_htlc=%v, "+ + "htlc_value=%v", payHash[:], policy.MinHTLCOut, amt) // As part of the returned error, we'll send our latest routing // policy so the sending node obtains the most up to date data. - var failure lnwire.FailureMessage - update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID()) - if err != nil { - failure = &lnwire.FailTemporaryNodeFailure{} - } else { - failure = lnwire.NewAmountBelowMinimum( - amt, *update, - ) - } - - return failure + failure := l.createFailureWithUpdate( + func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { + return lnwire.NewAmountBelowMinimum( + amt, *upd, + ) + }, + ) + return NewLinkError(failure) } - // Next, ensure that the passed HTLC isn't too large. If so, we'll cancel - // the HTLC directly. + // Next, ensure that the passed HTLC isn't too large. If so, we'll + // cancel the HTLC directly. 
if policy.MaxHTLC != 0 && amt > policy.MaxHTLC { - l.errorf("outgoing htlc(%x) is too large: max_htlc=%v, "+ + l.log.Errorf("outgoing htlc(%x) is too large: max_htlc=%v, "+ "htlc_value=%v", payHash[:], policy.MaxHTLC, amt) - // As part of the returned error, we'll send our latest routing policy - // so the sending node obtains the most up-to-date data. - var failure lnwire.FailureMessage - update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID()) - if err != nil { - failure = &lnwire.FailTemporaryNodeFailure{} - } else { - failure = lnwire.NewTemporaryChannelFailure(update) - } - - return failure + // As part of the returned error, we'll send our latest routing + // policy so the sending node obtains the most up-to-date data. + failure := l.createFailureWithUpdate( + func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { + return lnwire.NewTemporaryChannelFailure(upd) + }, + ) + return NewDetailedLinkError(failure, OutgoingFailureHTLCExceedsMax) } // We want to avoid offering an HTLC which will expire in the near // future, so we'll reject an HTLC if the outgoing expiration time is // too close to the current height. if timeout <= heightNow+l.cfg.OutgoingCltvRejectDelta { - l.errorf("htlc(%x) has an expiry that's too soon: "+ + l.log.Errorf("htlc(%x) has an expiry that's too soon: "+ "outgoing_expiry=%v, best_height=%v", payHash[:], timeout, heightNow) - - var failure lnwire.FailureMessage - update, err := l.cfg.FetchLastChannelUpdate( - l.ShortChanID(), + failure := l.createFailureWithUpdate( + func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { + return lnwire.NewExpiryTooSoon(*upd) + }, ) - if err != nil { - failure = lnwire.NewTemporaryChannelFailure(update) - } else { - failure = lnwire.NewExpiryTooSoon(*update) - } - - return failure + return NewLinkError(failure) } // Check absolute max delta. 
if timeout > l.cfg.MaxOutgoingCltvExpiry+heightNow { - l.errorf("outgoing htlc(%x) has a time lock too far in the "+ - "future: got %v, but maximum is %v", payHash[:], + l.log.Errorf("outgoing htlc(%x) has a time lock too far in "+ + "the future: got %v, but maximum is %v", payHash[:], timeout-heightNow, l.cfg.MaxOutgoingCltvExpiry) - return &lnwire.FailExpiryTooFar{} + return NewLinkError(&lnwire.FailExpiryTooFar{}) + } + + // Check to see if there is enough balance in this channel. + if amt > l.Bandwidth() { + failure := l.createFailureWithUpdate( + func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { + return lnwire.NewTemporaryChannelFailure(upd) + }, + ) + return NewDetailedLinkError( + failure, OutgoingFailureInsufficientBalance, + ) } return nil @@ -2377,11 +2306,10 @@ func (l *channelLink) String() string { // // NOTE: Part of the ChannelLink interface. func (l *channelLink) HandleSwitchPacket(pkt *htlcPacket) error { - l.tracef("received switch packet inkey=%v, outkey=%v", + l.log.Tracef("received switch packet inkey=%v, outkey=%v", pkt.inKey(), pkt.outKey()) - l.mailBox.AddPacket(pkt) - return nil + return l.mailBox.AddPacket(pkt) } // HandleChannelUpdate handles the htlc requests as settle/add/fail which sent @@ -2394,16 +2322,14 @@ func (l *channelLink) HandleChannelUpdate(message lnwire.Message) { // updateChannelFee updates the commitment fee-per-kw on this channel by // committing to an update_fee message. -func (l *channelLink) updateChannelFee(feePerKw lnwallet.SatPerKWeight) error { +func (l *channelLink) updateChannelFee(feePerKw chainfee.SatPerKWeight) error { - log.Infof("ChannelPoint(%v): updating commit fee to %v sat/kw", l, - feePerKw) + l.log.Infof("updating commit fee to %v sat/kw", feePerKw) // We skip sending the UpdateFee message if the channel is not // currently eligible to forward messages. 
if !l.EligibleToForward() { - log.Debugf("ChannelPoint(%v): skipping fee update for "+ - "inactive channel", l.ChanID()) + l.log.Debugf("skipping fee update for inactive channel") return nil } @@ -2433,8 +2359,7 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg, return } - log.Debugf("ChannelLink(%v): settle-fail-filter %v", - l.ShortChanID(), fwdPkg.SettleFailFilter) + l.log.Debugf("settle-fail-filter %v", fwdPkg.SettleFailFilter) var switchPackets []*htlcPacket for i, pd := range settleFails { @@ -2458,7 +2383,7 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg, // forward the SETTLE to the switch and will not signal // a free slot on the commitment transaction. if l.cfg.HodlMask.Active(hodl.SettleIncoming) { - l.warnf(hodl.SettleIncoming.Warning()) + l.log.Warnf(hodl.SettleIncoming.Warning()) continue } @@ -2475,7 +2400,6 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg, // notify the overflow queue that a spare spot has been // freed up within the commitment state. switchPackets = append(switchPackets, settlePacket) - l.overflowQueue.SignalFreeSlot() // A failureCode message for a previously forwarded HTLC has // been received. As a result a new slot will be freed up in @@ -2486,12 +2410,14 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg, // forward the FAIL to the switch and will not signal a // free slot on the commitment transaction. if l.cfg.HodlMask.Active(hodl.FailIncoming) { - l.warnf(hodl.FailIncoming.Warning()) + l.log.Warnf(hodl.FailIncoming.Warning()) continue } // Fetch the reason the HTLC was canceled so we can - // continue to propagate it. + // continue to propagate it. This failure originated + // from another node, so the linkFailure field is not + // set on the packet. 
failPacket := &htlcPacket{ outgoingChanID: l.ShortChanID(), outgoingHTLCID: pd.ParentIndex, @@ -2519,7 +2445,6 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg, // notify the overflow queue that a spare spot has been // freed up within the commitment state. switchPackets = append(switchPackets, failPacket) - l.overflowQueue.SignalFreeSlot() } } @@ -2536,9 +2461,9 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg, // whether we are reprocessing as a result of a failure or restart. Adds that // have already been acknowledged in the forwarding package will be ignored. func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg, - lockedInHtlcs []*lnwallet.PaymentDescriptor) bool { + lockedInHtlcs []*lnwallet.PaymentDescriptor) { - l.tracef("processing %d remote adds for height %d", + l.log.Tracef("processing %d remote adds for height %d", len(lockedInHtlcs), fwdPkg.Height) decodeReqs := make( @@ -2575,13 +2500,10 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg, if sphinxErr != nil { l.fail(LinkFailureError{code: ErrInternalError}, "unable to decode hop iterators: %v", sphinxErr) - return false + return } - var ( - needUpdate bool - switchPackets []*htlcPacket - ) + var switchPackets []*htlcPacket for i, pd := range lockedInHtlcs { idx := uint16(i) @@ -2618,9 +2540,8 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg, // sender. 
l.sendMalformedHTLCError(pd.HtlcIndex, failureCode, onionBlob[:], pd.SourceRef) - needUpdate = true - log.Errorf("unable to decode onion hop "+ + l.log.Errorf("unable to decode onion hop "+ "iterator: %v", failureCode) continue } @@ -2637,48 +2558,54 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg, l.sendMalformedHTLCError( pd.HtlcIndex, failureCode, onionBlob[:], pd.SourceRef, ) - needUpdate = true - log.Errorf("unable to decode onion "+ + l.log.Errorf("unable to decode onion "+ "obfuscator: %v", failureCode) continue } heightNow := l.cfg.Switch.BestHeight() - fwdInfo, err := chanIterator.ForwardingInstructions() + pld, err := chanIterator.HopPayload() if err != nil { // If we're unable to process the onion payload, or we - // we received malformed TLV stream, then we should - // send an error back to the caller so the HTLC can be - // canceled. + // received invalid onion payload failure, then we + // should send an error back to the caller so the HTLC + // can be canceled. + var failedType uint64 + if e, ok := err.(hop.ErrInvalidPayload); ok { + failedType = uint64(e.Type) + } + + // TODO: currently none of the test unit infrastructure + // is setup to handle TLV payloads, so testing this + // would require implementing a separate mock iterator + // for TLV payloads that also supports injecting invalid + // payloads. 
Deferring this non-trival effort till a + // later date + failure := lnwire.NewInvalidOnionPayload(failedType, 0) l.sendHTLCError( - pd.HtlcIndex, - lnwire.NewInvalidOnionVersion(onionBlob[:]), - obfuscator, pd.SourceRef, + pd, NewLinkError(failure), obfuscator, false, ) - needUpdate = true - log.Errorf("Unable to decode forwarding "+ + l.log.Errorf("unable to decode forwarding "+ "instructions: %v", err) continue } + fwdInfo := pld.ForwardingInfo() + switch fwdInfo.NextHop { case hop.Exit: - updated, err := l.processExitHop( - pd, obfuscator, fwdInfo, heightNow, - chanIterator.ExtraOnionBlob(), + err := l.processExitHop( + pd, obfuscator, fwdInfo, heightNow, pld, ) if err != nil { l.fail(LinkFailureError{code: ErrInternalError}, err.Error(), ) - return false - } - if updated { - needUpdate = true + return } // There are additional channels left within this route. So @@ -2688,7 +2615,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg, // validate the forwarded ADD, nor will we send the // packet to the htlc switch. 
if l.cfg.HodlMask.Active(hodl.AddIncoming) { - l.warnf(hodl.AddIncoming.Warning()) + l.log.Warnf(hodl.AddIncoming.Warning()) continue } @@ -2759,25 +2686,20 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg, buf := bytes.NewBuffer(addMsg.OnionBlob[0:0]) err := chanIterator.EncodeNextHop(buf) if err != nil { - log.Errorf("unable to encode the "+ + l.log.Errorf("unable to encode the "+ "remaining route %v", err) - var failure lnwire.FailureMessage - update, err := l.cfg.FetchLastChannelUpdate( - l.ShortChanID(), + failure := l.createFailureWithUpdate( + func(upd *lnwire.ChannelUpdate) lnwire.FailureMessage { + return lnwire.NewTemporaryChannelFailure( + upd, + ) + }, ) - if err != nil { - failure = &lnwire.FailTemporaryNodeFailure{} - } else { - failure = lnwire.NewTemporaryChannelFailure( - update, - ) - } l.sendHTLCError( - pd.HtlcIndex, failure, obfuscator, pd.SourceRef, + pd, NewLinkError(failure), obfuscator, false, ) - needUpdate = true continue } @@ -2817,15 +2739,15 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg, if err != nil { l.fail(LinkFailureError{code: ErrInternalError}, "unable to set fwd filter: %v", err) - return false + return } } if len(switchPackets) == 0 { - return needUpdate + return } - l.debugf("forwarding %d packets to switch", len(switchPackets)) + l.log.Debugf("forwarding %d packets to switch", len(switchPackets)) // NOTE: This call is made synchronous so that we ensure all circuits // are committed in the exact order that they are processed in the link. @@ -2833,23 +2755,21 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg, // opened circuits, which violates assumptions made by the circuit // trimming. l.forwardBatch(switchPackets...) - - return needUpdate } // processExitHop handles an htlc for which this link is the exit hop. It // returns a boolean indicating whether the commitment tx needs an update. 
func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor, obfuscator hop.ErrorEncrypter, fwdInfo hop.ForwardingInfo, - heightNow uint32, eob []byte) (bool, error) { + heightNow uint32, payload invoices.Payload) error { // If hodl.ExitSettle is requested, we will not validate the final hop's // ADD, nor will we settle the corresponding invoice or respond with the // preimage. if l.cfg.HodlMask.Active(hodl.ExitSettle) { - l.warnf(hodl.ExitSettle.Warning()) + l.log.Warnf(hodl.ExitSettle.Warning()) - return false, nil + return nil } // As we're the exit hop, we'll double check the hop-payload included in @@ -2857,27 +2777,31 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor, // matches the HTLC we were extended. if pd.Amount != fwdInfo.AmountToForward { - log.Errorf("Onion payload of incoming htlc(%x) has incorrect "+ + l.log.Errorf("onion payload of incoming htlc(%x) has incorrect "+ "value: expected %v, got %v", pd.RHash, pd.Amount, fwdInfo.AmountToForward) - failure := lnwire.NewFinalIncorrectHtlcAmount(pd.Amount) - l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef) + failure := NewLinkError( + lnwire.NewFinalIncorrectHtlcAmount(pd.Amount), + ) + l.sendHTLCError(pd, failure, obfuscator, true) - return true, nil + return nil } // We'll also ensure that our time-lock value has been computed // correctly. if pd.Timeout != fwdInfo.OutgoingCTLV { - log.Errorf("Onion payload of incoming htlc(%x) has incorrect "+ + l.log.Errorf("onion payload of incoming htlc(%x) has incorrect "+ "time-lock: expected %v, got %v", pd.RHash[:], pd.Timeout, fwdInfo.OutgoingCTLV) - failure := lnwire.NewFinalIncorrectCltvExpiry(pd.Timeout) - l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef) + failure := NewLinkError( + lnwire.NewFinalIncorrectCltvExpiry(pd.Timeout), + ) + l.sendHTLCError(pd, failure, obfuscator, true) - return true, nil + return nil } // Notify the invoiceRegistry of the exit hop htlc. 
If we crash right @@ -2892,24 +2816,10 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor, event, err := l.cfg.Registry.NotifyExitHopHtlc( invoiceHash, pd.Amount, pd.Timeout, int32(heightNow), - circuitKey, l.hodlQueue.ChanIn(), eob, + circuitKey, l.hodlQueue.ChanIn(), payload, ) - - switch err { - - // Cancel htlc if we don't have an invoice for it. - case channeldb.ErrInvoiceNotFound: - failure := lnwire.NewFailIncorrectDetails(pd.Amount, heightNow) - l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef) - - return true, nil - - // No error. - case nil: - - // Pass error to caller. - default: - return false, err + if err != nil { + return err } // Create a hodlHtlc struct and decide either resolved now or later. @@ -2918,31 +2828,27 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor, obfuscator: obfuscator, } + // If the event is nil, the invoice is being held, so we save payment + // descriptor for future reference. if event == nil { - // Save payment descriptor for future reference. l.hodlMap[circuitKey] = htlc - - return false, nil + return nil } // Process the received resolution. - err = l.processHodlEvent(*event, htlc) - if err != nil { - return false, err - } - return true, nil + return l.processHtlcResolution(event, htlc) } // settleHTLC settles the HTLC on the channel. 
-func (l *channelLink) settleHTLC(preimage lntypes.Preimage, htlcIndex uint64, - sourceRef *channeldb.AddRef) error { +func (l *channelLink) settleHTLC(preimage lntypes.Preimage, + pd *lnwallet.PaymentDescriptor) error { hash := preimage.Hash() - l.infof("settling htlc %v as exit hop", hash) + l.log.Infof("settling htlc %v as exit hop", hash) err := l.channel.SettleHTLC( - preimage, htlcIndex, sourceRef, nil, nil, + preimage, pd.HtlcIndex, pd.SourceRef, nil, nil, ) if err != nil { return fmt.Errorf("unable to settle htlc: %v", err) @@ -2951,7 +2857,7 @@ func (l *channelLink) settleHTLC(preimage lntypes.Preimage, htlcIndex uint64, // If the link is in hodl.BogusSettle mode, replace the preimage with a // fake one before sending it to the peer. if l.cfg.HodlMask.Active(hodl.BogusSettle) { - l.warnf(hodl.BogusSettle.Warning()) + l.log.Warnf(hodl.BogusSettle.Warning()) preimage = [32]byte{} copy(preimage[:], bytes.Repeat([]byte{2}, 32)) } @@ -2960,10 +2866,21 @@ func (l *channelLink) settleHTLC(preimage lntypes.Preimage, htlcIndex uint64, // remote peer. l.cfg.Peer.SendMessage(false, &lnwire.UpdateFulfillHTLC{ ChanID: l.ChanID(), - ID: htlcIndex, + ID: pd.HtlcIndex, PaymentPreimage: preimage, }) + // Once we have successfully settled the htlc, notify a settle event. + l.cfg.HtlcNotifier.NotifySettleEvent( + HtlcKey{ + IncomingCircuit: channeldb.CircuitKey{ + ChanID: l.ShortChanID(), + HtlcID: pd.HtlcIndex, + }, + }, + HtlcEventTypeReceive, + ) + return nil } @@ -2984,51 +2901,57 @@ func (l *channelLink) forwardBatch(packets ...*htlcPacket) { } errChan := l.cfg.ForwardPackets(l.quit, filteredPkts...) - go l.handleBatchFwdErrs(errChan) -} - -// handleBatchFwdErrs waits on the given errChan until it is closed, logging -// the errors returned from any unsuccessful forwarding attempts. -func (l *channelLink) handleBatchFwdErrs(errChan chan error) { - for { - err, ok := <-errChan - if !ok { - // Err chan has been drained or switch is shutting - // down. Either way, return. 
- return - } - - if err == nil { - continue - } - - l.errorf("unhandled error while forwarding htlc packet over "+ - "htlcswitch: %v", err) - } + go handleBatchFwdErrs(errChan, l.log) } // sendHTLCError functions cancels HTLC and send cancel message back to the // peer from which HTLC was received. -func (l *channelLink) sendHTLCError(htlcIndex uint64, failure lnwire.FailureMessage, - e hop.ErrorEncrypter, sourceRef *channeldb.AddRef) { +func (l *channelLink) sendHTLCError(pd *lnwallet.PaymentDescriptor, + failure *LinkError, e hop.ErrorEncrypter, isReceive bool) { - reason, err := e.EncryptFirstHop(failure) + reason, err := e.EncryptFirstHop(failure.WireMessage()) if err != nil { - log.Errorf("unable to obfuscate error: %v", err) + l.log.Errorf("unable to obfuscate error: %v", err) return } - err = l.channel.FailHTLC(htlcIndex, reason, sourceRef, nil, nil) + err = l.channel.FailHTLC(pd.HtlcIndex, reason, pd.SourceRef, nil, nil) if err != nil { - log.Errorf("unable cancel htlc: %v", err) + l.log.Errorf("unable cancel htlc: %v", err) return } l.cfg.Peer.SendMessage(false, &lnwire.UpdateFailHTLC{ ChanID: l.ChanID(), - ID: htlcIndex, + ID: pd.HtlcIndex, Reason: reason, }) + + // Notify a link failure on our incoming link. Outgoing htlc information + // is not available at this point, because we have not decrypted the + // onion, so it is excluded. 
+ var eventType HtlcEventType + if isReceive { + eventType = HtlcEventTypeReceive + } else { + eventType = HtlcEventTypeForward + } + + l.cfg.HtlcNotifier.NotifyLinkFailEvent( + HtlcKey{ + IncomingCircuit: channeldb.CircuitKey{ + ChanID: l.ShortChanID(), + HtlcID: pd.HtlcIndex, + }, + }, + HtlcInfo{ + IncomingTimeLock: pd.Timeout, + IncomingAmt: pd.Amount, + }, + eventType, + failure, + true, + ) } // sendMalformedHTLCError helper function which sends the malformed HTLC update @@ -3039,7 +2962,7 @@ func (l *channelLink) sendMalformedHTLCError(htlcIndex uint64, shaOnionBlob := sha256.Sum256(onionBlob) err := l.channel.MalformedFailHTLC(htlcIndex, code, shaOnionBlob, sourceRef) if err != nil { - log.Errorf("unable cancel htlc: %v", err) + l.log.Errorf("unable cancel htlc: %v", err) return } @@ -3062,45 +2985,15 @@ func (l *channelLink) fail(linkErr LinkFailureError, // Return if we have already notified about a failure. if l.failed { - l.warnf("Ignoring link failure (%v), as link already failed", - reason) + l.log.Warnf("ignoring link failure (%v), as link already "+ + "failed", reason) return } - l.errorf("Failing link: %s", reason) + l.log.Errorf("failing link: %s with error: %v", reason, linkErr) // Set failed, such that we won't process any more updates, and notify // the peer about the failure. l.failed = true l.cfg.OnChannelFailure(l.ChanID(), l.ShortChanID(), linkErr) } - -// infof prefixes the channel's identifier before printing to info log. -func (l *channelLink) infof(format string, a ...interface{}) { - msg := fmt.Sprintf(format, a...) - log.Infof("ChannelLink(%s) %s", l.ShortChanID(), msg) -} - -// debugf prefixes the channel's identifier before printing to debug log. -func (l *channelLink) debugf(format string, a ...interface{}) { - msg := fmt.Sprintf(format, a...) - log.Debugf("ChannelLink(%s) %s", l.ShortChanID(), msg) -} - -// warnf prefixes the channel's identifier before printing to warn log. 
-func (l *channelLink) warnf(format string, a ...interface{}) { - msg := fmt.Sprintf(format, a...) - log.Warnf("ChannelLink(%s) %s", l.ShortChanID(), msg) -} - -// errorf prefixes the channel's identifier before printing to error log. -func (l *channelLink) errorf(format string, a ...interface{}) { - msg := fmt.Sprintf(format, a...) - log.Errorf("ChannelLink(%s) %s", l.ShortChanID(), msg) -} - -// tracef prefixes the channel's identifier before printing to trace log. -func (l *channelLink) tracef(format string, a ...interface{}) { - msg := fmt.Sprintf(format, a...) - log.Tracef("ChannelLink(%s) %s", l.ShortChanID(), msg) -} diff --git a/htlcswitch/link_isolated_test.go b/htlcswitch/link_isolated_test.go index 8f059d748e..f108853f1e 100644 --- a/htlcswitch/link_isolated_test.go +++ b/htlcswitch/link_isolated_test.go @@ -139,6 +139,21 @@ func (l *linkTestContext) receiveRevAndAckAliceToBob() { func (l *linkTestContext) receiveCommitSigAliceToBob(expHtlcs int) { l.t.Helper() + comSig := l.receiveCommitSigAlice(expHtlcs) + + err := l.bobChannel.ReceiveNewCommitment( + comSig.CommitSig, comSig.HtlcSigs, + ) + if err != nil { + l.t.Fatalf("bob failed receiving commitment: %v", err) + } +} + +// receiveCommitSigAlice waits for Alice to send a CommitSig, signing expHtlcs +// numbers of HTLCs. 
+func (l *linkTestContext) receiveCommitSigAlice(expHtlcs int) *lnwire.CommitSig { + l.t.Helper() + var msg lnwire.Message select { case msg = <-l.aliceMsgs: @@ -155,11 +170,8 @@ func (l *linkTestContext) receiveCommitSigAliceToBob(expHtlcs int) { l.t.Fatalf("expected %d htlc sigs, got %d", expHtlcs, len(comSig.HtlcSigs)) } - err := l.bobChannel.ReceiveNewCommitment(comSig.CommitSig, - comSig.HtlcSigs) - if err != nil { - l.t.Fatalf("bob failed receiving commitment: %v", err) - } + + return comSig } // sendRevAndAckBobToAlice make Bob revoke his current commitment, then hand @@ -242,3 +254,15 @@ func (l *linkTestContext) receiveFailAliceToBob() { l.t.Fatalf("unable to apply received fail htlc: %v", err) } } + +// assertNoMsgFromAlice asserts that Alice hasn't sent a message. Before +// calling, make sure that Alice has had the opportunity to send the message. +func (l *linkTestContext) assertNoMsgFromAlice(timeout time.Duration) { + l.t.Helper() + + select { + case msg := <-l.aliceMsgs: + l.t.Fatalf("unexpected message from Alice: %v", msg) + case <-time.After(timeout): + } +} diff --git a/htlcswitch/link_test.go b/htlcswitch/link_test.go index c6bc3be140..935ccde3db 100644 --- a/htlcswitch/link_test.go +++ b/htlcswitch/link_test.go @@ -7,7 +7,6 @@ import ( "encoding/binary" "fmt" "io" - "math" "net" "reflect" "runtime" @@ -19,24 +18,28 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" + sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/build" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/htlcswitch/hodl" "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnpeer" + 
"github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/ticker" ) const ( testStartingHeight = 100 + testDefaultDelta = 6 ) // concurrentTester is a thread-safe wrapper around the Fatalf method of a @@ -243,7 +246,7 @@ func TestChannelLinkSingleHopPayment(t *testing.T) { if err != nil { t.Fatalf("unable to get invoice: %v", err) } - if invoice.Terms.State != channeldb.ContractSettled { + if invoice.State != channeldb.ContractSettled { t.Fatal("alice invoice wasn't settled") } @@ -259,141 +262,6 @@ func TestChannelLinkSingleHopPayment(t *testing.T) { } } -// TestChannelLinkBidirectionalOneHopPayments tests the ability of channel -// link to cope with bigger number of payment updates that commitment -// transaction may consist. -func TestChannelLinkBidirectionalOneHopPayments(t *testing.T) { - t.Parallel() - - channels, cleanUp, _, err := createClusterChannels( - btcutil.SatoshiPerBitcoin*3, - btcutil.SatoshiPerBitcoin*5) - if err != nil { - t.Fatalf("unable to create channel: %v", err) - } - defer cleanUp() - - n := newThreeHopNetwork(t, channels.aliceToBob, channels.bobToAlice, - channels.bobToCarol, channels.carolToBob, testStartingHeight) - if err := n.start(); err != nil { - t.Fatal(err) - } - defer n.stop() - bobBandwidthBefore := n.firstBobChannelLink.Bandwidth() - aliceBandwidthBefore := n.aliceChannelLink.Bandwidth() - - debug := false - if debug { - // Log message that alice receives. - n.aliceServer.intersect(createLogFunc("alice", - n.aliceChannelLink.ChanID())) - - // Log message that bob receives. 
- n.bobServer.intersect(createLogFunc("bob", - n.firstBobChannelLink.ChanID())) - } - - amt := lnwire.NewMSatFromSatoshis(20000) - - htlcAmt, totalTimelock, hopsForwards := generateHops(amt, - testStartingHeight, n.firstBobChannelLink) - _, _, hopsBackwards := generateHops(amt, - testStartingHeight, n.aliceChannelLink) - - type result struct { - err error - start time.Time - number int - sender string - } - - // Send max available payment number in both sides, thereby testing - // the property of channel link to cope with overflowing. - count := 2 * input.MaxHTLCNumber - resultChan := make(chan *result, count) - for i := 0; i < count/2; i++ { - go func(i int) { - r := &result{ - start: time.Now(), - number: i, - sender: "alice", - } - - firstHop := n.firstBobChannelLink.ShortChanID() - _, r.err = makePayment( - n.aliceServer, n.bobServer, firstHop, - hopsForwards, amt, htlcAmt, totalTimelock, - ).Wait(5 * time.Minute) - resultChan <- r - }(i) - } - - for i := 0; i < count/2; i++ { - go func(i int) { - r := &result{ - start: time.Now(), - number: i, - sender: "bob", - } - - firstHop := n.aliceChannelLink.ShortChanID() - _, r.err = makePayment( - n.bobServer, n.aliceServer, firstHop, - hopsBackwards, amt, htlcAmt, totalTimelock, - ).Wait(5 * time.Minute) - resultChan <- r - }(i) - } - - maxDelay := time.Duration(0) - minDelay := time.Duration(math.MaxInt64) - averageDelay := time.Duration(0) - - // Check that alice invoice was settled and bandwidth of HTLC - // links was changed. 
- for i := 0; i < count; i++ { - select { - case r := <-resultChan: - if r.err != nil { - t.Fatalf("unable to make payment: %v", r.err) - } - - delay := time.Since(r.start) - if delay > maxDelay { - maxDelay = delay - } - - if delay < minDelay { - minDelay = delay - } - averageDelay += delay - - case <-time.After(5 * time.Minute): - t.Fatalf("timeout: (%v/%v)", i+1, count) - } - } - - // TODO(roasbeef): should instead consume async notifications from both - // links - time.Sleep(time.Second * 2) - - // At the end Bob and Alice balances should be the same as previous, - // because they sent the equal amount of money to each other. - if aliceBandwidthBefore != n.aliceChannelLink.Bandwidth() { - t.Fatalf("alice bandwidth shouldn't have changed: expected %v, got %x", - aliceBandwidthBefore, n.aliceChannelLink.Bandwidth()) - } - - if bobBandwidthBefore != n.firstBobChannelLink.Bandwidth() { - t.Fatalf("bob bandwidth shouldn't have changed: expected %v, got %v", - bobBandwidthBefore, n.firstBobChannelLink.Bandwidth()) - } - - t.Logf("Max waiting: %v", maxDelay) - t.Logf("Min waiting: %v", minDelay) - t.Logf("Average waiting: %v", time.Duration(int(averageDelay)/count)) -} - // TestChannelLinkMultiHopPayment checks the ability to send payment over two // hops. In this test we send the payment from Carol to Alice over Bob peer. // (Carol -> Bob -> Alice) and checking that HTLC was settled properly and @@ -503,7 +371,7 @@ func testChannelLinkMultiHopPayment(t *testing.T, if err != nil { t.Fatalf("unable to get invoice: %v", err) } - if invoice.Terms.State != channeldb.ContractSettled { + if invoice.State != channeldb.ContractSettled { t.Fatal("carol invoice haven't been settled") } @@ -532,6 +400,105 @@ func testChannelLinkMultiHopPayment(t *testing.T, } } +// TestChannelLinkCancelFullCommitment tests the ability for links to cancel +// forwarded HTLCs once all of their commitment slots are full. 
+func TestChannelLinkCancelFullCommitment(t *testing.T) { + t.Parallel() + + channels, cleanUp, _, err := createClusterChannels( + btcutil.SatoshiPerBitcoin*3, + btcutil.SatoshiPerBitcoin*5) + if err != nil { + t.Fatalf("unable to create channel: %v", err) + } + defer cleanUp() + + n := newTwoHopNetwork( + t, channels.aliceToBob, channels.bobToAlice, testStartingHeight, + ) + if err := n.start(); err != nil { + t.Fatal(err) + } + defer n.stop() + + // Fill up the commitment from Alice's side with 20 sat payments. + count := (input.MaxHTLCNumber / 2) + amt := lnwire.NewMSatFromSatoshis(20000) + + htlcAmt, totalTimelock, hopsForwards := generateHops(amt, + testStartingHeight, n.bobChannelLink) + + firstHop := n.aliceChannelLink.ShortChanID() + + // Create channels to buffer the preimage and error channels used in + // making the preliminary payments. + preimages := make([]lntypes.Preimage, count) + aliceErrChan := make(chan chan error, count) + + var wg sync.WaitGroup + for i := 0; i < count; i++ { + preimages[i] = lntypes.Preimage{byte(i >> 8), byte(i)} + + wg.Add(1) + go func(i int) { + defer wg.Done() + + errChan := n.makeHoldPayment( + n.aliceServer, n.bobServer, firstHop, + hopsForwards, amt, htlcAmt, totalTimelock, + preimages[i], + ) + aliceErrChan <- errChan + }(i) + } + + // Wait for Alice to finish filling her commitment. + wg.Wait() + close(aliceErrChan) + + // Now make an additional payment from Alice to Bob, this should be + // canceled because the commitment in this direction is full. 
+ err = <-makePayment( + n.aliceServer, n.bobServer, firstHop, hopsForwards, amt, + htlcAmt, totalTimelock, + ).err + if err == nil { + t.Fatalf("overflow payment should have failed") + } + lerr, ok := err.(*LinkError) + if !ok { + t.Fatalf("expected LinkError, got: %T", err) + } + + msg := lerr.WireMessage() + if _, ok := msg.(*lnwire.FailTemporaryChannelFailure); !ok { + t.Fatalf("expected TemporaryChannelFailure, got: %T", msg) + } + + // Now, settle all htlcs held by bob and clear the commitment of htlcs. + for _, preimage := range preimages { + preimage := preimage + + // It's possible that the HTLCs have not been delivered to the + // invoice registry at this point, so we poll until we are able + // to settle. + err = wait.NoError(func() error { + return n.bobServer.registry.SettleHodlInvoice(preimage) + }, time.Minute) + if err != nil { + t.Fatal(err) + } + } + + // Ensure that all of the payments sent by alice eventually succeed. + for errChan := range aliceErrChan { + err := <-errChan + if err != nil { + t.Fatalf("alice payment failed: %v", err) + } + } +} + // TestExitNodeTimelockPayloadMismatch tests that when an exit node receives an // incoming HTLC, if the time lock encoded in the payload of the forwarded HTLC // doesn't match the expected payment value, then the HTLC will be rejected @@ -562,7 +529,7 @@ func TestExitNodeTimelockPayloadMismatch(t *testing.T) { // per-hop payload for outgoing time lock to be the incorrect value. // The proper value of the outgoing CLTV should be the policy set by // the receiving node, instead we set it to be a random value. 
- hops[0].OutgoingCTLV = 500 + hops[0].FwdInfo.OutgoingCTLV = 500 firstHop := n.firstBobChannelLink.ShortChanID() _, err = makePayment( n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, @@ -572,12 +539,12 @@ func TestExitNodeTimelockPayloadMismatch(t *testing.T) { t.Fatalf("payment should have failed but didn't") } - ferr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got: %T", err) + t.Fatalf("expected a ClearTextError, instead got: %T", err) } - switch ferr.FailureMessage.(type) { + switch rtErr.WireMessage().(type) { case *lnwire.FailFinalIncorrectCltvExpiry: default: t.Fatalf("incorrect error, expected incorrect cltv expiry, "+ @@ -615,7 +582,7 @@ func TestExitNodeAmountPayloadMismatch(t *testing.T) { // per-hop payload for amount to be the incorrect value. The proper // value of the amount to forward should be the amount that the // receiving node expects to receive. - hops[0].AmountToForward = 1 + hops[0].FwdInfo.AmountToForward = 1 firstHop := n.firstBobChannelLink.ShortChanID() _, err = makePayment( n.aliceServer, n.bobServer, firstHop, hops, amount, htlcAmt, @@ -672,12 +639,12 @@ func TestLinkForwardTimelockPolicyMismatch(t *testing.T) { t.Fatalf("payment should have failed but didn't") } - ferr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got: %T", err) + t.Fatalf("expected a ClearTextError, instead got: %T", err) } - switch ferr.FailureMessage.(type) { + switch rtErr.WireMessage().(type) { case *lnwire.FailIncorrectCltvExpiry: default: t.Fatalf("incorrect error, expected incorrect cltv expiry, "+ @@ -730,12 +697,12 @@ func TestLinkForwardFeePolicyMismatch(t *testing.T) { t.Fatalf("payment should have failed but didn't") } - ferr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got: %T", err) + t.Fatalf("expected a 
ClearTextError, instead got: %T", err) } - switch ferr.FailureMessage.(type) { + switch rtErr.WireMessage().(type) { case *lnwire.FailFeeInsufficient: default: t.Fatalf("incorrect error, expected fee insufficient, "+ @@ -788,12 +755,12 @@ func TestLinkForwardMinHTLCPolicyMismatch(t *testing.T) { t.Fatalf("payment should have failed but didn't") } - ferr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got: %T", err) + t.Fatalf("expected a ClearTextError, instead got: %T", err) } - switch ferr.FailureMessage.(type) { + switch rtErr.WireMessage().(type) { case *lnwire.FailAmountBelowMinimum: default: t.Fatalf("incorrect error, expected amount below minimum, "+ @@ -855,12 +822,12 @@ func TestLinkForwardMaxHTLCPolicyMismatch(t *testing.T) { t.Fatalf("payment should have failed but didn't") } - ferr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got: %T", err) + t.Fatalf("expected a ClearTextError, instead got: %T", err) } - switch ferr.FailureMessage.(type) { + switch rtErr.WireMessage().(type) { case *lnwire.FailTemporaryChannelFailure: default: t.Fatalf("incorrect error, expected temporary channel failure, "+ @@ -917,7 +884,7 @@ func TestUpdateForwardingPolicy(t *testing.T) { if err != nil { t.Fatalf("unable to get invoice: %v", err) } - if invoice.Terms.State != channeldb.ContractSettled { + if invoice.State != channeldb.ContractSettled { t.Fatal("carol invoice haven't been settled") } @@ -962,11 +929,12 @@ func TestUpdateForwardingPolicy(t *testing.T) { t.Fatalf("payment should've been rejected") } - ferr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got (%T): %v", err, err) + t.Fatalf("expected a ClearTextError, instead got (%T): %v", err, err) } - switch ferr.FailureMessage.(type) { + + switch rtErr.WireMessage().(type) { case 
*lnwire.FailFeeInsufficient: default: t.Fatalf("expected FailFeeInsufficient instead got: %v", err) @@ -1001,12 +969,13 @@ func TestUpdateForwardingPolicy(t *testing.T) { t.Fatalf("payment should've been rejected") } - ferr, ok = err.(*ForwardingError) + rtErr, ok = err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got (%T): %v", + t.Fatalf("expected a ClearTextError, instead got (%T): %v", err, err) } - switch ferr.FailureMessage.(type) { + + switch rtErr.WireMessage().(type) { case *lnwire.FailTemporaryChannelFailure: default: t.Fatalf("expected TemporaryChannelFailure, instead got: %v", @@ -1076,7 +1045,7 @@ func TestChannelLinkMultiHopInsufficientPayment(t *testing.T) { if err != nil { t.Fatalf("unable to get invoice: %v", err) } - if invoice.Terms.State == channeldb.ContractSettled { + if invoice.State == channeldb.ContractSettled { t.Fatal("carol invoice have been settled") } @@ -1246,8 +1215,15 @@ func TestChannelLinkMultiHopUnknownNextHop(t *testing.T) { totalTimelock).Wait(30 * time.Second) if err == nil { t.Fatal("error haven't been received") - } else if err.Error() != lnwire.CodeUnknownNextPeer.String() { - t.Fatalf("wrong error have been received: %v", err) + } + rtErr, ok := err.(ClearTextError) + if !ok { + t.Fatalf("expected ClearTextError") + } + + if _, ok = rtErr.WireMessage().(*lnwire.FailUnknownNextPeer); !ok { + t.Fatalf("wrong error has been received: %T", + rtErr.WireMessage()) } // Wait for Alice to receive the revocation. 
@@ -1261,7 +1237,7 @@ func TestChannelLinkMultiHopUnknownNextHop(t *testing.T) { if err != nil { t.Fatalf("unable to get invoice: %v", err) } - if invoice.Terms.State == channeldb.ContractSettled { + if invoice.State == channeldb.ContractSettled { t.Fatal("carol invoice have been settled") } @@ -1356,12 +1332,12 @@ func TestChannelLinkMultiHopDecodeError(t *testing.T) { t.Fatal("error haven't been received") } - ferr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got: %T", err) + t.Fatalf("expected a ClearTextError, instead got: %T", err) } - switch ferr.FailureMessage.(type) { + switch rtErr.WireMessage().(type) { case *lnwire.FailInvalidOnionVersion: default: t.Fatalf("wrong error have been received: %v", err) @@ -1376,7 +1352,7 @@ func TestChannelLinkMultiHopDecodeError(t *testing.T) { if err != nil { t.Fatalf("unable to get invoice: %v", err) } - if invoice.Terms.State == channeldb.ContractSettled { + if invoice.State == channeldb.ContractSettled { t.Fatal("carol invoice have been settled") } @@ -1448,13 +1424,13 @@ func TestChannelLinkExpiryTooSoonExitNode(t *testing.T) { "time lock value") } - ferr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got: %T %v", - err, err) + t.Fatalf("expected a ClearTextError, instead got: %T %v", + rtErr, err) } - switch ferr.FailureMessage.(type) { + switch rtErr.WireMessage().(type) { case *lnwire.FailIncorrectDetails: default: t.Fatalf("expected incorrect_or_unknown_payment_details, "+ @@ -1511,12 +1487,13 @@ func TestChannelLinkExpiryTooSoonMidNode(t *testing.T) { "time lock value") } - ferr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected a ForwardingError, instead got: %T: %v", err, err) + t.Fatalf("expected a ClearTextError, instead got: %T: %v", + rtErr, err) } - switch ferr.FailureMessage.(type) { + switch 
rtErr.WireMessage().(type) { case *lnwire.FailExpiryTooSoon: default: t.Fatalf("incorrect error, expected final time lock too "+ @@ -1639,9 +1616,7 @@ func (m *mockPeer) AddNewChannel(_ *channeldb.OpenChannel, _ <-chan struct{}) error { return nil } -func (m *mockPeer) WipeChannel(*wire.OutPoint) error { - return nil -} +func (m *mockPeer) WipeChannel(*wire.OutPoint) {} func (m *mockPeer) PubKey() [33]byte { return [33]byte{} } @@ -1651,10 +1626,10 @@ func (m *mockPeer) IdentityKey() *btcec.PublicKey { func (m *mockPeer) Address() net.Addr { return nil } -func (m *mockPeer) LocalGlobalFeatures() *lnwire.FeatureVector { +func (m *mockPeer) LocalFeatures() *lnwire.FeatureVector { return nil } -func (m *mockPeer) RemoteGlobalFeatures() *lnwire.FeatureVector { +func (m *mockPeer) RemoteFeatures() *lnwire.FeatureVector { return nil } @@ -1686,7 +1661,7 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( quit: make(chan struct{}), } globalPolicy = ForwardingPolicy{ - MinHTLC: lnwire.NewMSatFromSatoshis(5), + MinHTLCOut: lnwire.NewMSatFromSatoshis(5), MaxHTLC: lnwire.NewMSatFromSatoshis(chanAmt), BaseFee: lnwire.NewMSatFromSatoshis(1), TimeLockDelta: 6, @@ -1724,10 +1699,11 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( UpdateContractSignals: func(*contractcourt.ContractSignals) error { return nil }, - Registry: invoiceRegistry, - ChainEvents: &contractcourt.ChainEventSubscription{}, - BatchTicker: bticker, - FwdPkgGCTicker: ticker.NewForce(15 * time.Second), + Registry: invoiceRegistry, + ChainEvents: &contractcourt.ChainEventSubscription{}, + BatchTicker: bticker, + FwdPkgGCTicker: ticker.NewForce(15 * time.Second), + PendingCommitTicker: ticker.New(time.Minute), // Make the BatchSize and Min/MaxFeeUpdateTimeout large enough // to not trigger commit updates automatically during tests. 
BatchSize: 10000, @@ -1735,8 +1711,10 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( MaxFeeUpdateTimeout: 40 * time.Minute, MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, MaxFeeAllocation: DefaultMaxLinkFeeAllocation, + NotifyActiveLink: func(wire.OutPoint) {}, NotifyActiveChannel: func(wire.OutPoint) {}, NotifyInactiveChannel: func(wire.OutPoint) {}, + HtlcNotifier: aliceSwitch.cfg.HtlcNotifier, } aliceLink := NewChannelLink(aliceCfg, aliceLc.channel) @@ -1959,18 +1937,21 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { // incoming HTLCs automatically. coreLink.cfg.HodlMask = hodl.MaskFromFlags(hodl.ExitSettle) - estimator := lnwallet.NewStaticFeeEstimator(6000, 0) + estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { t.Fatalf("unable to query fee estimator: %v", err) } htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(input.HtlcWeight), + feePerKw.FeeForWeight(input.HTLCWeight), ) // The starting bandwidth of the channel should be exactly the amount - // that we created the channel between her and Bob. - expectedBandwidth := lnwire.NewMSatFromSatoshis(chanAmt - defaultCommitFee) + // that we created the channel between her and Bob, minus the + // commitment fee and fee for adding an additional HTLC. + expectedBandwidth := lnwire.NewMSatFromSatoshis( + chanAmt-defaultCommitFee, + ) - htlcFee assertLinkBandwidth(t, aliceLink, expectedBandwidth) // Next, we'll create an HTLC worth 1 BTC, and send it into the link as @@ -2352,227 +2333,6 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) { assertLinkBandwidth(t, aliceLink, aliceStartingBandwidth) } -// TestChannelLinkBandwidthConsistencyOverflow tests that in the case of a -// commitment overflow (no more space for new HTLC's), the bandwidth is updated -// properly as items are being added and removed from the overflow queue. 
-func TestChannelLinkBandwidthConsistencyOverflow(t *testing.T) { - t.Parallel() - - var mockBlob [lnwire.OnionPacketSize]byte - - const chanAmt = btcutil.SatoshiPerBitcoin * 5 - aliceLink, bobChannel, batchTick, start, cleanUp, _, err := - newSingleLinkTestHarness(chanAmt, 0) - if err != nil { - t.Fatalf("unable to create link: %v", err) - } - defer cleanUp() - - if err := start(); err != nil { - t.Fatalf("unable to start test harness: %v", err) - } - - var ( - coreLink = aliceLink.(*channelLink) - defaultCommitFee = coreLink.channel.StateSnapshot().CommitFee - aliceStartingBandwidth = aliceLink.Bandwidth() - aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs - ) - - estimator := lnwallet.NewStaticFeeEstimator(6000, 0) - feePerKw, err := estimator.EstimateFeePerKW(1) - if err != nil { - t.Fatalf("unable to query fee estimator: %v", err) - } - - var htlcID uint64 - addLinkHTLC := func(id uint64, amt lnwire.MilliSatoshi) [32]byte { - invoice, htlc, _, err := generatePayment( - amt, amt, 5, mockBlob, - ) - if err != nil { - t.Fatalf("unable to create payment: %v", err) - } - - addPkt := &htlcPacket{ - htlc: htlc, - incomingHTLCID: id, - amount: amt, - obfuscator: NewMockObfuscator(), - } - circuit := makePaymentCircuit(&htlc.PaymentHash, addPkt) - _, err = coreLink.cfg.Switch.commitCircuits(&circuit) - if err != nil { - t.Fatalf("unable to commit circuit: %v", err) - } - - addPkt.circuit = &circuit - aliceLink.HandleSwitchPacket(addPkt) - return invoice.Terms.PaymentPreimage - } - - // We'll first start by adding enough HTLC's to overflow the commitment - // transaction, checking the reported link bandwidth for proper - // consistency along the way - htlcAmt := lnwire.NewMSatFromSatoshis(100000) - totalHtlcAmt := lnwire.MilliSatoshi(0) - const numHTLCs = input.MaxHTLCNumber / 2 - var preImages [][32]byte - for i := 0; i < numHTLCs; i++ { - preImage := addLinkHTLC(htlcID, htlcAmt) - preImages = append(preImages, preImage) - - totalHtlcAmt += htlcAmt - htlcID++ - } - - 
// The HTLCs should all be sent to the remote. - var msg lnwire.Message - for i := 0; i < numHTLCs; i++ { - select { - case msg = <-aliceMsgs: - case <-time.After(15 * time.Second): - t.Fatalf("did not receive message %d", i) - } - - addHtlc, ok := msg.(*lnwire.UpdateAddHTLC) - if !ok { - t.Fatalf("expected UpdateAddHTLC, got %T", msg) - } - - _, err := bobChannel.ReceiveHTLC(addHtlc) - if err != nil { - t.Fatalf("bob failed receiving htlc: %v", err) - } - } - - select { - case msg = <-aliceMsgs: - t.Fatalf("unexpected message: %T", msg) - case <-time.After(20 * time.Millisecond): - } - - // TODO(roasbeef): increase sleep - time.Sleep(time.Second * 1) - commitWeight := input.CommitWeight + input.HtlcWeight*numHTLCs - htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(commitWeight), - ) - expectedBandwidth := aliceStartingBandwidth - totalHtlcAmt - htlcFee - expectedBandwidth += lnwire.NewMSatFromSatoshis(defaultCommitFee) - assertLinkBandwidth(t, aliceLink, expectedBandwidth) - - // The overflow queue should be empty at this point, as the commitment - // transaction should be full, but not yet overflown. - if coreLink.overflowQueue.Length() != 0 { - t.Fatalf("wrong overflow queue length: expected %v, got %v", 0, - coreLink.overflowQueue.Length()) - } - - // At this point, the commitment transaction should now be fully - // saturated. We'll continue adding HTLC's, and asserting that the - // bandwidth accounting is done properly. - const numOverFlowHTLCs = 20 - for i := 0; i < numOverFlowHTLCs; i++ { - preImage := addLinkHTLC(htlcID, htlcAmt) - preImages = append(preImages, preImage) - - totalHtlcAmt += htlcAmt - htlcID++ - } - - // No messages should be sent to the remote at this point. 
- select { - case msg = <-aliceMsgs: - t.Fatalf("unexpected message: %T", msg) - case <-time.After(20 * time.Millisecond): - } - - time.Sleep(time.Second * 2) - expectedBandwidth -= (numOverFlowHTLCs * htlcAmt) - assertLinkBandwidth(t, aliceLink, expectedBandwidth) - - // With the extra HTLC's added, the overflow queue should now be - // populated with our 20 additional HTLC's. - if coreLink.overflowQueue.Length() != numOverFlowHTLCs { - t.Fatalf("wrong overflow queue length: expected %v, got %v", - numOverFlowHTLCs, - coreLink.overflowQueue.Length()) - } - - // We trigger a state update to lock in the HTLCs. This should - // not change Alice's bandwidth. - if err := updateState(batchTick, coreLink, bobChannel, true); err != nil { - t.Fatalf("unable to update state: %v", err) - } - time.Sleep(time.Millisecond * 500) - assertLinkBandwidth(t, aliceLink, expectedBandwidth) - - // At this point, we'll now settle enough HTLCs to empty the overflow - // queue. The resulting bandwidth change should be non-existent as this - // will simply transfer over funds to the remote party. However, the - // size of the overflow queue should be decreasing - for i := 0; i < numOverFlowHTLCs; i++ { - err = bobChannel.SettleHTLC(preImages[i], uint64(i), nil, nil, nil) - if err != nil { - t.Fatalf("unable to settle htlc: %v", err) - } - - htlcSettle := &lnwire.UpdateFulfillHTLC{ - ID: uint64(i), - PaymentPreimage: preImages[i], - } - - aliceLink.HandleChannelUpdate(htlcSettle) - time.Sleep(time.Millisecond * 50) - } - time.Sleep(time.Millisecond * 500) - assertLinkBandwidth(t, aliceLink, expectedBandwidth) - - // We trigger a state update to lock in the Settles. - if err := updateState(batchTick, coreLink, bobChannel, false); err != nil { - t.Fatalf("unable to update state: %v", err) - } - - // After the state update is done, Alice should start sending - // HTLCs from the overflow queue. 
- for i := 0; i < numOverFlowHTLCs; i++ { - var msg lnwire.Message - select { - case msg = <-aliceMsgs: - case <-time.After(15 * time.Second): - t.Fatalf("did not receive message") - } - - addHtlc, ok := msg.(*lnwire.UpdateAddHTLC) - if !ok { - t.Fatalf("expected UpdateAddHTLC, got %T", msg) - } - - _, err := bobChannel.ReceiveHTLC(addHtlc) - if err != nil { - t.Fatalf("bob failed receiving htlc: %v", err) - } - } - - select { - case msg = <-aliceMsgs: - t.Fatalf("unexpected message: %T", msg) - case <-time.After(20 * time.Millisecond): - } - - assertLinkBandwidth(t, aliceLink, expectedBandwidth) - - // Finally, at this point, the queue itself should be fully empty. As - // enough slots have been drained from the commitment transaction to - // allocate the queue items to. - time.Sleep(time.Millisecond * 500) - if coreLink.overflowQueue.Length() != 0 { - t.Fatalf("wrong overflow queue length: expected %v, got %v", 0, - coreLink.overflowQueue.Length()) - } -} - // genAddsAndCircuits creates `numHtlcs` sequential ADD packets and there // corresponding circuits. The provided `htlc` is used in all test packets. func genAddsAndCircuits(numHtlcs int, htlc *lnwire.UpdateAddHTLC) ( @@ -2630,7 +2390,7 @@ func TestChannelLinkTrimCircuitsPending(t *testing.T) { // Compute the static fees that will be used to determine the // correctness of Alice's bandwidth when forwarding HTLCs. 
- estimator := lnwallet.NewStaticFeeEstimator(6000, 0) + estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { t.Fatalf("unable to query fee estimator: %v", err) @@ -2638,13 +2398,15 @@ func TestChannelLinkTrimCircuitsPending(t *testing.T) { defaultCommitFee := alice.channel.StateSnapshot().CommitFee htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(input.HtlcWeight), + feePerKw.FeeForWeight(input.HTLCWeight), ) // The starting bandwidth of the channel should be exactly the amount // that we created the channel between her and Bob, minus the commitment - // fee. - expectedBandwidth := lnwire.NewMSatFromSatoshis(chanAmt - defaultCommitFee) + // fee and fee of adding an HTLC. + expectedBandwidth := lnwire.NewMSatFromSatoshis( + chanAmt-defaultCommitFee, + ) - htlcFee assertLinkBandwidth(t, alice.link, expectedBandwidth) // Capture Alice's starting bandwidth to perform later, relative @@ -2909,7 +2671,7 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) { // Compute the static fees that will be used to determine the // correctness of Alice's bandwidth when forwarding HTLCs. - estimator := lnwallet.NewStaticFeeEstimator(6000, 0) + estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { t.Fatalf("unable to query fee estimator: %v", err) @@ -2917,13 +2679,15 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) { defaultCommitFee := alice.channel.StateSnapshot().CommitFee htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(input.HtlcWeight), + feePerKw.FeeForWeight(input.HTLCWeight), ) // The starting bandwidth of the channel should be exactly the amount // that we created the channel between her and Bob, minus the commitment - // fee. - expectedBandwidth := lnwire.NewMSatFromSatoshis(chanAmt - defaultCommitFee) + // fee and fee for adding an additional HTLC. 
+ expectedBandwidth := lnwire.NewMSatFromSatoshis( + chanAmt-defaultCommitFee, + ) - htlcFee assertLinkBandwidth(t, alice.link, expectedBandwidth) // Capture Alice's starting bandwidth to perform later, relative @@ -3133,8 +2897,163 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) { t.Fatalf("expected %d packet to be failed", halfHtlcs) } - // Alice balance should not have changed since the start. - assertLinkBandwidth(t, alice.link, aliceStartingBandwidth) + // Alice balance should not have changed since the start. + assertLinkBandwidth(t, alice.link, aliceStartingBandwidth) +} + +// TestChannelLinkTrimCircuitsRemoteCommit checks that the switch and link +// don't trim circuits if the ADD is locked in on the remote commitment but +// not on our local commitment. +func TestChannelLinkTrimCircuitsRemoteCommit(t *testing.T) { + t.Parallel() + + const ( + chanAmt = btcutil.SatoshiPerBitcoin * 5 + numHtlcs = 2 + ) + + // We'll start by creating a new link with our chanAmt (5 BTC). + aliceLink, bobChan, batchTicker, start, cleanUp, restore, err := + newSingleLinkTestHarness(chanAmt, 0) + if err != nil { + t.Fatalf("unable to create link: %v", err) + } + + if err := start(); err != nil { + t.Fatalf("unable to start test harness: %v", err) + } + defer cleanUp() + + alice := newPersistentLinkHarness( + t, aliceLink, batchTicker, restore, + ) + + // Compute the static fees that will be used to determine the + // correctness of Alice's bandwidth when forwarding HTLCs. 
+ estimator := chainfee.NewStaticEstimator(6000, 0) + feePerKw, err := estimator.EstimateFeePerKW(1) + if err != nil { + t.Fatalf("unable to query fee estimator: %v", err) + } + + defaultCommitFee := alice.channel.StateSnapshot().CommitFee + htlcFee := lnwire.NewMSatFromSatoshis( + feePerKw.FeeForWeight(input.HTLCWeight), + ) + + // The starting bandwidth of the channel should be exactly the amount + // that we created the channel between her and Bob, minus the commitment + // fee and fee of adding an HTLC. + expectedBandwidth := lnwire.NewMSatFromSatoshis( + chanAmt-defaultCommitFee, + ) - htlcFee + assertLinkBandwidth(t, alice.link, expectedBandwidth) + + // Capture Alice's starting bandwidth to perform later, relative + // bandwidth assertions. + aliceStartingBandwidth := alice.link.Bandwidth() + + // Next, we'll create an HTLC worth 1 BTC that will be used as a dummy + // message for the test. + var mockBlob [lnwire.OnionPacketSize]byte + htlcAmt := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) + _, htlc, _, err := generatePayment(htlcAmt, htlcAmt, 5, mockBlob) + if err != nil { + t.Fatalf("unable to create payment: %v", err) + } + + // Create `numHtlc` htlcPackets and payment circuits that will be used + // to drive the test. All of the packets will use the same dummy HTLC. + addPkts, circuits := genAddsAndCircuits(numHtlcs, htlc) + + // To begin the test, start by committing the circuits for our first two + // HTLCs. + fwdActions := alice.commitCircuits(circuits) + + // Both of these circuits should have successfully added, as this is the + // first attempt to send them. + if len(fwdActions.Adds) != numHtlcs { + t.Fatalf("expected %d circuits to be added", numHtlcs) + } + alice.assertNumPendingNumOpenCircuits(2, 0) + + // Since both were committed successfully, we will now deliver them to + // Alice's link. 
+ for _, addPkt := range addPkts { + if err := alice.link.HandleSwitchPacket(addPkt); err != nil { + t.Fatalf("unable to handle switch packet: %v", err) + } + } + + // Wait until Alice's link has sent both HTLCs via the peer. + alice.checkSent(addPkts) + + // Pass both of the htlcs to Bob. + for i, addPkt := range addPkts { + pkt, ok := addPkt.htlc.(*lnwire.UpdateAddHTLC) + if !ok { + t.Fatalf("unable to add packet") + } + + pkt.ID = uint64(i) + + _, err := bobChan.ReceiveHTLC(pkt) + if err != nil { + t.Fatalf("unable to receive htlc: %v", err) + } + } + + // The resulting bandwidth should reflect that Alice is paying both + // htlc amounts, in addition to both htlc fees. + assertLinkBandwidth(t, alice.link, + aliceStartingBandwidth-numHtlcs*(htlcAmt+htlcFee), + ) + + // Now, initiate a state transition by Alice so that the pending HTLCs + // are locked in. + alice.trySignNextCommitment() + alice.assertNumPendingNumOpenCircuits(2, 2) + + select { + case aliceMsg := <-alice.msgs: + // Pass the commitment signature to Bob. + sig, ok := aliceMsg.(*lnwire.CommitSig) + if !ok { + t.Fatalf("alice did not send commitment signature") + } + + err := bobChan.ReceiveNewCommitment(sig.CommitSig, sig.HtlcSigs) + if err != nil { + t.Fatalf("unable to receive new commitment: %v", err) + } + case <-time.After(time.Second): + } + + // Next, revoke Bob's current commitment and send it to Alice so that we + // can test that Alice's circuits aren't trimmed. + rev, _, err := bobChan.RevokeCurrentCommitment() + if err != nil { + t.Fatalf("unable to revoke current commitment: %v", err) + } + + _, _, _, _, err = alice.channel.ReceiveRevocation(rev) + if err != nil { + t.Fatalf("unable to receive revocation: %v", err) + } + + // Restart Alice's link, which simulates a disconnection with the remote + // peer. 
+ cleanUp = alice.restart(false) + defer cleanUp() + + alice.assertNumPendingNumOpenCircuits(2, 2) + + // Restart the link + switch and check that the number of open circuits + // doesn't change. + cleanUp = alice.restart(true) + defer cleanUp() + + alice.assertNumPendingNumOpenCircuits(2, 2) } // TestChannelLinkBandwidthChanReserve checks that the bandwidth available @@ -3167,20 +3086,20 @@ func TestChannelLinkBandwidthChanReserve(t *testing.T) { aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs ) - estimator := lnwallet.NewStaticFeeEstimator(6000, 0) + estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { t.Fatalf("unable to query fee estimator: %v", err) } htlcFee := lnwire.NewMSatFromSatoshis( - feePerKw.FeeForWeight(input.HtlcWeight), + feePerKw.FeeForWeight(input.HTLCWeight), ) // The starting bandwidth of the channel should be exactly the amount // that we created the channel between her and Bob, minus the channel - // reserve. + // reserve, commitment fee and fee for adding an additional HTLC. expectedBandwidth := lnwire.NewMSatFromSatoshis( - chanAmt - defaultCommitFee - chanReserve) + chanAmt-defaultCommitFee-chanReserve) - htlcFee assertLinkBandwidth(t, aliceLink, expectedBandwidth) // Next, we'll create an HTLC worth 3 BTC, and send it into the link as @@ -3510,7 +3429,7 @@ func TestChannelRetransmission(t *testing.T) { err = errors.Errorf("unable to get invoice: %v", err) continue } - if invoice.Terms.State != channeldb.ContractSettled { + if invoice.State != channeldb.ContractSettled { err = errors.Errorf("alice invoice haven't been settled") continue } @@ -3554,8 +3473,8 @@ func TestChannelRetransmission(t *testing.T) { // deviates from our current fee by more 10% or more. 
func TestShouldAdjustCommitFee(t *testing.T) { tests := []struct { - netFee lnwallet.SatPerKWeight - chanFee lnwallet.SatPerKWeight + netFee chainfee.SatPerKWeight + chanFee chainfee.SatPerKWeight shouldAdjust bool }{ @@ -3837,7 +3756,7 @@ func TestChannelLinkUpdateCommitFee(t *testing.T) { // triggerFeeUpdate is a helper closure to determine whether a fee // update was triggered and completed properly. - triggerFeeUpdate := func(feeEstimate, newFeeRate lnwallet.SatPerKWeight, + triggerFeeUpdate := func(feeEstimate, newFeeRate chainfee.SatPerKWeight, shouldUpdate bool) { t.Helper() @@ -3898,7 +3817,7 @@ func TestChannelLinkUpdateCommitFee(t *testing.T) { // Triggering the link to update the fee of the channel with a fee rate // that exceeds its maximum fee allocation should result in a fee rate // corresponding to the maximum fee allocation. - const maxFeeRate lnwallet.SatPerKWeight = 207182320 + const maxFeeRate chainfee.SatPerKWeight = 207182320 triggerFeeUpdate(maxFeeRate+1, maxFeeRate, true) } @@ -4047,7 +3966,7 @@ func TestChannelLinkAcceptOverpay(t *testing.T) { if err != nil { t.Fatalf("unable to get invoice: %v", err) } - if invoice.Terms.State != channeldb.ContractSettled { + if invoice.State != channeldb.ContractSettled { t.Fatal("carol invoice haven't been settled") } @@ -4245,7 +4164,7 @@ func (h *persistentLinkHarness) restartLink( } globalPolicy = ForwardingPolicy{ - MinHTLC: lnwire.NewMSatFromSatoshis(5), + MinHTLCOut: lnwire.NewMSatFromSatoshis(5), BaseFee: lnwire.NewMSatFromSatoshis(1), TimeLockDelta: 6, } @@ -4285,10 +4204,11 @@ func (h *persistentLinkHarness) restartLink( UpdateContractSignals: func(*contractcourt.ContractSignals) error { return nil }, - Registry: h.coreLink.cfg.Registry, - ChainEvents: &contractcourt.ChainEventSubscription{}, - BatchTicker: bticker, - FwdPkgGCTicker: ticker.New(5 * time.Second), + Registry: h.coreLink.cfg.Registry, + ChainEvents: &contractcourt.ChainEventSubscription{}, + BatchTicker: bticker, + FwdPkgGCTicker: 
ticker.New(5 * time.Second), + PendingCommitTicker: ticker.New(time.Minute), // Make the BatchSize and Min/MaxFeeUpdateTimeout large enough // to not trigger commit updates automatically during tests. BatchSize: 10000, @@ -4298,8 +4218,10 @@ func (h *persistentLinkHarness) restartLink( HodlMask: hodl.MaskFromFlags(hodlFlags...), MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, MaxFeeAllocation: DefaultMaxLinkFeeAllocation, + NotifyActiveLink: func(wire.OutPoint) {}, NotifyActiveChannel: func(wire.OutPoint) {}, NotifyInactiveChannel: func(wire.OutPoint) {}, + HtlcNotifier: aliceSwitch.cfg.HtlcNotifier, } aliceLink := NewChannelLink(aliceCfg, aliceChannel) @@ -4326,7 +4248,7 @@ func (h *persistentLinkHarness) restartLink( // gnerateHtlc generates a simple payment from Bob to Alice. func generateHtlc(t *testing.T, coreLink *channelLink, - bobChannel *lnwallet.LightningChannel, id uint64) *lnwire.UpdateAddHTLC { + id uint64) *lnwire.UpdateAddHTLC { t.Helper() @@ -4353,13 +4275,13 @@ func generateHtlcAndInvoice(t *testing.T, htlcAmt := lnwire.NewMSatFromSatoshis(10000) htlcExpiry := testStartingHeight + testInvoiceCltvExpiry - hops := []hop.ForwardingInfo{ - { - Network: hop.BitcoinNetwork, - NextHop: hop.Exit, - AmountToForward: htlcAmt, - OutgoingCTLV: uint32(htlcExpiry), - }, + hops := []*hop.Payload{ + hop.NewLegacyPayload(&sphinx.HopData{ + Realm: [1]byte{}, // hop.BitcoinNetwork + NextAddress: [8]byte{}, // hop.Exit, + ForwardAmount: uint64(htlcAmt), + OutgoingCltv: uint32(htlcExpiry), + }), } blob, err := generateRoute(hops...) if err != nil { @@ -4402,8 +4324,8 @@ func TestChannelLinkNoMoreUpdates(t *testing.T) { ) // Add two HTLCs to Alice's registry, that Bob can pay. 
- htlc1 := generateHtlc(t, coreLink, bobChannel, 0) - htlc2 := generateHtlc(t, coreLink, bobChannel, 1) + htlc1 := generateHtlc(t, coreLink, 0) + htlc2 := generateHtlc(t, coreLink, 1) ctx := linkTestContext{ t: t, @@ -4484,15 +4406,24 @@ func checkHasPreimages(t *testing.T, coreLink *channelLink, t.Helper() - for i := range htlcs { - _, ok := coreLink.cfg.PreimageCache.LookupPreimage( - htlcs[i].PaymentHash, - ) - if ok != expOk { - t.Fatalf("expected to find witness: %v, "+ + err := wait.NoError(func() error { + for i := range htlcs { + _, ok := coreLink.cfg.PreimageCache.LookupPreimage( + htlcs[i].PaymentHash, + ) + if ok == expOk { + continue + } + + return fmt.Errorf("expected to find witness: %v, "+ "got %v for hash=%x", expOk, ok, htlcs[i].PaymentHash) } + + return nil + }, 5*time.Second) + if err != nil { + t.Fatalf("unable to find preimages: %v", err) } } @@ -4524,7 +4455,7 @@ func TestChannelLinkWaitForRevocation(t *testing.T) { numHtlcs := 10 var htlcs []*lnwire.UpdateAddHTLC for i := 0; i < numHtlcs; i++ { - htlc := generateHtlc(t, coreLink, bobChannel, uint64(i)) + htlc := generateHtlc(t, coreLink, uint64(i)) htlcs = append(htlcs, htlc) } @@ -4618,6 +4549,91 @@ func TestChannelLinkWaitForRevocation(t *testing.T) { assertNoMsgFromAlice() } +// TestChannelLinkNoEmptySig asserts that no empty commit sig message is sent +// when the commitment txes are out of sync. 
+func TestChannelLinkNoEmptySig(t *testing.T) { + t.Parallel() + + const chanAmt = btcutil.SatoshiPerBitcoin * 5 + const chanReserve = btcutil.SatoshiPerBitcoin * 1 + aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := + newSingleLinkTestHarness(chanAmt, chanReserve) + if err != nil { + t.Fatalf("unable to create link: %v", err) + } + defer cleanUp() + + if err := start(); err != nil { + t.Fatalf("unable to start test harness: %v", err) + } + defer aliceLink.Stop() + + var ( + coreLink = aliceLink.(*channelLink) + aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs + ) + + ctx := linkTestContext{ + t: t, + aliceLink: aliceLink, + aliceMsgs: aliceMsgs, + bobChannel: bobChannel, + } + + // Send htlc 1 from Alice to Bob. + htlc1, _ := generateHtlcAndInvoice(t, 0) + ctx.sendHtlcAliceToBob(0, htlc1) + ctx.receiveHtlcAliceToBob() + + // Tick the batch ticker to trigger a commitsig from Alice->Bob. + select { + case batchTicker <- time.Now(): + case <-time.After(5 * time.Second): + t.Fatalf("could not force commit sig") + } + + // Receive a CommitSig from Alice covering the Add from above. + ctx.receiveCommitSigAliceToBob(1) + + // Bob revokes previous commitment tx. + ctx.sendRevAndAckBobToAlice() + + // Alice sends htlc 2 to Bob. + htlc2, _ := generateHtlcAndInvoice(t, 0) + ctx.sendHtlcAliceToBob(1, htlc2) + ctx.receiveHtlcAliceToBob() + + // Tick the batch ticker to trigger a commitsig from Alice->Bob. + select { + case batchTicker <- time.Now(): + case <-time.After(5 * time.Second): + t.Fatalf("could not force commit sig") + } + + // Get the commit sig from Alice, but don't send it to Bob yet. + commitSigAlice := ctx.receiveCommitSigAlice(2) + + // Bob adds htlc 1 to its remote commit tx. + ctx.sendCommitSigBobToAlice(1) + + // Now send Bob the signature from Alice covering both htlcs. 
+ err = bobChannel.ReceiveNewCommitment( + commitSigAlice.CommitSig, commitSigAlice.HtlcSigs, + ) + if err != nil { + t.Fatalf("bob failed receiving commitment: %v", err) + } + + // Both Alice and Bob revoke their previous commitment txes. + ctx.receiveRevAndAckAliceToBob() + ctx.sendRevAndAckBobToAlice() + + // The commit txes are not in sync, but it is Bob's turn to send a new + // signature. We don't expect Alice to send out any message. This check + // allows some time for the log commit ticker to trigger for Alice. + ctx.assertNoMsgFromAlice(time.Second) +} + // TestChannelLinkBatchPreimageWrite asserts that a link will batch preimage // writes when just as it receives a CommitSig to lock in any Settles, and also // if the link is aware of any uncommitted preimages if the link is stopped, @@ -4782,8 +4798,8 @@ func TestChannelLinkCleanupSpuriousResponses(t *testing.T) { coreLink.cfg.HodlMask = hodl.ExitSettle.Mask() // Add two HTLCs to Alice's registry, that Bob can pay. - htlc1 := generateHtlc(t, coreLink, bobChannel, 0) - htlc2 := generateHtlc(t, coreLink, bobChannel, 1) + htlc1 := generateHtlc(t, coreLink, 0) + htlc2 := generateHtlc(t, coreLink, 1) ctx := linkTestContext{ t: t, @@ -5063,32 +5079,32 @@ type mockPackager struct { failLoadFwdPkgs bool } -func (*mockPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *channeldb.FwdPkg) error { +func (*mockPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *channeldb.FwdPkg) error { return nil } -func (*mockPackager) SetFwdFilter(tx *bbolt.Tx, height uint64, +func (*mockPackager) SetFwdFilter(tx kvdb.RwTx, height uint64, fwdFilter *channeldb.PkgFilter) error { return nil } -func (*mockPackager) AckAddHtlcs(tx *bbolt.Tx, +func (*mockPackager) AckAddHtlcs(tx kvdb.RwTx, addRefs ...channeldb.AddRef) error { return nil } -func (m *mockPackager) LoadFwdPkgs(tx *bbolt.Tx) ([]*channeldb.FwdPkg, error) { +func (m *mockPackager) LoadFwdPkgs(tx kvdb.ReadTx) ([]*channeldb.FwdPkg, error) { if m.failLoadFwdPkgs { return nil, fmt.Errorf("failing 
LoadFwdPkgs") } return nil, nil } -func (*mockPackager) RemovePkg(tx *bbolt.Tx, height uint64) error { +func (*mockPackager) RemovePkg(tx kvdb.RwTx, height uint64) error { return nil } -func (*mockPackager) AckSettleFails(tx *bbolt.Tx, +func (*mockPackager) AckSettleFails(tx kvdb.RwTx, settleFailRefs ...channeldb.SettleFailRef) error { return nil } @@ -5168,7 +5184,7 @@ func TestChannelLinkFail(t *testing.T) { func(t *testing.T, c *channelLink, remoteChannel *lnwallet.LightningChannel) { // Generate an HTLC and send to the link. - htlc1 := generateHtlc(t, c, remoteChannel, 0) + htlc1 := generateHtlc(t, c, 0) ctx := linkTestContext{ t: t, aliceLink: c, @@ -5205,7 +5221,7 @@ func TestChannelLinkFail(t *testing.T) { func(t *testing.T, c *channelLink, remoteChannel *lnwallet.LightningChannel) { // Generate an HTLC and send to the link. - htlc1 := generateHtlc(t, c, remoteChannel, 0) + htlc1 := generateHtlc(t, c, 0) ctx := linkTestContext{ t: t, aliceLink: c, @@ -5396,9 +5412,9 @@ func TestForwardingAsymmetricTimeLockPolicies(t *testing.T) { } } -// TestHtlcSatisfyPolicy tests that a link is properly enforcing the HTLC +// TestCheckHtlcForward tests that a link is properly enforcing the HTLC // forwarding policy. 
-func TestHtlcSatisfyPolicy(t *testing.T) { +func TestCheckHtlcForward(t *testing.T) { fetchLastChannelUpdate := func(lnwire.ShortChannelID) ( *lnwire.ChannelUpdate, error) { @@ -5406,23 +5422,35 @@ func TestHtlcSatisfyPolicy(t *testing.T) { return &lnwire.ChannelUpdate{}, nil } + testChannel, _, fCleanUp, err := createTestChannel( + alicePrivKey, bobPrivKey, 100000, 100000, + 1000, 1000, lnwire.ShortChannelID{}, + ) + if err != nil { + t.Fatal(err) + } + defer fCleanUp() + link := channelLink{ cfg: ChannelLinkConfig{ FwrdingPolicy: ForwardingPolicy{ TimeLockDelta: 20, - MinHTLC: 500, + MinHTLCOut: 500, MaxHTLC: 1000, BaseFee: 10, }, FetchLastChannelUpdate: fetchLastChannelUpdate, MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, + HtlcNotifier: &mockHTLCNotifier{}, }, + log: log, + channel: testChannel.channel, } var hash [32]byte t.Run("satisfied", func(t *testing.T) { - result := link.HtlcSatifiesPolicy(hash, 1500, 1000, + result := link.CheckHtlcForward(hash, 1500, 1000, 200, 150, 0) if result != nil { t.Fatalf("expected policy to be satisfied") @@ -5430,41 +5458,41 @@ func TestHtlcSatisfyPolicy(t *testing.T) { }) t.Run("below minhtlc", func(t *testing.T) { - result := link.HtlcSatifiesPolicy(hash, 100, 50, + result := link.CheckHtlcForward(hash, 100, 50, 200, 150, 0) - if _, ok := result.(*lnwire.FailAmountBelowMinimum); !ok { + if _, ok := result.WireMessage().(*lnwire.FailAmountBelowMinimum); !ok { t.Fatalf("expected FailAmountBelowMinimum failure code") } }) t.Run("above maxhtlc", func(t *testing.T) { - result := link.HtlcSatifiesPolicy(hash, 1500, 1200, + result := link.CheckHtlcForward(hash, 1500, 1200, 200, 150, 0) - if _, ok := result.(*lnwire.FailTemporaryChannelFailure); !ok { + if _, ok := result.WireMessage().(*lnwire.FailTemporaryChannelFailure); !ok { t.Fatalf("expected FailTemporaryChannelFailure failure code") } }) t.Run("insufficient fee", func(t *testing.T) { - result := link.HtlcSatifiesPolicy(hash, 1005, 1000, + result := 
link.CheckHtlcForward(hash, 1005, 1000, 200, 150, 0) - if _, ok := result.(*lnwire.FailFeeInsufficient); !ok { + if _, ok := result.WireMessage().(*lnwire.FailFeeInsufficient); !ok { t.Fatalf("expected FailFeeInsufficient failure code") } }) t.Run("expiry too soon", func(t *testing.T) { - result := link.HtlcSatifiesPolicy(hash, 1500, 1000, + result := link.CheckHtlcForward(hash, 1500, 1000, 200, 150, 190) - if _, ok := result.(*lnwire.FailExpiryTooSoon); !ok { + if _, ok := result.WireMessage().(*lnwire.FailExpiryTooSoon); !ok { t.Fatalf("expected FailExpiryTooSoon failure code") } }) t.Run("incorrect cltv expiry", func(t *testing.T) { - result := link.HtlcSatifiesPolicy(hash, 1500, 1000, + result := link.CheckHtlcForward(hash, 1500, 1000, 200, 190, 0) - if _, ok := result.(*lnwire.FailIncorrectCltvExpiry); !ok { + if _, ok := result.WireMessage().(*lnwire.FailIncorrectCltvExpiry); !ok { t.Fatalf("expected FailIncorrectCltvExpiry failure code") } @@ -5472,9 +5500,9 @@ func TestHtlcSatisfyPolicy(t *testing.T) { t.Run("cltv expiry too far in the future", func(t *testing.T) { // Check that expiry isn't too far in the future. - result := link.HtlcSatifiesPolicy(hash, 1500, 1000, + result := link.CheckHtlcForward(hash, 1500, 1000, 10200, 10100, 0) - if _, ok := result.(*lnwire.FailExpiryTooFar); !ok { + if _, ok := result.WireMessage().(*lnwire.FailExpiryTooFar); !ok { t.Fatalf("expected FailExpiryTooFar failure code") } }) @@ -5527,11 +5555,11 @@ func TestChannelLinkCanceledInvoice(t *testing.T) { // Because the invoice is canceled, we expect an unknown payment hash // result. 
- fErr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected ForwardingError, but got %v", err) + t.Fatalf("expected ClearTextError, but got %v", err) } - _, ok = fErr.FailureMessage.(*lnwire.FailIncorrectDetails) + _, ok = rtErr.WireMessage().(*lnwire.FailIncorrectDetails) if !ok { t.Fatalf("expected unknown payment hash, but got %v", err) } @@ -5803,6 +5831,95 @@ func TestChannelLinkHoldInvoiceRestart(t *testing.T) { } } +// TestChannelLinkRevocationWindowRegular asserts that htlcs paying to a regular +// invoice are settled even if the revocation window gets exhausted. +func TestChannelLinkRevocationWindowRegular(t *testing.T) { + t.Parallel() + + const ( + chanAmt = btcutil.SatoshiPerBitcoin * 5 + ) + + // We'll start by creating a new link with our chanAmt (5 BTC). We will + // only be testing Alice's behavior, so the reference to Bob's channel + // state is unnecessary. + aliceLink, bobChannel, _, start, cleanUp, _, err := + newSingleLinkTestHarness(chanAmt, 0) + if err != nil { + t.Fatalf("unable to create link: %v", err) + } + defer cleanUp() + + if err := start(); err != nil { + t.Fatalf("unable to start test harness: %v", err) + } + defer aliceLink.Stop() + + var ( + coreLink = aliceLink.(*channelLink) + registry = coreLink.cfg.Registry.(*mockInvoiceRegistry) + aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs + ) + + ctx := linkTestContext{ + t: t, + aliceLink: aliceLink, + aliceMsgs: aliceMsgs, + bobChannel: bobChannel, + } + + registry.settleChan = make(chan lntypes.Hash) + + htlc1, invoice1 := generateHtlcAndInvoice(t, 0) + htlc2, invoice2 := generateHtlcAndInvoice(t, 1) + + // We must add the invoice to the registry, such that Alice + // expects this payment. 
+ err = registry.AddInvoice(*invoice1, htlc1.PaymentHash) + if err != nil { + t.Fatalf("unable to add invoice to registry: %v", err) + } + err = registry.AddInvoice(*invoice2, htlc2.PaymentHash) + if err != nil { + t.Fatalf("unable to add invoice to registry: %v", err) + } + + // Lock in htlc 1 on both sides. + ctx.sendHtlcBobToAlice(htlc1) + ctx.sendCommitSigBobToAlice(1) + ctx.receiveRevAndAckAliceToBob() + ctx.receiveCommitSigAliceToBob(1) + ctx.sendRevAndAckBobToAlice() + + // We expect a call to the invoice registry to notify the arrival of the + // htlc. + select { + case <-registry.settleChan: + case <-time.After(5 * time.Second): + t.Fatal("expected invoice to be settled") + } + + // Expect alice to send a settle and commitsig message to bob. Bob does + // not yet send the revocation. + ctx.receiveSettleAliceToBob() + ctx.receiveCommitSigAliceToBob(0) + + // Pay invoice 2. + ctx.sendHtlcBobToAlice(htlc2) + ctx.sendCommitSigBobToAlice(2) + ctx.receiveRevAndAckAliceToBob() + + // At this point, Alice cannot send a new commit sig to bob because the + // revocation window is exhausted. + + // Bob sends revocation and signs commit with htlc1 settled. + ctx.sendRevAndAckBobToAlice() + + // After the revocation, it is again possible for Alice to send a commit + // sig with htlc2. + ctx.receiveCommitSigAliceToBob(1) +} + // TestChannelLinkRevocationWindowHodl asserts that htlcs paying to a hodl // invoice are settled even if the revocation window gets exhausted. func TestChannelLinkRevocationWindowHodl(t *testing.T) { @@ -5948,16 +6065,172 @@ func TestChannelLinkRevocationWindowHodl(t *testing.T) { } } -// assertFailureCode asserts that an error is of type ForwardingError and that +// TestChannelLinkReceiveEmptySig tests the response of the link to receiving an +// empty commit sig. This should be tolerated, but we shouldn't send out an +// empty sig ourselves. 
+func TestChannelLinkReceiveEmptySig(t *testing.T) { + t.Parallel() + + const chanAmt = btcutil.SatoshiPerBitcoin * 5 + const chanReserve = btcutil.SatoshiPerBitcoin * 1 + aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := + newSingleLinkTestHarness(chanAmt, chanReserve) + if err != nil { + t.Fatalf("unable to create link: %v", err) + } + defer cleanUp() + + if err := start(); err != nil { + t.Fatalf("unable to start test harness: %v", err) + } + + var ( + coreLink = aliceLink.(*channelLink) + aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs + ) + + ctx := linkTestContext{ + t: t, + aliceLink: aliceLink, + aliceMsgs: aliceMsgs, + bobChannel: bobChannel, + } + + htlc, _ := generateHtlcAndInvoice(t, 0) + + // First, send an Add from Alice to Bob. + ctx.sendHtlcAliceToBob(0, htlc) + ctx.receiveHtlcAliceToBob() + + // Tick the batch ticker to trigger a commitsig from Alice->Bob. + select { + case batchTicker <- time.Now(): + case <-time.After(5 * time.Second): + t.Fatalf("could not force commit sig") + } + + // Make Bob send a CommitSig. Since Bob hasn't received Alice's sig, he + // cannot add the htlc to his remote tx yet. The commit sig that we + // force Bob to send will be empty. Note that this normally does not + // happen, because the link (which is not present for Bob in this test) + // check whether Bob actually owes a sig first. + ctx.sendCommitSigBobToAlice(0) + + // Receive a CommitSig from Alice covering the htlc from above. + ctx.receiveCommitSigAliceToBob(1) + + // Wait for RevokeAndAck Alice->Bob. Even though Bob sent an empty + // commit sig, Alice still needs to revoke the previous commitment tx. + ctx.receiveRevAndAckAliceToBob() + + // Send RevokeAndAck Bob->Alice to ack the added htlc. + ctx.sendRevAndAckBobToAlice() + + // We received an empty commit sig, we accepted it, but there is nothing + // new to sign for us. + + // No other messages are expected. 
+ ctx.assertNoMsgFromAlice(time.Second) + + // Stop the link + aliceLink.Stop() +} + +// TestPendingCommitTicker tests that a link will fail itself after a timeout if +// the commitment dance stalls out. +func TestPendingCommitTicker(t *testing.T) { + t.Parallel() + + const chanAmt = btcutil.SatoshiPerBitcoin * 5 + const chanReserve = btcutil.SatoshiPerBitcoin * 1 + aliceLink, bobChannel, batchTicker, start, cleanUp, _, err := + newSingleLinkTestHarness(chanAmt, chanReserve) + if err != nil { + t.Fatalf("unable to create link: %v", err) + } + + var ( + coreLink = aliceLink.(*channelLink) + aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs + ) + + coreLink.cfg.PendingCommitTicker = ticker.NewForce(time.Millisecond) + + linkErrs := make(chan LinkFailureError) + coreLink.cfg.OnChannelFailure = func(_ lnwire.ChannelID, + _ lnwire.ShortChannelID, linkErr LinkFailureError) { + + linkErrs <- linkErr + } + + if err := start(); err != nil { + t.Fatalf("unable to start test harness: %v", err) + } + defer cleanUp() + + ctx := linkTestContext{ + t: t, + aliceLink: aliceLink, + bobChannel: bobChannel, + aliceMsgs: aliceMsgs, + } + + // Send an HTLC from Alice to Bob, and signal the batch ticker to signa + // a commitment. + htlc, _ := generateHtlcAndInvoice(t, 0) + ctx.sendHtlcAliceToBob(0, htlc) + ctx.receiveHtlcAliceToBob() + batchTicker <- time.Now() + + select { + case msg := <-aliceMsgs: + if _, ok := msg.(*lnwire.CommitSig); !ok { + t.Fatalf("expected CommitSig, got: %T", msg) + } + case <-time.After(time.Second): + t.Fatalf("alice did not send commit sig") + } + + // Check that Alice hasn't failed. + select { + case linkErr := <-linkErrs: + t.Fatalf("link failed unexpectedly: %v", linkErr) + case <-time.After(50 * time.Millisecond): + } + + // Without completing the dance, send another HTLC from Alice to Bob. + // Since the revocation window has been exhausted, we should see the + // link fail itself immediately due to the low pending commit timeout. 
+ // In production this would be much longer, e.g. a minute. + htlc, _ = generateHtlcAndInvoice(t, 1) + ctx.sendHtlcAliceToBob(1, htlc) + ctx.receiveHtlcAliceToBob() + batchTicker <- time.Now() + + // Assert that we get the expected link failure from Alice. + select { + case linkErr := <-linkErrs: + if linkErr.code != ErrRemoteUnresponsive { + t.Fatalf("error code mismatch, "+ + "want: ErrRemoteUnresponsive, got: %v", + linkErr.code) + } + + case <-time.After(time.Second): + t.Fatalf("did not receive failure") + } +} + +// assertFailureCode asserts that an error is of type ClearTextError and that // the failure code is as expected. func assertFailureCode(t *testing.T, err error, code lnwire.FailCode) { - fErr, ok := err.(*ForwardingError) + rtErr, ok := err.(ClearTextError) if !ok { - t.Fatalf("expected ForwardingError but got %T", err) + t.Fatalf("expected ClearTextError but got %T", err) } - if fErr.FailureMessage.Code() != code { + if rtErr.WireMessage().Code() != code { t.Fatalf("expected %v but got %v", - code, fErr.FailureMessage.Code()) + code, rtErr.WireMessage().Code()) } } diff --git a/htlcswitch/linkfailure.go b/htlcswitch/linkfailure.go index c806c4b269..840a4d8dad 100644 --- a/htlcswitch/linkfailure.go +++ b/htlcswitch/linkfailure.go @@ -20,6 +20,10 @@ const ( // to fail the link. ErrRemoteError + // ErrRemoteUnresponsive indicates that our peer took too long to + // complete a commitment dance. + ErrRemoteUnresponsive + // ErrSyncError indicates that we failed synchronizing the state of the // channel with our peer. ErrSyncError @@ -71,6 +75,8 @@ func (e LinkFailureError) Error() string { return "internal error" case ErrRemoteError: return "remote error" + case ErrRemoteUnresponsive: + return "remote unresponsive" case ErrSyncError: return "sync error" case ErrInvalidUpdate: @@ -90,13 +96,23 @@ func (e LinkFailureError) Error() string { // the link fails with this LinkFailureError. 
func (e LinkFailureError) ShouldSendToPeer() bool { switch e.code { - // If the failure is a result of the peer sending us an error, we don't - // have to respond with one. - case ErrRemoteError: - return false - // In all other cases we will attempt to send our peer an error message. - default: + // Since sending an error can lead some nodes to force close the + // channel, create a whitelist of the failures we want to send so that + // newly added error codes aren't automatically sent to the remote peer. + case + ErrInternalError, + ErrRemoteError, + ErrSyncError, + ErrInvalidUpdate, + ErrInvalidCommitment, + ErrInvalidRevocation, + ErrRecoveryError: + return true + + // In all other cases we will not attempt to send our peer an error. + default: + return false } } diff --git a/htlcswitch/mailbox.go b/htlcswitch/mailbox.go index 633232b91e..95d88763e1 100644 --- a/htlcswitch/mailbox.go +++ b/htlcswitch/mailbox.go @@ -1,17 +1,26 @@ package htlcswitch import ( + "bytes" "container/list" "errors" + "fmt" "sync" "time" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lnwire" ) -// ErrMailBoxShuttingDown is returned when the mailbox is interrupted by a -// shutdown request. -var ErrMailBoxShuttingDown = errors.New("mailbox is shutting down") +var ( + // ErrMailBoxShuttingDown is returned when the mailbox is interrupted by + // a shutdown request. + ErrMailBoxShuttingDown = errors.New("mailbox is shutting down") + + // ErrPacketAlreadyExists signals that an attempt to add a packet failed + // because it already exists in the mailbox. + ErrPacketAlreadyExists = errors.New("mailbox already has packet") +) // MailBox is an interface which represents a concurrent-safe, in-order // delivery queue for messages from the network and also from the main switch. @@ -31,8 +40,17 @@ type MailBox interface { // AckPacket removes a packet from the mailboxes in-memory replay // buffer. 
This will prevent a packet from being delivered after a link - // restarts if the switch has remained online. - AckPacket(CircuitKey) error + // restarts if the switch has remained online. The returned boolean + // indicates whether or not a packet with the passed incoming circuit + // key was removed. + AckPacket(CircuitKey) bool + + // FailAdd fails an UpdateAddHTLC that exists within the mailbox, + // removing it from the in-memory replay buffer. This will prevent the + // packet from being delivered after the link restarts if the switch has + // remained online. The generated LinkError will show an + // OutgoingFailureDownstreamHtlcAdd FailureDetail. + FailAdd(pkt *htlcPacket) // MessageOutBox returns a channel that any new messages ready for // delivery will be sent on. @@ -50,10 +68,33 @@ type MailBox interface { // Start starts the mailbox and any goroutines it needs to operate // properly. - Start() error + Start() // Stop signals the mailbox and its goroutines for a graceful shutdown. - Stop() error + Stop() +} + +type mailBoxConfig struct { + // shortChanID is the short channel id of the channel this mailbox + // belongs to. + shortChanID lnwire.ShortChannelID + + // fetchUpdate retreives the most recent channel update for the channel + // this mailbox belongs to. + fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) + + // forwardPackets send a varidic number of htlcPackets to the switch to + // be routed. A quit channel should be provided so that the call can + // properly exit during shutdown. + forwardPackets func(chan struct{}, ...*htlcPacket) chan error + + // clock is a time source for the mailbox. + clock clock.Clock + + // expiry is the interval after which Adds will be cancelled if they + // have not been yet been delivered. The computed deadline will expiry + // this long after the Adds are added via AddPacket. 
+ expiry time.Duration } // memoryMailBox is an implementation of the MailBox struct backed by purely @@ -62,6 +103,8 @@ type memoryMailBox struct { started sync.Once stopped sync.Once + cfg *mailBoxConfig + wireMessages *list.List wireMtx sync.Mutex wireCond *sync.Cond @@ -69,29 +112,42 @@ type memoryMailBox struct { messageOutbox chan lnwire.Message msgReset chan chan struct{} - htlcPkts *list.List - pktIndex map[CircuitKey]*list.Element - pktHead *list.Element - pktMtx sync.Mutex - pktCond *sync.Cond + // repPkts is a queue for reply packets, e.g. Settles and Fails. + repPkts *list.List + repIndex map[CircuitKey]*list.Element + repHead *list.Element + + // addPkts is a dedicated queue for Adds. + addPkts *list.List + addIndex map[CircuitKey]*list.Element + addHead *list.Element + + pktMtx sync.Mutex + pktCond *sync.Cond pktOutbox chan *htlcPacket pktReset chan chan struct{} - wg sync.WaitGroup - quit chan struct{} + wireShutdown chan struct{} + pktShutdown chan struct{} + quit chan struct{} } // newMemoryMailBox creates a new instance of the memoryMailBox. -func newMemoryMailBox() *memoryMailBox { +func newMemoryMailBox(cfg *mailBoxConfig) *memoryMailBox { box := &memoryMailBox{ + cfg: cfg, wireMessages: list.New(), - htlcPkts: list.New(), + repPkts: list.New(), + addPkts: list.New(), messageOutbox: make(chan lnwire.Message), pktOutbox: make(chan *htlcPacket), msgReset: make(chan chan struct{}, 1), pktReset: make(chan chan struct{}, 1), - pktIndex: make(map[CircuitKey]*list.Element), + repIndex: make(map[CircuitKey]*list.Element), + addIndex: make(map[CircuitKey]*list.Element), + wireShutdown: make(chan struct{}), + pktShutdown: make(chan struct{}), quit: make(chan struct{}), } box.wireCond = sync.NewCond(&box.wireMtx) @@ -120,13 +176,11 @@ const ( // Start starts the mailbox and any goroutines it needs to operate properly. // // NOTE: This method is part of the MailBox interface. 
-func (m *memoryMailBox) Start() error { +func (m *memoryMailBox) Start() { m.started.Do(func() { - m.wg.Add(2) go m.mailCourier(wireCourier) go m.mailCourier(pktCourier) }) - return nil } // ResetMessages blocks until all buffered wire messages are cleared. @@ -158,6 +212,7 @@ func (m *memoryMailBox) signalUntilReset(cType courierType, done chan struct{}) error { for { + switch cType { case wireCourier: m.wireCond.Signal() @@ -177,29 +232,59 @@ func (m *memoryMailBox) signalUntilReset(cType courierType, } // AckPacket removes the packet identified by it's incoming circuit key from the -// queue of packets to be delivered. +// queue of packets to be delivered. The returned boolean indicates whether or +// not a packet with the passed incoming circuit key was removed. // // NOTE: It is safe to call this method multiple times for the same circuit key. -func (m *memoryMailBox) AckPacket(inKey CircuitKey) error { +func (m *memoryMailBox) AckPacket(inKey CircuitKey) bool { m.pktCond.L.Lock() - entry, ok := m.pktIndex[inKey] - if !ok { - m.pktCond.L.Unlock() - return nil + defer m.pktCond.L.Unlock() + + if entry, ok := m.repIndex[inKey]; ok { + // Check whether we are removing the head of the queue. If so, + // we must advance the head to the next packet before removing. + // It's possible that the courier has already advanced the + // repHead, so this check prevents the repHead from getting + // desynchronized. + if entry == m.repHead { + m.repHead = entry.Next() + } + m.repPkts.Remove(entry) + delete(m.repIndex, inKey) + + return true } - m.htlcPkts.Remove(entry) - delete(m.pktIndex, inKey) - m.pktCond.L.Unlock() + if entry, ok := m.addIndex[inKey]; ok { + // Check whether we are removing the head of the queue. If so, + // we must advance the head to the next add before removing. + // It's possible that the courier has already advanced the + // addHead, so this check prevents the addHead from getting + // desynchronized. 
+ // + // NOTE: While this event is rare for Settles or Fails, it could + // be very common for Adds since the mailbox has the ability to + // cancel Adds before they are delivered. When that occurs, the + // head of addPkts has only been peeked and we expect to be + // removing the head of the queue. + if entry == m.addHead { + m.addHead = entry.Next() + } - return nil + m.addPkts.Remove(entry) + delete(m.addIndex, inKey) + + return true + } + + return false } // HasPacket queries the packets for a circuit key, this is used to drop packets // bound for the switch that already have a queued response. func (m *memoryMailBox) HasPacket(inKey CircuitKey) bool { m.pktCond.L.Lock() - _, ok := m.pktIndex[inKey] + _, ok := m.repIndex[inKey] m.pktCond.L.Unlock() return ok @@ -208,14 +293,52 @@ func (m *memoryMailBox) HasPacket(inKey CircuitKey) bool { // Stop signals the mailbox and its goroutines for a graceful shutdown. // // NOTE: This method is part of the MailBox interface. -func (m *memoryMailBox) Stop() error { +func (m *memoryMailBox) Stop() { m.stopped.Do(func() { close(m.quit) - m.wireCond.Signal() - m.pktCond.Signal() + m.signalUntilShutdown(wireCourier) + m.signalUntilShutdown(pktCourier) }) - return nil +} + +// signalUntilShutdown strobes the condition variable of the passed courier +// type, blocking until the worker has exited. +func (m *memoryMailBox) signalUntilShutdown(cType courierType) { + var ( + cond *sync.Cond + shutdown chan struct{} + ) + + switch cType { + case wireCourier: + cond = m.wireCond + shutdown = m.wireShutdown + case pktCourier: + cond = m.pktCond + shutdown = m.pktShutdown + } + + for { + select { + case <-time.After(time.Millisecond): + cond.Signal() + case <-shutdown: + return + } + } +} + +// pktWithExpiry wraps an incoming packet and records the time at which it it +// should be canceled from the mailbox. This will be used to detect if it gets +// stuck in the mailbox and inform when to cancel back. 
+type pktWithExpiry struct { + pkt *htlcPacket + expiry time.Time +} + +func (p *pktWithExpiry) deadline(clock clock.Clock) <-chan time.Time { + return clock.TickAfter(p.expiry.Sub(clock.Now())) } // mailCourier is a dedicated goroutine whose job is to reliably deliver @@ -223,7 +346,12 @@ func (m *memoryMailBox) Stop() error { // couriers, and mail couriers. Depending on the passed courierType, this // goroutine will assume one of two roles. func (m *memoryMailBox) mailCourier(cType courierType) { - defer m.wg.Done() + switch cType { + case wireCourier: + defer close(m.wireShutdown) + case pktCourier: + defer close(m.pktShutdown) + } // TODO(roasbeef): refactor... @@ -250,7 +378,7 @@ func (m *memoryMailBox) mailCourier(cType courierType) { case pktCourier: m.pktCond.L.Lock() - for m.pktHead == nil { + for m.repHead == nil && m.addHead == nil { m.pktCond.Wait() select { @@ -259,9 +387,11 @@ func (m *memoryMailBox) mailCourier(cType courierType) { // any un-ACK'd messages are re-delivered upon // reconnect. case pktDone := <-m.pktReset: - m.pktHead = m.htlcPkts.Front() + m.repHead = m.repPkts.Front() + m.addHead = m.addPkts.Front() close(pktDone) + case <-m.quit: m.pktCond.L.Unlock() return @@ -271,8 +401,11 @@ func (m *memoryMailBox) mailCourier(cType courierType) { } var ( - nextPkt *htlcPacket - nextMsg lnwire.Message + nextRep *htlcPacket + nextRepEl *list.Element + nextAdd *pktWithExpiry + nextAddEl *list.Element + nextMsg lnwire.Message ) switch cType { // Grab the datum off the front of the queue, shifting the @@ -287,8 +420,20 @@ func (m *memoryMailBox) mailCourier(cType courierType) { // doesn't make it into a commitment, then it'll be // re-delivered once the link comes back online. case pktCourier: - nextPkt = m.pktHead.Value.(*htlcPacket) - m.pktHead = m.pktHead.Next() + // Peek at the head of the Settle/Fails and Add queues. 
+ // We peak both even if there is a Settle/Fail present + // because we need to set a deadline for the next + // pending Add if it's present. Due to clock + // monotonicity, we know that the head of the Adds is + // the next to expire. + if m.repHead != nil { + nextRep = m.repHead.Value.(*htlcPacket) + nextRepEl = m.repHead + } + if m.addHead != nil { + nextAdd = m.addHead.Value.(*pktWithExpiry) + nextAddEl = m.addHead + } } // Now that we're done with the condition, we can unlock it to @@ -318,14 +463,77 @@ func (m *memoryMailBox) mailCourier(cType courierType) { } case pktCourier: + var ( + pktOutbox chan *htlcPacket + addOutbox chan *htlcPacket + add *htlcPacket + deadline <-chan time.Time + ) + + // Prioritize delivery of Settle/Fail packets over Adds. + // This ensures that we actively clear the commitment of + // existing HTLCs before trying to add new ones. This + // can help to improve forwarding performance since the + // time to sign a commitment is linear in the number of + // HTLCs manifested on the commitments. + // + // NOTE: Both types are eventually delivered over the + // same channel, but we can control which is delivered + // by exclusively making one nil and the other non-nil. + // We know from our loop condition that at least one + // nextRep and nextAdd are non-nil. + if nextRep != nil { + pktOutbox = m.pktOutbox + } else { + addOutbox = m.pktOutbox + } + + // If we have a pending Add, we'll also construct the + // deadline so we can fail it back if we are unable to + // deliver any message in time. We also dereference the + // nextAdd's packet, since we will need access to it in + // the case we are delivering it and/or if the deadline + // expires. + // + // NOTE: It's possible after this point for add to be + // nil, but this can only occur when addOutbox is also + // nil, hence we won't accidentally deliver a nil + // packet. 
+ if nextAdd != nil { + add = nextAdd.pkt + deadline = nextAdd.deadline(m.cfg.clock) + } + select { - case m.pktOutbox <- nextPkt: + case pktOutbox <- nextRep: + m.pktCond.L.Lock() + // Only advance the repHead if this Settle or + // Fail is still at the head of the queue. + if m.repHead != nil && m.repHead == nextRepEl { + m.repHead = m.repHead.Next() + } + m.pktCond.L.Unlock() + + case addOutbox <- add: + m.pktCond.L.Lock() + // Only advance the addHead if this Add is still + // at the head of the queue. + if m.addHead != nil && m.addHead == nextAddEl { + m.addHead = m.addHead.Next() + } + m.pktCond.L.Unlock() + + case <-deadline: + m.FailAdd(add) + case pktDone := <-m.pktReset: m.pktCond.L.Lock() - m.pktHead = m.htlcPkts.Front() + m.repHead = m.repPkts.Front() + m.addHead = m.addPkts.Front() m.pktCond.L.Unlock() close(pktDone) + case <-m.quit: return } @@ -357,18 +565,41 @@ func (m *memoryMailBox) AddMessage(msg lnwire.Message) error { // NOTE: This method is safe for concrete use and part of the MailBox // interface. func (m *memoryMailBox) AddPacket(pkt *htlcPacket) error { - // First, we'll lock the condition, and add the packet to the end of - // the htlc packet inbox. m.pktCond.L.Lock() - if _, ok := m.pktIndex[pkt.inKey()]; ok { - m.pktCond.L.Unlock() - return nil - } + switch htlc := pkt.htlc.(type) { + + // Split off Settle/Fail packets into the repPkts queue. + case *lnwire.UpdateFulfillHTLC, *lnwire.UpdateFailHTLC: + if _, ok := m.repIndex[pkt.inKey()]; ok { + m.pktCond.L.Unlock() + return ErrPacketAlreadyExists + } + + entry := m.repPkts.PushBack(pkt) + m.repIndex[pkt.inKey()] = entry + if m.repHead == nil { + m.repHead = entry + } - entry := m.htlcPkts.PushBack(pkt) - m.pktIndex[pkt.inKey()] = entry - if m.pktHead == nil { - m.pktHead = entry + // Split off Add packets into the addPkts queue. 
+ case *lnwire.UpdateAddHTLC: + if _, ok := m.addIndex[pkt.inKey()]; ok { + m.pktCond.L.Unlock() + return ErrPacketAlreadyExists + } + + entry := m.addPkts.PushBack(&pktWithExpiry{ + pkt: pkt, + expiry: m.cfg.clock.Now().Add(m.cfg.expiry), + }) + m.addIndex[pkt.inKey()] = entry + if m.addHead == nil { + m.addHead = entry + } + + default: + m.pktCond.L.Unlock() + return fmt.Errorf("unknown htlc type: %T", htlc) } m.pktCond.L.Unlock() @@ -379,6 +610,80 @@ func (m *memoryMailBox) AddPacket(pkt *htlcPacket) error { return nil } +// FailAdd fails an UpdateAddHTLC that exists within the mailbox, removing it +// from the in-memory replay buffer. This will prevent the packet from being +// delivered after the link restarts if the switch has remained online. The +// generated LinkError will show an OutgoingFailureDownstreamHtlcAdd +// FailureDetail. +func (m *memoryMailBox) FailAdd(pkt *htlcPacket) { + // First, remove the packet from mailbox. If we didn't find the packet + // because it has already been acked, we'll exit early to avoid sending + // a duplicate fail message through the switch. + if !m.AckPacket(pkt.inKey()) { + return + } + + var ( + localFailure = false + reason lnwire.OpaqueReason + ) + + // Create a temporary channel failure which we will send back to our + // peer if this is a forward, or report to the user if the failed + // payment was locally initiated. + var failure lnwire.FailureMessage + update, err := m.cfg.fetchUpdate(m.cfg.shortChanID) + if err != nil { + failure = &lnwire.FailTemporaryNodeFailure{} + } else { + failure = lnwire.NewTemporaryChannelFailure(update) + } + + // If the payment was locally initiated (which is indicated by a nil + // obfuscator), we do not need to encrypt it back to the sender. 
+ if pkt.obfuscator == nil { + var b bytes.Buffer + err := lnwire.EncodeFailure(&b, failure, 0) + if err != nil { + log.Errorf("Unable to encode failure: %v", err) + return + } + reason = lnwire.OpaqueReason(b.Bytes()) + localFailure = true + } else { + // If the packet is part of a forward, (identified by a non-nil + // obfuscator) we need to encrypt the error back to the source. + var err error + reason, err = pkt.obfuscator.EncryptFirstHop(failure) + if err != nil { + log.Errorf("Unable to obfuscate error: %v", err) + return + } + } + + // Create a link error containing the temporary channel failure and a + // detail which indicates the we failed to add the htlc. + linkError := NewDetailedLinkError( + failure, OutgoingFailureDownstreamHtlcAdd, + ) + + failPkt := &htlcPacket{ + incomingChanID: pkt.incomingChanID, + incomingHTLCID: pkt.incomingHTLCID, + circuit: pkt.circuit, + sourceRef: pkt.sourceRef, + hasSource: true, + localFailure: localFailure, + linkFailure: linkError, + htlc: &lnwire.UpdateFailHTLC{ + Reason: reason, + }, + } + + errChan := m.cfg.forwardPackets(m.quit, failPkt) + go handleBatchFwdErrs(errChan, log) +} + // MessageOutBox returns a channel that any new messages ready for delivery // will be sent on. // @@ -403,6 +708,8 @@ func (m *memoryMailBox) PacketOutBox() chan *htlcPacket { type mailOrchestrator struct { mu sync.RWMutex + cfg *mailOrchConfig + // mailboxes caches exactly one mailbox for all known channels. mailboxes map[lnwire.ChannelID]MailBox @@ -423,9 +730,29 @@ type mailOrchestrator struct { unclaimedPackets map[lnwire.ShortChannelID][]*htlcPacket } +type mailOrchConfig struct { + // forwardPackets send a varidic number of htlcPackets to the switch to + // be routed. A quit channel should be provided so that the call can + // properly exit during shutdown. + forwardPackets func(chan struct{}, ...*htlcPacket) chan error + + // fetchUpdate retreives the most recent channel update for the channel + // this mailbox belongs to. 
+ fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) + + // clock is a time source for the generated mailboxes. + clock clock.Clock + + // expiry is the interval after which Adds will be cancelled if they + // have not been yet been delivered. The computed deadline will expiry + // this long after the Adds are added to a mailbox via AddPacket. + expiry time.Duration +} + // newMailOrchestrator initializes a fresh mailOrchestrator. -func newMailOrchestrator() *mailOrchestrator { +func newMailOrchestrator(cfg *mailOrchConfig) *mailOrchestrator { return &mailOrchestrator{ + cfg: cfg, mailboxes: make(map[lnwire.ChannelID]MailBox), liveIndex: make(map[lnwire.ShortChannelID]lnwire.ChannelID), unclaimedPackets: make(map[lnwire.ShortChannelID][]*htlcPacket), @@ -441,7 +768,9 @@ func (mo *mailOrchestrator) Stop() { // GetOrCreateMailBox returns an existing mailbox belonging to `chanID`, or // creates and returns a new mailbox if none is found. -func (mo *mailOrchestrator) GetOrCreateMailBox(chanID lnwire.ChannelID) MailBox { +func (mo *mailOrchestrator) GetOrCreateMailBox(chanID lnwire.ChannelID, + shortChanID lnwire.ShortChannelID) MailBox { + // First, try lookup the mailbox directly using only the shared mutex. mo.mu.RLock() mailbox, ok := mo.mailboxes[chanID] @@ -454,7 +783,7 @@ func (mo *mailOrchestrator) GetOrCreateMailBox(chanID lnwire.ChannelID) MailBox // Otherwise, we will try again with exclusive lock, creating a mailbox // if one still has not been created. mo.mu.Lock() - mailbox = mo.exclusiveGetOrCreateMailBox(chanID) + mailbox = mo.exclusiveGetOrCreateMailBox(chanID, shortChanID) mo.mu.Unlock() return mailbox @@ -466,11 +795,17 @@ func (mo *mailOrchestrator) GetOrCreateMailBox(chanID lnwire.ChannelID) MailBox // // NOTE: This method MUST be invoked with the mailOrchestrator's exclusive lock. 
func (mo *mailOrchestrator) exclusiveGetOrCreateMailBox( - chanID lnwire.ChannelID) MailBox { + chanID lnwire.ChannelID, shortChanID lnwire.ShortChannelID) MailBox { mailbox, ok := mo.mailboxes[chanID] if !ok { - mailbox = newMemoryMailBox() + mailbox = newMemoryMailBox(&mailBoxConfig{ + shortChanID: shortChanID, + fetchUpdate: mo.cfg.fetchUpdate, + forwardPackets: mo.cfg.forwardPackets, + clock: mo.cfg.clock, + expiry: mo.cfg.expiry, + }) mailbox.Start() mo.mailboxes[chanID] = mailbox } @@ -550,7 +885,7 @@ func (mo *mailOrchestrator) Deliver( // index should only be set if the mailbox had been initialized // beforehand. However, this does ensure that this case is // handled properly in the event that it could happen. - mailbox = mo.exclusiveGetOrCreateMailBox(chanID) + mailbox = mo.exclusiveGetOrCreateMailBox(chanID, sid) mo.mu.Unlock() // Deliver the packet to the mailbox if it was found or created. diff --git a/htlcswitch/mailbox_test.go b/htlcswitch/mailbox_test.go index e8356c97d8..7e15eb040f 100644 --- a/htlcswitch/mailbox_test.go +++ b/htlcswitch/mailbox_test.go @@ -7,9 +7,12 @@ import ( "time" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lnwire" ) +const testExpiry = time.Minute + // TestMailBoxCouriers tests that both aspects of the mailBox struct works // properly. Both packets and messages should be able to added to each // respective mailbox concurrently, and also messages/packets should also be @@ -19,9 +22,8 @@ func TestMailBoxCouriers(t *testing.T) { // First, we'll create new instance of the current default mailbox // type. - mailBox := newMemoryMailBox() - mailBox.Start() - defer mailBox.Stop() + ctx := newMailboxContext(t, time.Now(), testExpiry) + defer ctx.mailbox.Stop() // We'll be adding 10 message of both types to the mailbox. 
const numPackets = 10 @@ -34,10 +36,16 @@ func TestMailBoxCouriers(t *testing.T) { outgoingChanID: lnwire.NewShortChanIDFromInt(uint64(prand.Int63())), incomingChanID: lnwire.NewShortChanIDFromInt(uint64(prand.Int63())), amount: lnwire.MilliSatoshi(prand.Int63()), + htlc: &lnwire.UpdateAddHTLC{ + ID: uint64(i), + }, } sentPackets[i] = pkt - mailBox.AddPacket(pkt) + err := ctx.mailbox.AddPacket(pkt) + if err != nil { + t.Fatalf("unable to add packet: %v", err) + } } // Next, we'll do the same, but this time adding wire messages. @@ -49,7 +57,10 @@ func TestMailBoxCouriers(t *testing.T) { } sentMessages[i] = msg - mailBox.AddMessage(msg) + err := ctx.mailbox.AddMessage(msg) + if err != nil { + t.Fatalf("unable to add message: %v", err) + } } // Now we'll attempt to read back the packets/messages we added to the @@ -63,14 +74,14 @@ func TestMailBoxCouriers(t *testing.T) { select { case <-timeout: t.Fatalf("didn't recv pkt after timeout") - case pkt := <-mailBox.PacketOutBox(): + case pkt := <-ctx.mailbox.PacketOutBox(): recvdPackets = append(recvdPackets, pkt) } } else { select { case <-timeout: t.Fatalf("didn't recv message after timeout") - case msg := <-mailBox.MessageOutBox(): + case msg := <-ctx.mailbox.MessageOutBox(): recvdMessages = append(recvdMessages, msg) } } @@ -101,13 +112,19 @@ func TestMailBoxCouriers(t *testing.T) { // Now that we've received all of the intended msgs/pkts, ack back half // of the packets. for _, recvdPkt := range recvdPackets[:halfPackets] { - mailBox.AckPacket(recvdPkt.inKey()) + ctx.mailbox.AckPacket(recvdPkt.inKey()) } // With the packets drained and partially acked, we reset the mailbox, // simulating a link shutting down and then coming back up. 
- mailBox.ResetMessages() - mailBox.ResetPackets() + err := ctx.mailbox.ResetMessages() + if err != nil { + t.Fatalf("unable to reset messages: %v", err) + } + err = ctx.mailbox.ResetPackets() + if err != nil { + t.Fatalf("unable to reset packets: %v", err) + } // Now, we'll use the same alternating strategy to read from our // mailbox. All wire messages are dropped on startup, but any unacked @@ -120,12 +137,12 @@ func TestMailBoxCouriers(t *testing.T) { select { case <-timeout: t.Fatalf("didn't recv pkt after timeout") - case pkt := <-mailBox.PacketOutBox(): + case pkt := <-ctx.mailbox.PacketOutBox(): recvdPackets2 = append(recvdPackets2, pkt) } } else { select { - case <-mailBox.MessageOutBox(): + case <-ctx.mailbox.MessageOutBox(): t.Fatalf("should not receive wire msg after reset") default: } @@ -148,6 +165,380 @@ func TestMailBoxCouriers(t *testing.T) { } } +// TestMailBoxResetAfterShutdown tests that ResetMessages and ResetPackets +// return ErrMailBoxShuttingDown after the mailbox has been stopped. +func TestMailBoxResetAfterShutdown(t *testing.T) { + t.Parallel() + + ctx := newMailboxContext(t, time.Now(), time.Second) + + // Stop the mailbox, then try to reset the message and packet couriers. 
+ ctx.mailbox.Stop() + + err := ctx.mailbox.ResetMessages() + if err != ErrMailBoxShuttingDown { + t.Fatalf("expected ErrMailBoxShuttingDown, got: %v", err) + } + + err = ctx.mailbox.ResetPackets() + if err != ErrMailBoxShuttingDown { + t.Fatalf("expected ErrMailBoxShuttingDown, got: %v", err) + } +} + +type mailboxContext struct { + t *testing.T + mailbox MailBox + clock *clock.TestClock + forwards chan *htlcPacket +} + +func newMailboxContext(t *testing.T, startTime time.Time, + expiry time.Duration) *mailboxContext { + + ctx := &mailboxContext{ + t: t, + clock: clock.NewTestClock(startTime), + forwards: make(chan *htlcPacket, 1), + } + ctx.mailbox = newMemoryMailBox(&mailBoxConfig{ + fetchUpdate: func(sid lnwire.ShortChannelID) ( + *lnwire.ChannelUpdate, error) { + return &lnwire.ChannelUpdate{ + ShortChannelID: sid, + }, nil + }, + forwardPackets: ctx.forward, + clock: ctx.clock, + expiry: expiry, + }) + ctx.mailbox.Start() + + return ctx +} + +func (c *mailboxContext) forward(_ chan struct{}, + pkts ...*htlcPacket) chan error { + + for _, pkt := range pkts { + c.forwards <- pkt + } + + errChan := make(chan error) + close(errChan) + + return errChan +} + +func (c *mailboxContext) sendAdds(start, num int) []*htlcPacket { + c.t.Helper() + + sentPackets := make([]*htlcPacket, num) + for i := 0; i < num; i++ { + pkt := &htlcPacket{ + outgoingChanID: lnwire.NewShortChanIDFromInt( + uint64(prand.Int63())), + incomingChanID: lnwire.NewShortChanIDFromInt( + uint64(prand.Int63())), + incomingHTLCID: uint64(start + i), + amount: lnwire.MilliSatoshi(prand.Int63()), + htlc: &lnwire.UpdateAddHTLC{ + ID: uint64(start + i), + }, + } + sentPackets[i] = pkt + + err := c.mailbox.AddPacket(pkt) + if err != nil { + c.t.Fatalf("unable to add packet: %v", err) + } + } + + return sentPackets +} + +func (c *mailboxContext) receivePkts(pkts []*htlcPacket) { + c.t.Helper() + + for i, expPkt := range pkts { + select { + case pkt := <-c.mailbox.PacketOutBox(): + if 
reflect.DeepEqual(expPkt, pkt) { + continue + } + + c.t.Fatalf("inkey mismatch #%d, want: %v vs "+ + "got: %v", i, expPkt.inKey(), pkt.inKey()) + + case <-time.After(50 * time.Millisecond): + c.t.Fatalf("did not receive fail for index %d", i) + } + } +} + +func (c *mailboxContext) checkFails(adds []*htlcPacket) { + c.t.Helper() + + for i, add := range adds { + select { + case fail := <-c.forwards: + if add.inKey() == fail.inKey() { + continue + } + c.t.Fatalf("inkey mismatch #%d, add: %v vs fail: %v", + i, add.inKey(), fail.inKey()) + + case <-time.After(50 * time.Millisecond): + c.t.Fatalf("did not receive fail for index %d", i) + } + } + + select { + case pkt := <-c.forwards: + c.t.Fatalf("unexpected forward: %v", pkt) + case <-time.After(50 * time.Millisecond): + } +} + +// TestMailBoxFailAdd asserts that FailAdd returns a response to the switch +// under various interleavings with other operations on the mailbox. +func TestMailBoxFailAdd(t *testing.T) { + var ( + batchDelay = time.Second + expiry = time.Minute + firstBatchStart = time.Now() + secondBatchStart = time.Now().Add(batchDelay) + thirdBatchStart = time.Now().Add(2 * batchDelay) + thirdBatchExpiry = thirdBatchStart.Add(expiry) + ) + ctx := newMailboxContext(t, firstBatchStart, expiry) + defer ctx.mailbox.Stop() + + failAdds := func(adds []*htlcPacket) { + for _, add := range adds { + ctx.mailbox.FailAdd(add) + } + } + + const numBatchPackets = 5 + + // Send 10 adds, and pull them from the mailbox. + firstBatch := ctx.sendAdds(0, numBatchPackets) + ctx.receivePkts(firstBatch) + + // Fail all of these adds, simulating an error adding the HTLCs to the + // commitment. We should see a failure message for each. + go failAdds(firstBatch) + ctx.checkFails(firstBatch) + + // As a sanity check, Fail all of them again and assert that no + // duplicate fails are sent. + go failAdds(firstBatch) + ctx.checkFails(nil) + + // Now, send a second batch of adds after a short delay and deliver them + // to the link. 
+ ctx.clock.SetTime(secondBatchStart) + secondBatch := ctx.sendAdds(numBatchPackets, numBatchPackets) + ctx.receivePkts(secondBatch) + + // Reset the packet queue w/o changing the current time. This simulates + // the link flapping and coming back up before the second batch's + // expiries have elapsed. We should see no failures sent back. + err := ctx.mailbox.ResetPackets() + if err != nil { + t.Fatalf("unable to reset packets: %v", err) + } + ctx.checkFails(nil) + + // Redeliver the second batch to the link and hold them there. + ctx.receivePkts(secondBatch) + + // Send a third batch of adds shortly after the second batch. + ctx.clock.SetTime(thirdBatchStart) + thirdBatch := ctx.sendAdds(2*numBatchPackets, numBatchPackets) + + // Advance the clock so that the third batch expires. We expect to only + // see fails for the third batch, since the second batch is still being + // held by the link. + ctx.clock.SetTime(thirdBatchExpiry) + ctx.checkFails(thirdBatch) + + // Finally, reset the link which should cause the second batch to be + // cancelled immediately. + err = ctx.mailbox.ResetPackets() + if err != nil { + t.Fatalf("unable to reset packets: %v", err) + } + ctx.checkFails(secondBatch) +} + +// TestMailBoxPacketPrioritization asserts that the mailbox will prioritize +// delivering Settle and Fail packets over Adds if both are available for +// delivery at the same time. +func TestMailBoxPacketPrioritization(t *testing.T) { + t.Parallel() + + // First, we'll create new instance of the current default mailbox + // type. 
+ ctx := newMailboxContext(t, time.Now(), testExpiry) + defer ctx.mailbox.Stop() + + const numPackets = 5 + + _, _, aliceChanID, bobChanID := genIDs() + + // Next we'll send the following sequence of packets: + // - Settle1 + // - Add1 + // - Add2 + // - Fail + // - Settle2 + sentPackets := make([]*htlcPacket, numPackets) + for i := 0; i < numPackets; i++ { + pkt := &htlcPacket{ + outgoingChanID: aliceChanID, + outgoingHTLCID: uint64(i), + incomingChanID: bobChanID, + incomingHTLCID: uint64(i), + amount: lnwire.MilliSatoshi(prand.Int63()), + } + + switch i { + case 0, 4: + // First and last packets are a Settle. A non-Add is + // sent first to make the test deterministic w/o needing + // to sleep. + pkt.htlc = &lnwire.UpdateFulfillHTLC{ID: uint64(i)} + case 1, 2: + // Next two packets are Adds. + pkt.htlc = &lnwire.UpdateAddHTLC{ID: uint64(i)} + case 3: + // Last packet is a Fail. + pkt.htlc = &lnwire.UpdateFailHTLC{ID: uint64(i)} + } + + sentPackets[i] = pkt + + err := ctx.mailbox.AddPacket(pkt) + if err != nil { + t.Fatalf("failed to add packet: %v", err) + } + } + + // When dequeueing the packets, we expect the following sequence: + // - Settle1 + // - Fail + // - Settle2 + // - Add1 + // - Add2 + // + // We expect to see Fail and Settle2 to be delivered before either Add1 + // or Add2 due to the prioritization between the split queue. + for i := 0; i < numPackets; i++ { + select { + case pkt := <-ctx.mailbox.PacketOutBox(): + var expPkt *htlcPacket + switch i { + case 0: + // First packet should be Settle1. + expPkt = sentPackets[0] + case 1: + // Second packet should be Fail. + expPkt = sentPackets[3] + case 2: + // Third packet should be Settle2. + expPkt = sentPackets[4] + case 3: + // Fourth packet should be Add1. + expPkt = sentPackets[1] + case 4: + // Last packet should be Add2. 
+ expPkt = sentPackets[2] + } + + if !reflect.DeepEqual(expPkt, pkt) { + t.Fatalf("recvd packet mismatch %d, want: %v, got: %v", + i, spew.Sdump(expPkt), spew.Sdump(pkt)) + } + + case <-time.After(50 * time.Millisecond): + t.Fatalf("didn't receive packet %d before timeout", i) + } + } +} + +// TestMailBoxAddExpiry asserts that the mailbox will cancel back Adds that have +// reached their expiry time. +func TestMailBoxAddExpiry(t *testing.T) { + var ( + expiry = time.Minute + batchDelay = time.Second + firstBatchStart = time.Now() + firstBatchExpiry = firstBatchStart.Add(expiry) + secondBatchStart = firstBatchStart.Add(batchDelay) + secondBatchExpiry = secondBatchStart.Add(expiry) + ) + + ctx := newMailboxContext(t, firstBatchStart, expiry) + defer ctx.mailbox.Stop() + + // Each batch will consist of 10 messages. + const numBatchPackets = 10 + + firstBatch := ctx.sendAdds(0, numBatchPackets) + + ctx.clock.SetTime(secondBatchStart) + ctx.checkFails(nil) + + secondBatch := ctx.sendAdds(numBatchPackets, numBatchPackets) + + ctx.clock.SetTime(firstBatchExpiry) + ctx.checkFails(firstBatch) + + ctx.clock.SetTime(secondBatchExpiry) + ctx.checkFails(secondBatch) +} + +// TestMailBoxDuplicateAddPacket asserts that the mailbox returns an +// ErrPacketAlreadyExists failure when two htlcPackets are added with identical +// incoming circuit keys. +func TestMailBoxDuplicateAddPacket(t *testing.T) { + t.Parallel() + + ctx := newMailboxContext(t, time.Now(), testExpiry) + ctx.mailbox.Start() + defer ctx.mailbox.Stop() + + addTwice := func(t *testing.T, pkt *htlcPacket) { + // The first add should succeed. + err := ctx.mailbox.AddPacket(pkt) + if err != nil { + t.Fatalf("unable to add packet: %v", err) + } + + // Adding again with the same incoming circuit key should fail. + err = ctx.mailbox.AddPacket(pkt) + if err != ErrPacketAlreadyExists { + t.Fatalf("expected ErrPacketAlreadyExists, got: %v", err) + } + } + + // Assert duplicate AddPacket calls fail for all types of HTLCs. 
+ addTwice(t, &htlcPacket{ + incomingHTLCID: 0, + htlc: &lnwire.UpdateAddHTLC{}, + }) + addTwice(t, &htlcPacket{ + incomingHTLCID: 1, + htlc: &lnwire.UpdateFulfillHTLC{}, + }) + addTwice(t, &htlcPacket{ + incomingHTLCID: 2, + htlc: &lnwire.UpdateFailHTLC{}, + }) +} + // TestMailOrchestrator asserts that the orchestrator properly buffers packets // for channels that haven't been made live, such that they are delivered // immediately after BindLiveShortChanID. It also tests that packets are delivered @@ -156,7 +547,24 @@ func TestMailOrchestrator(t *testing.T) { t.Parallel() // First, we'll create a new instance of our orchestrator. - mo := newMailOrchestrator() + mo := newMailOrchestrator(&mailOrchConfig{ + fetchUpdate: func(sid lnwire.ShortChannelID) ( + *lnwire.ChannelUpdate, error) { + return &lnwire.ChannelUpdate{ + ShortChannelID: sid, + }, nil + }, + forwardPackets: func(_ chan struct{}, + pkts ...*htlcPacket) chan error { + // Close the channel immediately so the goroutine + // logging errors can exit. + errChan := make(chan error) + close(errChan) + return errChan + }, + clock: clock.NewTestClock(time.Now()), + expiry: testExpiry, + }) defer mo.Stop() // We'll be delivering 10 htlc packets via the orchestrator. @@ -174,6 +582,9 @@ func TestMailOrchestrator(t *testing.T) { incomingChanID: bobChanID, incomingHTLCID: uint64(i), amount: lnwire.MilliSatoshi(prand.Int63()), + htlc: &lnwire.UpdateAddHTLC{ + ID: uint64(i), + }, } sentPackets[i] = pkt @@ -181,7 +592,7 @@ func TestMailOrchestrator(t *testing.T) { } // Now, initialize a new mailbox for Alice's chanid. - mailbox := mo.GetOrCreateMailBox(chanID1) + mailbox := mo.GetOrCreateMailBox(chanID1, aliceChanID) // Verify that no messages are received, since Alice's mailbox has not // been made live. @@ -226,7 +637,7 @@ func TestMailOrchestrator(t *testing.T) { // For the second half of the test, create a new mailbox for Bob and // immediately make it live with an assigned short chan id. 
- mailbox = mo.GetOrCreateMailBox(chanID2) + mailbox = mo.GetOrCreateMailBox(chanID2, bobChanID) mo.BindLiveShortChanID(mailbox, chanID2, bobChanID) // Create the second half of our htlcs, and deliver them via the @@ -239,6 +650,9 @@ func TestMailOrchestrator(t *testing.T) { incomingChanID: bobChanID, incomingHTLCID: uint64(halfPackets + i), amount: lnwire.MilliSatoshi(prand.Int63()), + htlc: &lnwire.UpdateAddHTLC{ + ID: uint64(halfPackets + i), + }, } sentPackets[i] = pkt diff --git a/htlcswitch/mock.go b/htlcswitch/mock.go index 626ebe824c..e9a2a1efab 100644 --- a/htlcswitch/mock.go +++ b/htlcswitch/mock.go @@ -22,13 +22,14 @@ import ( sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/invoices" "github.com/lightningnetwork/lnd/lnpeer" "github.com/lightningnetwork/lnd/lntypes" - "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/ticker" ) @@ -70,13 +71,13 @@ func (m *mockPreimageCache) SubscribeUpdates() *contractcourt.WitnessSubscriptio } type mockFeeEstimator struct { - byteFeeIn chan lnwallet.SatPerKWeight + byteFeeIn chan chainfee.SatPerKWeight quit chan struct{} } func (m *mockFeeEstimator) EstimateFeePerKW( - numBlocks uint32) (lnwallet.SatPerKWeight, error) { + numBlocks uint32) (chainfee.SatPerKWeight, error) { select { case feeRate := <-m.byteFeeIn: @@ -86,7 +87,7 @@ func (m *mockFeeEstimator) EstimateFeePerKW( } } -func (m *mockFeeEstimator) RelayFeePerKW() lnwallet.SatPerKWeight { +func (m *mockFeeEstimator) RelayFeePerKW() chainfee.SatPerKWeight { return 1e3 } @@ -98,7 +99,7 @@ func (m *mockFeeEstimator) Stop() error { return nil } -var 
_ lnwallet.FeeEstimator = (*mockFeeEstimator)(nil) +var _ chainfee.Estimator = (*mockFeeEstimator)(nil) type mockForwardingLog struct { sync.Mutex @@ -175,6 +176,9 @@ func initSwitchWithDB(startingHeight uint32, db *channeldb.DB) (*Switch, error) FwdEventTicker: ticker.NewForce(DefaultFwdEventInterval), LogEventTicker: ticker.NewForce(DefaultLogInterval), AckEventTicker: ticker.NewForce(DefaultAckInterval), + HtlcNotifier: &mockHTLCNotifier{}, + Clock: clock.NewDefaultClock(), + HTLCExpiry: time.Hour, } return New(cfg, startingHeight) @@ -265,16 +269,14 @@ func (s *mockServer) QuitSignal() <-chan struct{} { // mockHopIterator represents the test version of hop iterator which instead // of encrypting the path in onion blob just stores the path as a list of hops. type mockHopIterator struct { - hops []hop.ForwardingInfo + hops []*hop.Payload } -func newMockHopIterator(hops ...hop.ForwardingInfo) hop.Iterator { +func newMockHopIterator(hops ...*hop.Payload) hop.Iterator { return &mockHopIterator{hops: hops} } -func (r *mockHopIterator) ForwardingInstructions() ( - hop.ForwardingInfo, error) { - +func (r *mockHopIterator) HopPayload() (*hop.Payload, error) { h := r.hops[0] r.hops = r.hops[1:] return h, nil @@ -300,7 +302,8 @@ func (r *mockHopIterator) EncodeNextHop(w io.Writer) error { } for _, hop := range r.hops { - if err := encodeFwdInfo(w, &hop); err != nil { + fwdInfo := hop.ForwardingInfo() + if err := encodeFwdInfo(w, &fwdInfo); err != nil { return err } } @@ -334,6 +337,7 @@ var _ hop.Iterator = (*mockHopIterator)(nil) // encodes the failure and do not makes any onion obfuscation. type mockObfuscator struct { ogPacket *sphinx.OnionPacket + failure lnwire.FailureMessage } // NewMockObfuscator initializes a dummy mockObfuscator used for testing. 
@@ -366,6 +370,8 @@ func (o *mockObfuscator) Reextract( func (o *mockObfuscator) EncryptFirstHop(failure lnwire.FailureMessage) ( lnwire.OpaqueReason, error) { + o.failure = failure + var b bytes.Buffer if err := lnwire.EncodeFailure(&b, failure, 0); err != nil { return nil, err @@ -397,10 +403,7 @@ func (o *mockDeobfuscator) DecryptError(reason lnwire.OpaqueReason) (*Forwarding return nil, err } - return &ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: failure, - }, nil + return NewForwardingError(failure, 1), nil } var _ ErrorDecrypter = (*mockDeobfuscator)(nil) @@ -431,14 +434,22 @@ func (p *mockIteratorDecoder) DecodeHopIterator(r io.Reader, rHash []byte, } hopLength := binary.BigEndian.Uint32(b[:]) - hops := make([]hop.ForwardingInfo, hopLength) + hops := make([]*hop.Payload, hopLength) for i := uint32(0); i < hopLength; i++ { - f := &hop.ForwardingInfo{} - if err := decodeFwdInfo(r, f); err != nil { + var f hop.ForwardingInfo + if err := decodeFwdInfo(r, &f); err != nil { return nil, lnwire.CodeTemporaryChannelFailure } - hops[i] = *f + var nextHopBytes [8]byte + binary.BigEndian.PutUint64(nextHopBytes[:], f.NextHop.ToUint64()) + + hops[i] = hop.NewLegacyPayload(&sphinx.HopData{ + Realm: [1]byte{}, // hop.BitcoinNetwork + NextAddress: nextHopBytes, + ForwardAmount: uint64(f.AmountToForward), + OutgoingCltv: f.OutgoingCTLV, + }) } return newMockHopIterator(hops...), lnwire.CodeNone @@ -593,15 +604,13 @@ func (s *mockServer) AddNewChannel(channel *channeldb.OpenChannel, return nil } -func (s *mockServer) WipeChannel(*wire.OutPoint) error { - return nil -} +func (s *mockServer) WipeChannel(*wire.OutPoint) {} -func (s *mockServer) LocalGlobalFeatures() *lnwire.FeatureVector { +func (s *mockServer) LocalFeatures() *lnwire.FeatureVector { return nil } -func (s *mockServer) RemoteGlobalFeatures() *lnwire.FeatureVector { +func (s *mockServer) RemoteFeatures() *lnwire.FeatureVector { return nil } @@ -637,7 +646,9 @@ type mockChannelLink struct { htlcID 
uint64 - htlcSatifiesPolicyLocalResult lnwire.FailureMessage + checkHtlcTransitResult *LinkError + + checkHtlcForwardResult *LinkError } // completeCircuit is a helper method for adding the finalized payment circuit @@ -696,16 +707,17 @@ func (f *mockChannelLink) HandleChannelUpdate(lnwire.Message) { func (f *mockChannelLink) UpdateForwardingPolicy(_ ForwardingPolicy) { } -func (f *mockChannelLink) HtlcSatifiesPolicy([32]byte, lnwire.MilliSatoshi, - lnwire.MilliSatoshi, uint32, uint32, uint32) lnwire.FailureMessage { - return nil +func (f *mockChannelLink) CheckHtlcForward([32]byte, lnwire.MilliSatoshi, + lnwire.MilliSatoshi, uint32, uint32, uint32) *LinkError { + + return f.checkHtlcForwardResult } -func (f *mockChannelLink) HtlcSatifiesPolicyLocal(payHash [32]byte, +func (f *mockChannelLink) CheckHtlcTransit(payHash [32]byte, amt lnwire.MilliSatoshi, timeout uint32, - heightNow uint32) lnwire.FailureMessage { + heightNow uint32) *LinkError { - return f.htlcSatifiesPolicyLocalResult + return f.checkHtlcTransitResult } func (f *mockChannelLink) Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi) { @@ -777,9 +789,13 @@ func newMockRegistry(minDelta uint32) *mockInvoiceRegistry { panic(err) } - finalCltvRejectDelta := int32(5) - - registry := invoices.NewRegistry(cdb, finalCltvRejectDelta) + registry := invoices.NewRegistry( + cdb, + invoices.NewInvoiceExpiryWatcher(clock.NewDefaultClock()), + &invoices.RegistryConfig{ + FinalCltvRejectDelta: 5, + }, + ) registry.Start() return &mockInvoiceRegistry{ @@ -801,10 +817,11 @@ func (i *mockInvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) error func (i *mockInvoiceRegistry) NotifyExitHopHtlc(rhash lntypes.Hash, amt lnwire.MilliSatoshi, expiry uint32, currentHeight int32, circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - eob []byte) (*invoices.HodlEvent, error) { + payload invoices.Payload) (invoices.HtlcResolution, error) { event, err := i.registry.NotifyExitHopHtlc( - rhash, amt, expiry, 
currentHeight, circuitKey, hodlChan, eob, + rhash, amt, expiry, currentHeight, circuitKey, hodlChan, + payload, ) if err != nil { return nil, err @@ -837,7 +854,9 @@ type mockSigner struct { key *btcec.PrivateKey } -func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, signDesc *input.SignDescriptor) ([]byte, error) { +func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, + signDesc *input.SignDescriptor) (input.Signature, error) { + amt := signDesc.Output.Value witnessScript := signDesc.WitnessScript privKey := m.key @@ -862,7 +881,7 @@ func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, signDesc *input.SignDescripto return nil, err } - return sig[:len(sig)-1], nil + return btcec.ParseDERSignature(sig[:len(sig)-1], btcec.S256()) } func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx, signDesc *input.SignDescriptor) (*input.Script, error) { @@ -993,3 +1012,22 @@ func (m *mockOnionErrorDecryptor) DecryptError(encryptedData []byte) ( Message: m.message, }, m.err } + +var _ htlcNotifier = (*mockHTLCNotifier)(nil) + +type mockHTLCNotifier struct{} + +func (h *mockHTLCNotifier) NotifyForwardingEvent(key HtlcKey, info HtlcInfo, + eventType HtlcEventType) { +} + +func (h *mockHTLCNotifier) NotifyLinkFailEvent(key HtlcKey, info HtlcInfo, + eventType HtlcEventType, linkErr *LinkError, incoming bool) { +} + +func (h *mockHTLCNotifier) NotifyForwardingFailEvent(key HtlcKey, + eventType HtlcEventType) { +} + +func (h *mockHTLCNotifier) NotifySettleEvent(key HtlcKey, eventType HtlcEventType) { +} diff --git a/htlcswitch/packet.go b/htlcswitch/packet.go index 3e0816e6e1..e0aa752751 100644 --- a/htlcswitch/packet.go +++ b/htlcswitch/packet.go @@ -54,6 +54,11 @@ type htlcPacket struct { // encrypted with any shared secret. localFailure bool + // linkFailure is non-nil for htlcs that fail at our node. This may + // occur for our own payments which fail on the outgoing link, + // or for forwards which fail in the switch or on the outgoing link. 
+ linkFailure *LinkError + // convertedError is set to true if this is an HTLC fail that was // created using an UpdateFailMalformedHTLC from the remote party. If // this is true, then when forwarding this failure packet, we'll need diff --git a/htlcswitch/payment_result.go b/htlcswitch/payment_result.go index faf15d84d6..b23dbe0a7e 100644 --- a/htlcswitch/payment_result.go +++ b/htlcswitch/payment_result.go @@ -7,8 +7,8 @@ import ( "io" "sync" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/multimutex" ) @@ -137,8 +137,8 @@ func (store *networkResultStore) storeResult(paymentID uint64, var paymentIDBytes [8]byte binary.BigEndian.PutUint64(paymentIDBytes[:], paymentID) - err := store.db.Batch(func(tx *bbolt.Tx) error { - networkResults, err := tx.CreateBucketIfNotExists( + err := kvdb.Batch(store.db.Backend, func(tx kvdb.RwTx) error { + networkResults, err := tx.CreateTopLevelBucket( networkResultStoreBucketKey, ) if err != nil { @@ -180,7 +180,7 @@ func (store *networkResultStore) subscribeResult(paymentID uint64) ( resultChan = make(chan *networkResult, 1) ) - err := store.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(store.db, func(tx kvdb.ReadTx) error { var err error result, err = fetchResult(tx, paymentID) switch { @@ -226,7 +226,7 @@ func (store *networkResultStore) getResult(pid uint64) ( *networkResult, error) { var result *networkResult - err := store.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(store.db, func(tx kvdb.ReadTx) error { var err error result, err = fetchResult(tx, pid) return err @@ -238,11 +238,11 @@ func (store *networkResultStore) getResult(pid uint64) ( return result, nil } -func fetchResult(tx *bbolt.Tx, pid uint64) (*networkResult, error) { +func fetchResult(tx kvdb.ReadTx, pid uint64) (*networkResult, error) { var paymentIDBytes [8]byte 
binary.BigEndian.PutUint64(paymentIDBytes[:], pid) - networkResults := tx.Bucket(networkResultStoreBucketKey) + networkResults := tx.ReadBucket(networkResultStoreBucketKey) if networkResults == nil { return nil, ErrPaymentIDNotFound } diff --git a/htlcswitch/queue.go b/htlcswitch/queue.go deleted file mode 100644 index 420d1f9b71..0000000000 --- a/htlcswitch/queue.go +++ /dev/null @@ -1,208 +0,0 @@ -package htlcswitch - -import ( - "sync" - "sync/atomic" - "time" - - "github.com/lightningnetwork/lnd/lnwire" -) - -// packetQueue is a goroutine-safe queue of htlc packets which over flow the -// current commitment transaction. An HTLC will overflow the current commitment -// transaction if one attempts to add a new HTLC to the state machine which -// already has the max number of pending HTLC's present on the commitment -// transaction. Packets are removed from the queue by the channelLink itself -// as additional slots become available on the commitment transaction itself. -// In order to synchronize properly we use a semaphore to allow the channelLink -// to signal the number of slots available, and a condition variable to allow -// the packetQueue to know when new items have been added to the queue. -type packetQueue struct { - // totalHtlcAmt is the sum of the value of all pending HTLC's currently - // residing within the overflow queue. This value should only read or - // modified *atomically*. - totalHtlcAmt int64 // To be used atomically. - - // queueLen is an internal counter that reflects the size of the queue - // at any given instance. This value is intended to be use atomically - // as this value is used by internal methods to obtain the length of - // the queue w/o grabbing the main lock. This allows callers to avoid a - // deadlock situation where the main goroutine is attempting a send - // with the lock held. - queueLen int32 // To be used atomically. - - streamShutdown int32 // To be used atomically. 
- - queue []*htlcPacket - - wg sync.WaitGroup - - // freeSlots serves as a semaphore who's current value signals the - // number of available slots on the commitment transaction. - freeSlots chan struct{} - - queueCond *sync.Cond - queueMtx sync.Mutex - - // outgoingPkts is a channel that the channelLink will receive on in - // order to drain the packetQueue as new slots become available on the - // commitment transaction. - outgoingPkts chan *htlcPacket - - quit chan struct{} -} - -// newPacketQueue returns a new instance of the packetQueue. The maxFreeSlots -// value should reflect the max number of HTLC's that we're allowed to have -// outstanding within the commitment transaction. -func newPacketQueue(maxFreeSlots int) *packetQueue { - p := &packetQueue{ - outgoingPkts: make(chan *htlcPacket), - freeSlots: make(chan struct{}, maxFreeSlots), - quit: make(chan struct{}), - } - p.queueCond = sync.NewCond(&p.queueMtx) - - return p -} - -// Start starts all goroutines that packetQueue needs to perform its normal -// duties. -func (p *packetQueue) Start() { - p.wg.Add(1) - go p.packetCoordinator() -} - -// Stop signals the packetQueue for a graceful shutdown, and waits for all -// goroutines to exit. -func (p *packetQueue) Stop() { - close(p.quit) - - // Now that we've closed the channel, we'll repeatedly signal the msg - // consumer until we've detected that it has exited. - for atomic.LoadInt32(&p.streamShutdown) == 0 { - p.queueCond.Signal() - time.Sleep(time.Millisecond * 100) - } -} - -// packetCoordinator is a goroutine that handles the packet overflow queue. -// Using a synchronized queue, outside callers are able to append to the end of -// the queue, waking up the coordinator when the queue transitions from empty -// to non-empty. The packetCoordinator will then aggressively try to empty out -// the queue, passing new htlcPackets to the channelLink as slots within the -// commitment transaction become available. 
-// -// Future iterations of the packetCoordinator will implement congestion -// avoidance logic in the face of persistent htlcPacket back-pressure. -// -// TODO(roasbeef): later will need to add back pressure handling heuristics -// like reg congestion avoidance: -// * random dropping, RED, etc -func (p *packetQueue) packetCoordinator() { - defer atomic.StoreInt32(&p.streamShutdown, 1) - - for { - // First, we'll check our condition. If the queue of packets is - // empty, then we'll wait until a new item is added. - p.queueCond.L.Lock() - for len(p.queue) == 0 { - p.queueCond.Wait() - - // If we were woke up in order to exit, then we'll do - // so. Otherwise, we'll check the message queue for any - // new items. - select { - case <-p.quit: - p.queueCond.L.Unlock() - return - default: - } - } - - nextPkt := p.queue[0] - - p.queueCond.L.Unlock() - - // If there aren't any further messages to sent (or the link - // didn't immediately read our message), then we'll block and - // wait for a new message to be sent into the overflow queue, - // or for the link's htlcForwarder to wake up. - select { - case <-p.freeSlots: - - select { - case p.outgoingPkts <- nextPkt: - // Pop the item off the front of the queue and - // slide down the reference one to re-position - // the head pointer. This will set us up for - // the next iteration. If the queue is empty - // at this point, then we'll block at the top. - p.queueCond.L.Lock() - p.queue[0] = nil - p.queue = p.queue[1:] - atomic.AddInt32(&p.queueLen, -1) - atomic.AddInt64(&p.totalHtlcAmt, int64(-nextPkt.amount)) - p.queueCond.L.Unlock() - case <-p.quit: - return - } - - case <-p.quit: - return - - default: - } - } -} - -// AddPkt adds the referenced packet to the overflow queue, preserving ordering -// of the existing items. 
-func (p *packetQueue) AddPkt(pkt *htlcPacket) { - // First, we'll lock the condition, and add the message to the end of - // the message queue, and increment the internal atomic for tracking - // the queue's length. - p.queueCond.L.Lock() - p.queue = append(p.queue, pkt) - atomic.AddInt32(&p.queueLen, 1) - atomic.AddInt64(&p.totalHtlcAmt, int64(pkt.amount)) - p.queueCond.L.Unlock() - - // With the message added, we signal to the msgConsumer that there are - // additional messages to consume. - p.queueCond.Signal() -} - -// SignalFreeSlot signals to the queue that a new slot has opened up within the -// commitment transaction. The max amount of free slots has been defined when -// initially creating the packetQueue itself. This method, combined with AddPkt -// creates the following abstraction: a synchronized queue of infinite length -// which can be added to at will, which flows onto a commitment of fixed -// capacity. -func (p *packetQueue) SignalFreeSlot() { - // We'll only send over a free slot signal if the queue *is not* empty. - // Otherwise, it's possible that we attempt to overfill the free slots - // semaphore and block indefinitely below. - if atomic.LoadInt32(&p.queueLen) == 0 { - return - } - - select { - case p.freeSlots <- struct{}{}: - case <-p.quit: - return - } -} - -// Length returns the number of pending htlc packets present within the over -// flow queue. -func (p *packetQueue) Length() int32 { - return atomic.LoadInt32(&p.queueLen) -} - -// TotalHtlcAmount is the total amount (in mSAT) of all HTLC's currently -// residing within the overflow queue. -func (p *packetQueue) TotalHtlcAmount() lnwire.MilliSatoshi { - // TODO(roasbeef): also factor in fee rate? 
- return lnwire.MilliSatoshi(atomic.LoadInt64(&p.totalHtlcAmt)) -} diff --git a/htlcswitch/queue_test.go b/htlcswitch/queue_test.go deleted file mode 100644 index 92d3116423..0000000000 --- a/htlcswitch/queue_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package htlcswitch - -import ( - "reflect" - "testing" - "time" - - "github.com/lightningnetwork/lnd/lnwire" -) - -// TestWaitingQueueThreadSafety test the thread safety properties of the -// waiting queue, by executing methods in separate goroutines which operates -// with the same data. -func TestWaitingQueueThreadSafety(t *testing.T) { - t.Parallel() - - const numPkts = 1000 - - q := newPacketQueue(numPkts) - q.Start() - defer q.Stop() - - a := make([]uint64, numPkts) - for i := 0; i < numPkts; i++ { - a[i] = uint64(i) - q.AddPkt(&htlcPacket{ - incomingHTLCID: a[i], - htlc: &lnwire.UpdateAddHTLC{}, - }) - } - - // The reported length of the queue should be the exact number of - // packets we added above. - queueLength := q.Length() - if queueLength != numPkts { - t.Fatalf("queue has wrong length: expected %v, got %v", numPkts, - queueLength) - } - - var b []uint64 - for i := 0; i < numPkts; i++ { - q.SignalFreeSlot() - - select { - case packet := <-q.outgoingPkts: - b = append(b, packet.incomingHTLCID) - - case <-time.After(2 * time.Second): - t.Fatal("timeout") - } - } - - // The length of the queue should be zero at this point. 
- time.Sleep(time.Millisecond * 50) - queueLength = q.Length() - if queueLength != 0 { - t.Fatalf("queue has wrong length: expected %v, got %v", 0, - queueLength) - } - - if !reflect.DeepEqual(b, a) { - t.Fatal("wrong order of the objects") - } -} diff --git a/htlcswitch/sequencer.go b/htlcswitch/sequencer.go index 3a5247db14..5b1526b6bd 100644 --- a/htlcswitch/sequencer.go +++ b/htlcswitch/sequencer.go @@ -3,9 +3,9 @@ package htlcswitch import ( "sync" - "github.com/coreos/bbolt" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) // defaultSequenceBatchSize specifies the window of sequence numbers that are @@ -87,8 +87,8 @@ func (s *persistentSequencer) NextID() (uint64, error) { // allocated will start from the last known tip on disk, which is fine // as we only require uniqueness of the allocated numbers. var nextHorizonID uint64 - if err := s.db.Update(func(tx *bbolt.Tx) error { - nextIDBkt := tx.Bucket(nextPaymentIDKey) + if err := kvdb.Update(s.db, func(tx kvdb.RwTx) error { + nextIDBkt := tx.ReadWriteBucket(nextPaymentIDKey) if nextIDBkt == nil { return ErrSequencerCorrupted } @@ -121,8 +121,8 @@ func (s *persistentSequencer) NextID() (uint64, error) { // initDB populates the bucket used to generate payment sequence numbers. 
func (s *persistentSequencer) initDB() error { - return s.db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(nextPaymentIDKey) + return kvdb.Update(s.db, func(tx kvdb.RwTx) error { + _, err := tx.CreateTopLevelBucket(nextPaymentIDKey) return err }) } diff --git a/htlcswitch/switch.go b/htlcswitch/switch.go index e2c38d1bd3..06c597bf60 100644 --- a/htlcswitch/switch.go +++ b/htlcswitch/switch.go @@ -9,15 +9,18 @@ import ( "time" "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btclog" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/ticker" ) @@ -34,6 +37,10 @@ const ( // DefaultAckInterval is the duration between attempts to ack any settle // fails in a forwarding package. DefaultAckInterval = 15 * time.Second + + // DefaultHTLCExpiry is the duration after which Adds will be cancelled + // if they could not get added to an outgoing commitment. + DefaultHTLCExpiry = time.Minute ) var ( @@ -44,11 +51,6 @@ var ( // through the switch and is locked into another commitment txn. ErrDuplicateAdd = errors.New("duplicate add HTLC detected") - // ErrIncompleteForward is used when an htlc was already forwarded - // through the switch, but did not get locked into another commitment - // txn. - ErrIncompleteForward = errors.New("incomplete forward detected") - // ErrUnknownErrorDecryptor signals that we were unable to locate the // error decryptor for this payment. This is likely due to restarting // the daemon. 
@@ -102,7 +104,10 @@ type ChanClose struct { // This value is only utilized if the closure type is CloseRegular. // This will be the starting offered fee when the fee negotiation // process for the cooperative closure transaction kicks off. - TargetFeePerKw lnwallet.SatPerKWeight + TargetFeePerKw chainfee.SatPerKWeight + + // DeliveryScript is an optional delivery script to pay funds out to. + DeliveryScript lnwire.DeliveryAddress // Updates is used by request creator to receive the notifications about // execution of the close channel request. @@ -151,6 +156,10 @@ type Config struct { // the switch when a new block has arrived. Notifier chainntnfs.ChainNotifier + // HtlcNotifier is an instance of a htlcNotifier which we will pipe htlc + // events through. + HtlcNotifier htlcNotifier + // FwdEventTicker is a signal that instructs the htlcswitch to flush any // pending forwarding events. FwdEventTicker ticker.Ticker @@ -163,9 +172,22 @@ type Config struct { // fails in forwarding packages. AckEventTicker ticker.Ticker + // AllowCircularRoute is true if the user has configured their node to + // allow forwards that arrive and depart our node over the same channel. + AllowCircularRoute bool + // RejectHTLC is a flag that instructs the htlcswitch to reject any // HTLCs that are not from the source hop. RejectHTLC bool + + // Clock is a time source for the switch. + Clock clock.Clock + + // HTLCExpiry is the interval after which Adds will be cancelled if they + // have not been yet been delivered to a link. The computed deadline + // will expiry this long after the Adds are added to a mailbox via + // AddPacket. + HTLCExpiry time.Duration } // Switch is the central messaging bus for all incoming/outgoing HTLCs. 
@@ -275,12 +297,11 @@ func New(cfg Config, currentHeight uint32) (*Switch, error) { return nil, err } - return &Switch{ + s := &Switch{ bestHeight: currentHeight, cfg: &cfg, circuits: circuitMap, linkIndex: make(map[lnwire.ChannelID]ChannelLink), - mailOrchestrator: newMailOrchestrator(), forwardingIndex: make(map[lnwire.ShortChannelID]ChannelLink), interfaceIndex: make(map[[33]byte]map[lnwire.ChannelID]ChannelLink), pendingLinkIndex: make(map[lnwire.ChannelID]ChannelLink), @@ -289,7 +310,16 @@ func New(cfg Config, currentHeight uint32) (*Switch, error) { chanCloseRequests: make(chan *ChanClose), resolutionMsgs: make(chan *resolutionMsg), quit: make(chan struct{}), - }, nil + } + + s.mailOrchestrator = newMailOrchestrator(&mailOrchConfig{ + fetchUpdate: s.cfg.FetchLastChannelUpdate, + forwardPackets: s.ForwardPackets, + clock: s.cfg.Clock, + expiry: s.cfg.HTLCExpiry, + }) + + return s, nil } // resolutionMsg is a struct that wraps an existing ResolutionMsg with a done @@ -456,6 +486,18 @@ func (s *Switch) UpdateForwardingPolicies( s.indexMtx.RUnlock() } +// IsForwardedHTLC checks for a given channel and htlc index if it is related +// to an opened circuit that represents a forwarded payment. +func (s *Switch) IsForwardedHTLC(chanID lnwire.ShortChannelID, + htlcIndex uint64) bool { + + circuit := s.circuits.LookupOpenCircuit(channeldb.CircuitKey{ + ChanID: chanID, + HtlcID: htlcIndex, + }) + return circuit != nil && circuit.Incoming.ChanID != hop.Source +} + // forward is used in order to find next channel link and apply htlc update. // Also this function is used by channel links itself in order to forward the // update after it has been included in the channel. 
@@ -488,9 +530,12 @@ func (s *Switch) forward(packet *htlcPacket) error { } else { failure = lnwire.NewTemporaryChannelFailure(update) } - addErr := ErrIncompleteForward - return s.failAddPacket(packet, failure, addErr) + linkError := NewDetailedLinkError( + failure, OutgoingFailureIncompleteForward, + ) + + return s.failAddPacket(packet, linkError) } packet.circuit = circuit @@ -639,14 +684,14 @@ func (s *Switch) ForwardPackets(linkQuit chan struct{}, } else { failure = lnwire.NewTemporaryChannelFailure(update) } + linkError := NewDetailedLinkError( + failure, OutgoingFailureIncompleteForward, + ) for _, packet := range failedPackets { - addErr := errors.New("failing packet after " + - "detecting incomplete forward") - // We don't handle the error here since this method // always returns an error. - s.failAddPacket(packet, failure, addErr) + _ = s.failAddPacket(packet, linkError) } } @@ -746,64 +791,23 @@ func (s *Switch) handleLocalDispatch(pkt *htlcPacket) error { // User have created the htlc update therefore we should find the // appropriate channel link and send the payment over this link. if htlc, ok := pkt.htlc.(*lnwire.UpdateAddHTLC); ok { - // Try to find links by node destination. - s.indexMtx.RLock() - link, err := s.getLinkByShortID(pkt.outgoingChanID) - s.indexMtx.RUnlock() + link, err := s.handleLocalAddHTLC(pkt, htlc) if err != nil { - log.Errorf("Link %v not found", pkt.outgoingChanID) - return &ForwardingError{ - FailureSourceIdx: 0, - FailureMessage: &lnwire.FailUnknownNextPeer{}, - } - } - - if !link.EligibleToForward() { - err := fmt.Errorf("Link %v is not available to forward", - pkt.outgoingChanID) - log.Error(err) - - // The update does not need to be populated as the error - // will be returned back to the router. - htlcErr := lnwire.NewTemporaryChannelFailure(nil) - return &ForwardingError{ - FailureSourceIdx: 0, - ExtraMsg: err.Error(), - FailureMessage: htlcErr, - } - } - - // Ensure that the htlc satisfies the outgoing channel policy. 
- currentHeight := atomic.LoadUint32(&s.bestHeight) - htlcErr := link.HtlcSatifiesPolicyLocal( - htlc.PaymentHash, - htlc.Amount, - htlc.Expiry, currentHeight, - ) - if htlcErr != nil { - log.Errorf("Link %v policy for local forward not "+ - "satisfied", pkt.outgoingChanID) - - return &ForwardingError{ - FailureSourceIdx: 0, - FailureMessage: htlcErr, - } - } - - if link.Bandwidth() < htlc.Amount { - err := fmt.Errorf("Link %v has insufficient capacity: "+ - "need %v, has %v", pkt.outgoingChanID, - htlc.Amount, link.Bandwidth()) - log.Error(err) + // Notify the htlc notifier of a link failure on our + // outgoing link. Incoming timelock/amount values are + // not set because they are not present for local sends. + s.cfg.HtlcNotifier.NotifyLinkFailEvent( + newHtlcKey(pkt), + HtlcInfo{ + OutgoingTimeLock: htlc.Expiry, + OutgoingAmt: htlc.Amount, + }, + HtlcEventTypeSend, + err, + false, + ) - // The update does not need to be populated as the error - // will be returned back to the router. - htlcErr := lnwire.NewTemporaryChannelFailure(nil) - return &ForwardingError{ - FailureSourceIdx: 0, - ExtraMsg: err.Error(), - FailureMessage: htlcErr, - } + return err } return link.HandleSwitchPacket(pkt) @@ -815,6 +819,47 @@ func (s *Switch) handleLocalDispatch(pkt *htlcPacket) error { return nil } +// handleLocalAddHTLC handles the addition of a htlc for a send that +// originates from our node. It returns the link that the htlc should +// be forwarded outwards on, and a link error if the htlc cannot be +// forwarded. +func (s *Switch) handleLocalAddHTLC(pkt *htlcPacket, + htlc *lnwire.UpdateAddHTLC) (ChannelLink, *LinkError) { + + // Try to find links by node destination. 
+ s.indexMtx.RLock() + link, err := s.getLinkByShortID(pkt.outgoingChanID) + s.indexMtx.RUnlock() + if err != nil { + log.Errorf("Link %v not found", pkt.outgoingChanID) + return nil, NewLinkError(&lnwire.FailUnknownNextPeer{}) + } + + if !link.EligibleToForward() { + log.Errorf("Link %v is not available to forward", + pkt.outgoingChanID) + + // The update does not need to be populated as the error + // will be returned back to the router. + return nil, NewDetailedLinkError( + lnwire.NewTemporaryChannelFailure(nil), + OutgoingFailureLinkNotEligible, + ) + } + + // Ensure that the htlc satisfies the outgoing channel policy. + currentHeight := atomic.LoadUint32(&s.bestHeight) + htlcErr := link.CheckHtlcTransit( + htlc.PaymentHash, htlc.Amount, htlc.Expiry, currentHeight, + ) + if htlcErr != nil { + log.Errorf("Link %v policy for local forward not "+ + "satisfied", pkt.outgoingChanID) + return nil, htlcErr + } + return link, nil +} + // handleLocalResponse processes a Settle or Fail responding to a // locally-initiated payment. This is handled asynchronously to avoid blocking // the main event loop within the switch, as these operations can require @@ -876,6 +921,18 @@ func (s *Switch) handleLocalResponse(pkt *htlcPacket) { pkt.inKey(), err) return } + + // Finally, notify on the htlc failure or success that has been handled. + key := newHtlcKey(pkt) + eventType := getEventType(pkt) + + switch pkt.htlc.(type) { + case *lnwire.UpdateFulfillHTLC: + s.cfg.HtlcNotifier.NotifySettleEvent(key, eventType) + + case *lnwire.UpdateFailHTLC: + s.cfg.HtlcNotifier.NotifyForwardingFailEvent(key, eventType) + } } // extractResult uses the given deobfuscator to extract the payment result from @@ -927,41 +984,44 @@ func (s *Switch) parseFailedPayment(deobfuscator ErrorDecrypter, // decrypt the error, simply decode it them report back to the // user. 
case unencrypted: - var userErr string r := bytes.NewReader(htlc.Reason) failureMsg, err := lnwire.DecodeFailure(r, 0) if err != nil { - userErr = fmt.Sprintf("unable to decode onion "+ - "failure (hash=%v, pid=%d): %v", + // If we could not decode the failure reason, return a link + // error indicating that we failed to decode the onion. + linkError := NewDetailedLinkError( + // As this didn't even clear the link, we don't + // need to apply an update here since it goes + // directly to the router. + lnwire.NewTemporaryChannelFailure(nil), + OutgoingFailureDecodeError, + ) + + log.Errorf("%v: (hash=%v, pid=%d): %v", + linkError.FailureDetail.FailureString(), paymentHash, paymentID, err) - log.Error(userErr) - // As this didn't even clear the link, we don't need to - // apply an update here since it goes directly to the - // router. - failureMsg = lnwire.NewTemporaryChannelFailure(nil) + return linkError } - return &ForwardingError{ - FailureSourceIdx: 0, - ExtraMsg: userErr, - FailureMessage: failureMsg, - } + // If we successfully decoded the failure reason, return it. + return NewLinkError(failureMsg) // A payment had to be timed out on chain before it got past // the first hop. In this case, we'll report a permanent // channel failure as this means us, or the remote party had to // go on chain. case isResolution && htlc.Reason == nil: - userErr := fmt.Sprintf("payment was resolved "+ - "on-chain, then canceled back (hash=%v, pid=%d)", + linkError := NewDetailedLinkError( + &lnwire.FailPermanentChannelFailure{}, + OutgoingFailureOnChainTimeout, + ) + + log.Info("%v: hash=%v, pid=%d", + linkError.FailureDetail.FailureString(), paymentHash, paymentID) - return &ForwardingError{ - FailureSourceIdx: 0, - ExtraMsg: userErr, - FailureMessage: &lnwire.FailPermanentChannelFailure{}, - } + return linkError // A regular multi-hop payment error that we'll need to // decrypt. 
@@ -994,10 +1054,12 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error { // Check if the node is set to reject all onward HTLCs and also make // sure that HTLC is not from the source node. if s.cfg.RejectHTLC && packet.incomingChanID != hop.Source { - failure := &lnwire.FailChannelDisabled{} - addErr := fmt.Errorf("unable to forward any htlcs") + failure := NewDetailedLinkError( + &lnwire.FailChannelDisabled{}, + OutgoingFailureForwardsDisabled, + ) - return s.failAddPacket(packet, failure, addErr) + return s.failAddPacket(packet, failure) } if packet.incomingChanID == hop.Source { @@ -1006,19 +1068,36 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error { return s.handleLocalDispatch(packet) } + // Before we attempt to find a non-strict forwarding path for + // this htlc, check whether the htlc is being routed over the + // same incoming and outgoing channel. If our node does not + // allow forwards of this nature, we fail the htlc early. This + // check is in place to disallow inefficiently routed htlcs from + // locking up our balance. + linkErr := checkCircularForward( + packet.incomingChanID, packet.outgoingChanID, + s.cfg.AllowCircularRoute, htlc.PaymentHash, + ) + if linkErr != nil { + return s.failAddPacket(packet, linkErr) + } + s.indexMtx.RLock() targetLink, err := s.getLinkByShortID(packet.outgoingChanID) if err != nil { s.indexMtx.RUnlock() + log.Debugf("unable to find link with "+ + "destination %v", packet.outgoingChanID) + // If packet was forwarded from another channel link // than we should notify this link that some error // occurred. 
- failure := &lnwire.FailUnknownNextPeer{} - addErr := fmt.Errorf("unable to find link with "+ - "destination %v", packet.outgoingChanID) + linkError := NewLinkError( + &lnwire.FailUnknownNextPeer{}, + ) - return s.failAddPacket(packet, failure, addErr) + return s.failAddPacket(packet, linkError) } targetPeerKey := targetLink.Peer().PubKey() interfaceLinks, _ := s.getLinks(targetPeerKey) @@ -1028,75 +1107,47 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error { // selection process. This way we can return the error for // precise link that the sender selected, while optimistically // trying all links to utilize our available bandwidth. - linkErrs := make(map[lnwire.ShortChannelID]lnwire.FailureMessage) + linkErrs := make(map[lnwire.ShortChannelID]*LinkError) // Try to find destination channel link with appropriate // bandwidth. var destination ChannelLink for _, link := range interfaceLinks { + var failure *LinkError + // We'll skip any links that aren't yet eligible for // forwarding. - switch { - case !link.EligibleToForward(): - continue - - // If the link doesn't yet have a source chan ID, then - // we'll skip it as well. - case link.ShortChanID() == hop.Source: - continue - } - - // Before we check the link's bandwidth, we'll ensure - // that the HTLC satisfies the current forwarding - // policy of this target link. - currentHeight := atomic.LoadUint32(&s.bestHeight) - err := link.HtlcSatifiesPolicy( - htlc.PaymentHash, packet.incomingAmount, - packet.amount, packet.incomingTimeout, - packet.outgoingTimeout, currentHeight, - ) - if err != nil { - linkErrs[link.ShortChanID()] = err - continue + if !link.EligibleToForward() { + failure = NewDetailedLinkError( + &lnwire.FailUnknownNextPeer{}, + OutgoingFailureLinkNotEligible, + ) + } else { + // We'll ensure that the HTLC satisfies the + // current forwarding conditions of this target + // link. 
+ currentHeight := atomic.LoadUint32(&s.bestHeight) + failure = link.CheckHtlcForward( + htlc.PaymentHash, packet.incomingAmount, + packet.amount, packet.incomingTimeout, + packet.outgoingTimeout, currentHeight, + ) } - if link.Bandwidth() >= htlc.Amount { + // Stop searching if this link can forward the htlc. + if failure == nil { destination = link - break } - } - switch { - // If the channel link we're attempting to forward the update - // over has insufficient capacity, and didn't violate any - // forwarding policies, then we'll cancel the htlc as the - // payment cannot succeed. - case destination == nil && len(linkErrs) == 0: - // If packet was forwarded from another channel link - // than we should notify this link that some error - // occurred. - var failure lnwire.FailureMessage - update, err := s.cfg.FetchLastChannelUpdate( - packet.outgoingChanID, - ) - if err != nil { - failure = &lnwire.FailTemporaryNodeFailure{} - } else { - failure = lnwire.NewTemporaryChannelFailure(update) - } - - addErr := fmt.Errorf("unable to find appropriate "+ - "channel link insufficient capacity, need "+ - "%v towards node=%x", htlc.Amount, targetPeerKey) - - return s.failAddPacket(packet, failure, addErr) + linkErrs[link.ShortChanID()] = failure + } // If we had a forwarding failure due to the HTLC not // satisfying the current policy, then we'll send back an // error, but ensure we send back the error sourced at the // *target* link. - case destination == nil && len(linkErrs) != 0: + if destination == nil { // At this point, some or all of the links rejected the // HTLC so we couldn't forward it. So we'll try to look // up the error that came from the source. @@ -1105,7 +1156,9 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error { // If we can't find the error of the source, // then we'll return an unknown next peer, // though this should never happen. 
- linkErr = &lnwire.FailUnknownNextPeer{} + linkErr = NewLinkError( + &lnwire.FailUnknownNextPeer{}, + ) log.Warnf("unable to find err source for "+ "outgoing_link=%v, errors=%v", packet.outgoingChanID, newLogClosure(func() string { @@ -1113,12 +1166,12 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error { })) } - addErr := fmt.Errorf("incoming HTLC(%x) violated "+ + log.Tracef("incoming HTLC(%x) violated "+ "target outgoing link (id=%v) policy: %v", htlc.PaymentHash[:], packet.outgoingChanID, linkErr) - return s.failAddPacket(packet, linkErr, addErr) + return s.failAddPacket(packet, linkErr) } // Send the packet to the destination channel link which @@ -1134,6 +1187,12 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error { return err } + // closeCircuit returns a nil circuit when a settle packet returns an + // ErrUnknownCircuit error upon the inner call to CloseCircuit. + if circuit == nil { + return nil + } + fail, isFail := htlc.(*lnwire.UpdateFailHTLC) if isFail && !packet.hasSource { switch { @@ -1216,16 +1275,45 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error { } } +// checkCircularForward checks whether a forward is circular (arrives and +// departs on the same link) and returns a link error if the switch is +// configured to disallow this behaviour. +func checkCircularForward(incoming, outgoing lnwire.ShortChannelID, + allowCircular bool, paymentHash lntypes.Hash) *LinkError { + + // If the route is not circular we do not need to perform any further + // checks. + if incoming != outgoing { + return nil + } + + // If the incoming and outgoing link are equal, the htlc is part of a + // circular route which may be used to lock up our liquidity. If the + // switch is configured to allow circular routes, log that we are + // allowing the route then return nil. 
+ if allowCircular { + log.Debugf("allowing circular route over link: %v "+ + "(payment hash: %x)", incoming, paymentHash) + return nil + } + + // If our node disallows circular routes, return a temporary channel + // failure. There is nothing wrong with the policy used by the remote + // node, so we do not include a channel update. + return NewDetailedLinkError( + lnwire.NewTemporaryChannelFailure(nil), + OutgoingFailureCircularRoute, + ) +} + // failAddPacket encrypts a fail packet back to an add packet's source. // The ciphertext will be derived from the failure message proivded by context. // This method returns the failErr if all other steps complete successfully. -func (s *Switch) failAddPacket(packet *htlcPacket, - failure lnwire.FailureMessage, failErr error) error { - +func (s *Switch) failAddPacket(packet *htlcPacket, failure *LinkError) error { // Encrypt the failure so that the sender will be able to read the error // message. Since we failed this packet, we use EncryptFirstHop to // obfuscate the failure for their eyes only. - reason, err := packet.obfuscator.EncryptFirstHop(failure) + reason, err := packet.obfuscator.EncryptFirstHop(failure.WireMessage()) if err != nil { err := fmt.Errorf("unable to obfuscate "+ "error: %v", err) @@ -1233,13 +1321,23 @@ func (s *Switch) failAddPacket(packet *htlcPacket, return err } - log.Error(failErr) + log.Error(failure.Error()) + // Create a failure packet for this htlc. The the full set of + // information about the htlc failure is included so that they can + // be included in link failure notifications. 
failPkt := &htlcPacket{ - sourceRef: packet.sourceRef, - incomingChanID: packet.incomingChanID, - incomingHTLCID: packet.incomingHTLCID, - circuit: packet.circuit, + sourceRef: packet.sourceRef, + incomingChanID: packet.incomingChanID, + incomingHTLCID: packet.incomingHTLCID, + outgoingChanID: packet.outgoingChanID, + outgoingHTLCID: packet.outgoingHTLCID, + incomingAmount: packet.incomingAmount, + amount: packet.amount, + incomingTimeout: packet.incomingTimeout, + outgoingTimeout: packet.outgoingTimeout, + circuit: packet.circuit, + linkFailure: failure, htlc: &lnwire.UpdateFailHTLC{ Reason: reason, }, @@ -1255,7 +1353,7 @@ func (s *Switch) failAddPacket(packet *htlcPacket, return err } - return failErr + return failure } // closeCircuit accepts a settle or fail htlc and the associated htlc packet and @@ -1325,19 +1423,28 @@ func (s *Switch) closeCircuit(pkt *htlcPacket) (*PaymentCircuit, error) { // Failed to close circuit because it does not exist. This is likely // because the circuit was already successfully closed. case ErrUnknownCircuit: - err := fmt.Errorf("Unable to find target channel "+ - "for HTLC settle/fail: channel ID = %s, "+ - "HTLC ID = %d", pkt.outgoingChanID, - pkt.outgoingHTLCID) - log.Error(err) - if pkt.destRef != nil { // Add this SettleFailRef to the set of pending settle/fail entries // awaiting acknowledgement. s.pendingSettleFails = append(s.pendingSettleFails, *pkt.destRef) } - return nil, err + // If this is a settle, we will not log an error message as settles + // are expected to hit the ErrUnknownCircuit case. The only way fails + // can hit this case if the link restarts after having just sent a fail + // to the switch. + _, isSettle := pkt.htlc.(*lnwire.UpdateFulfillHTLC) + if !isSettle { + err := fmt.Errorf("unable to find target channel "+ + "for HTLC fail: channel ID = %s, "+ + "HTLC ID = %d", pkt.outgoingChanID, + pkt.outgoingHTLCID) + log.Error(err) + + return nil, err + } + + return nil, nil // Unexpected error. 
default: @@ -1350,7 +1457,7 @@ func (s *Switch) closeCircuit(pkt *htlcPacket) (*PaymentCircuit, error) { // we're the originator of the payment, so the link stops attempting to // re-broadcast. func (s *Switch) ackSettleFail(settleFailRefs ...channeldb.SettleFailRef) error { - return s.cfg.DB.Batch(func(tx *bbolt.Tx) error { + return kvdb.Batch(s.cfg.DB.Backend, func(tx kvdb.RwTx) error { return s.cfg.SwitchPackager.AckSettleFails(tx, settleFailRefs...) }) } @@ -1411,12 +1518,13 @@ func (s *Switch) teardownCircuit(pkt *htlcPacket) error { } // CloseLink creates and sends the close channel command to the target link -// directing the specified closure type. If the closure type if CloseRegular, -// then the last parameter should be the ideal fee-per-kw that will be used as -// a starting point for close negotiation. -func (s *Switch) CloseLink(chanPoint *wire.OutPoint, closeType ChannelCloseType, - targetFeePerKw lnwallet.SatPerKWeight) (chan interface{}, - chan error) { +// directing the specified closure type. If the closure type is CloseRegular, +// targetFeePerKw parameter should be the ideal fee-per-kw that will be used as +// a starting point for close negotiation. The deliveryScript parameter is an +// optional parameter which sets a user specified script to close out to. +func (s *Switch) CloseLink(chanPoint *wire.OutPoint, + closeType ChannelCloseType, targetFeePerKw chainfee.SatPerKWeight, + deliveryScript lnwire.DeliveryAddress) (chan interface{}, chan error) { // TODO(roasbeef) abstract out the close updates. 
updateChan := make(chan interface{}, 2) @@ -1427,6 +1535,7 @@ func (s *Switch) CloseLink(chanPoint *wire.OutPoint, closeType ChannelCloseType, ChanPoint: chanPoint, Updates: updateChan, TargetFeePerKw: targetFeePerKw, + DeliveryScript: deliveryScript, Err: errChan, } @@ -1794,7 +1903,7 @@ func (s *Switch) reforwardResponses() error { func (s *Switch) loadChannelFwdPkgs(source lnwire.ShortChannelID) ([]*channeldb.FwdPkg, error) { var fwdPkgs []*channeldb.FwdPkg - if err := s.cfg.DB.Update(func(tx *bbolt.Tx) error { + if err := kvdb.Update(s.cfg.DB, func(tx kvdb.RwTx) error { var err error fwdPkgs, err = s.cfg.SwitchPackager.LoadChannelFwdPkgs( tx, source, @@ -1861,8 +1970,11 @@ func (s *Switch) reforwardSettleFails(fwdPkgs []*channeldb.FwdPkg) { // commitment state, so we'll forward this to the switch so the // backwards undo can continue. case lnwallet.Fail: - // Fetch the reason the HTLC was canceled so we can - // continue to propagate it. + // Fetch the reason the HTLC was canceled so + // we can continue to propagate it. This + // failure originated from another node, so + // the linkFailure field is not set on this + // packet. failPacket := &htlcPacket{ outgoingChanID: fwdPkg.Source, outgoingHTLCID: pd.ParentIndex, @@ -1883,13 +1995,13 @@ func (s *Switch) reforwardSettleFails(fwdPkgs []*channeldb.FwdPkg) { // link quit channel, meaning the send will fail only if the // switch receives a shutdown request. errChan := s.ForwardPackets(nil, switchPackets...) - go handleBatchFwdErrs(errChan) + go handleBatchFwdErrs(errChan, log) } } // handleBatchFwdErrs waits on the given errChan until it is closed, logging the // errors returned from any unsuccessful forwarding attempts. 
-func handleBatchFwdErrs(errChan chan error) { +func handleBatchFwdErrs(errChan chan error, l btclog.Logger) { for { err, ok := <-errChan if !ok { @@ -1902,7 +2014,7 @@ func handleBatchFwdErrs(errChan chan error) { continue } - log.Errorf("unhandled error while reforwarding htlc "+ + l.Errorf("Unhandled error while reforwarding htlc "+ "settle/fail over htlcswitch: %v", err) } } @@ -1947,7 +2059,8 @@ func (s *Switch) AddLink(link ChannelLink) error { // Get and attach the mailbox for this link, which buffers packets in // case there packets that we tried to deliver while this link was // offline. - mailbox := s.mailOrchestrator.GetOrCreateMailBox(chanID) + shortChanID := link.ShortChanID() + mailbox := s.mailOrchestrator.GetOrCreateMailBox(chanID, shortChanID) link.AttachMailBox(mailbox) if err := link.Start(); err != nil { @@ -1955,7 +2068,6 @@ func (s *Switch) AddLink(link ChannelLink) error { return err } - shortChanID := link.ShortChanID() if shortChanID == hop.Source { log.Infof("Adding pending link chan_id=%v, short_chan_id=%v", chanID, shortChanID) @@ -2127,7 +2239,7 @@ func (s *Switch) UpdateShortChanID(chanID lnwire.ChannelID) error { // Finally, alert the mail orchestrator to the change of short channel // ID, and deliver any unclaimed packets to the link. 
- mailbox := s.mailOrchestrator.GetOrCreateMailBox(chanID) + mailbox := s.mailOrchestrator.GetOrCreateMailBox(chanID, shortChanID) s.mailOrchestrator.BindLiveShortChanID( mailbox, chanID, shortChanID, ) diff --git a/htlcswitch/switch_test.go b/htlcswitch/switch_test.go index b0a53eb3a1..de9059615e 100644 --- a/htlcswitch/switch_test.go +++ b/htlcswitch/switch_test.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "io/ioutil" + "reflect" "testing" "time" @@ -13,11 +14,14 @@ import ( "github.com/btcsuite/fastsha256" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/ticker" ) +var zeroCircuit = channeldb.CircuitKey{} + func genPreimage() ([32]byte, error) { var preimage [32]byte if _, err := io.ReadFull(rand.Reader, preimage[:]); err != nil { @@ -32,7 +36,9 @@ func genPreimage() ([32]byte, error) { func TestSwitchAddDuplicateLink(t *testing.T) { t.Parallel() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } @@ -90,7 +96,9 @@ func TestSwitchAddDuplicateLink(t *testing.T) { func TestSwitchHasActiveLink(t *testing.T) { t.Parallel() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } @@ -158,7 +166,9 @@ func TestSwitchHasActiveLink(t *testing.T) { func TestSwitchSendPending(t *testing.T) { t.Parallel() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice 
server: %v", err) } @@ -204,10 +214,13 @@ func TestSwitchSendPending(t *testing.T) { // Send the ADD packet, this should not be forwarded out to the link // since there are no eligible links. err = s.forward(packet) - expErr := fmt.Sprintf("unable to find link with destination %v", - aliceChanID) - if err != nil && err.Error() != expErr { - t.Fatalf("expected forward failure: %v", err) + linkErr, ok := err.(*LinkError) + if !ok { + t.Fatalf("expected link error, got: %T", err) + } + if linkErr.WireMessage().Code() != lnwire.CodeUnknownNextPeer { + t.Fatalf("expected fail unknown next peer, got: %T", + linkErr.WireMessage().Code()) } // No message should be sent, since the packet was failed. @@ -253,11 +266,15 @@ func TestSwitchSendPending(t *testing.T) { func TestSwitchForward(t *testing.T) { t.Parallel() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } - bobPeer, err := newMockServer(t, "bob", testStartingHeight, nil, 6) + bobPeer, err := newMockServer( + t, "bob", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create bob server: %v", err) } @@ -322,6 +339,10 @@ func TestSwitchForward(t *testing.T) { t.Fatal("wrong amount of circuits") } + if !s.IsForwardedHTLC(bobChannelLink.ShortChanID(), 0) { + t.Fatal("htlc should be identified as forwarded") + } + // Create settle request pretending that bob link handled the add htlc // request and sent the htlc settle request back. This request should // be forwarder back to Alice link. 
@@ -358,11 +379,15 @@ func TestSwitchForwardFailAfterFullAdd(t *testing.T) { chanID1, chanID2, aliceChanID, bobChanID := genIDs() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } - bobPeer, err := newMockServer(t, "bob", testStartingHeight, nil, 6) + bobPeer, err := newMockServer( + t, "bob", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create bob server: %v", err) } @@ -549,11 +574,15 @@ func TestSwitchForwardSettleAfterFullAdd(t *testing.T) { chanID1, chanID2, aliceChanID, bobChanID := genIDs() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } - bobPeer, err := newMockServer(t, "bob", testStartingHeight, nil, 6) + bobPeer, err := newMockServer( + t, "bob", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create bob server: %v", err) } @@ -732,8 +761,8 @@ func TestSwitchForwardSettleAfterFullAdd(t *testing.T) { } // Send the settle packet again, which should fail. 
- if err := s2.forward(settle); err == nil { - t.Fatalf("expected failure when sending duplicate settle " + + if err := s2.forward(settle); err != nil { + t.Fatalf("expected success when sending duplicate settle " + "with no pending circuit") } } @@ -743,11 +772,15 @@ func TestSwitchForwardDropAfterFullAdd(t *testing.T) { chanID1, chanID2, aliceChanID, bobChanID := genIDs() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } - bobPeer, err := newMockServer(t, "bob", testStartingHeight, nil, 6) + bobPeer, err := newMockServer( + t, "bob", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create bob server: %v", err) } @@ -906,11 +939,15 @@ func TestSwitchForwardFailAfterHalfAdd(t *testing.T) { chanID1, chanID2, aliceChanID, bobChanID := genIDs() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } - bobPeer, err := newMockServer(t, "bob", testStartingHeight, nil, 6) + bobPeer, err := newMockServer( + t, "bob", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create bob server: %v", err) } @@ -1043,9 +1080,13 @@ func TestSwitchForwardFailAfterHalfAdd(t *testing.T) { // Resend the failed htlc, it should be returned to alice since the // switch will detect that it has been half added previously. 
err = s2.forward(ogPacket) - if err != ErrIncompleteForward { - t.Fatal("unexpected error when reforwarding a "+ - "failed packet", err) + linkErr, ok := err.(*LinkError) + if !ok { + t.Fatalf("expected link error, got: %T", err) + } + if linkErr.FailureDetail != OutgoingFailureIncompleteForward { + t.Fatalf("expected incomplete forward, got: %v", + linkErr.FailureDetail) } // After detecting an incomplete forward, the fail packet should have @@ -1064,11 +1105,15 @@ func TestSwitchForwardCircuitPersistence(t *testing.T) { chanID1, chanID2, aliceChanID, bobChanID := genIDs() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } - bobPeer, err := newMockServer(t, "bob", testStartingHeight, nil, 6) + bobPeer, err := newMockServer( + t, "bob", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create bob server: %v", err) } @@ -1287,19 +1332,252 @@ func TestSwitchForwardCircuitPersistence(t *testing.T) { } } +type multiHopFwdTest struct { + name string + eligible1, eligible2 bool + failure1, failure2 *LinkError + expectedReply lnwire.FailCode +} + +// TestCircularForwards tests the allowing/disallowing of circular payments +// through the same channel in the case where the switch is configured to allow +// and disallow same channel circular forwards. 
+func TestCircularForwards(t *testing.T) { + chanID1, aliceChanID := genID() + preimage := [sha256.Size]byte{1} + hash := fastsha256.Sum256(preimage[:]) + + tests := []struct { + name string + allowCircularPayment bool + expectedErr error + }{ + { + name: "circular payment allowed", + allowCircularPayment: true, + expectedErr: nil, + }, + { + name: "circular payment disallowed", + allowCircularPayment: false, + expectedErr: NewDetailedLinkError( + lnwire.NewTemporaryChannelFailure(nil), + OutgoingFailureCircularRoute, + ), + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, + testDefaultDelta, + ) + if err != nil { + t.Fatalf("unable to create alice server: %v", + err) + } + + s, err := initSwitchWithDB(testStartingHeight, nil) + if err != nil { + t.Fatalf("unable to init switch: %v", err) + } + if err := s.Start(); err != nil { + t.Fatalf("unable to start switch: %v", err) + } + defer func() { _ = s.Stop() }() + + // Set the switch to allow or disallow circular routes + // according to the test's requirements. + s.cfg.AllowCircularRoute = test.allowCircularPayment + + aliceChannelLink := newMockChannelLink( + s, chanID1, aliceChanID, alicePeer, true, + ) + + if err := s.AddLink(aliceChannelLink); err != nil { + t.Fatalf("unable to add alice link: %v", err) + } + + // Create a new packet that loops through alice's link + // in a circle. + obfuscator := NewMockObfuscator() + packet := &htlcPacket{ + incomingChanID: aliceChannelLink.ShortChanID(), + outgoingChanID: aliceChannelLink.ShortChanID(), + htlc: &lnwire.UpdateAddHTLC{ + PaymentHash: hash, + Amount: 1, + }, + obfuscator: obfuscator, + } + + // Attempt to forward the packet and check for the expected + // error. 
+ err = s.forward(packet) + if !reflect.DeepEqual(err, test.expectedErr) { + t.Fatalf("expected: %v, got: %v", + test.expectedErr, err) + } + + // Ensure that no circuits were opened. + if s.circuits.NumOpen() > 0 { + t.Fatal("do not expect any open circuits") + } + }) + } +} + +// TestCheckCircularForward tests the error returned by checkCircularForward +// in cases where we allow and disallow same channel circular forwards. +func TestCheckCircularForward(t *testing.T) { + tests := []struct { + name string + + // allowCircular determines whether we should allow circular + // forwards. + allowCircular bool + + // incomingLink is the link that the htlc arrived on. + incomingLink lnwire.ShortChannelID + + // outgoingLink is the link that the htlc forward + // is destined to leave on. + outgoingLink lnwire.ShortChannelID + + // expectedErr is the error we expect to be returned. + expectedErr *LinkError + }{ + { + name: "not circular, allowed in config", + allowCircular: true, + incomingLink: lnwire.NewShortChanIDFromInt(123), + outgoingLink: lnwire.NewShortChanIDFromInt(321), + expectedErr: nil, + }, + { + name: "not circular, not allowed in config", + allowCircular: false, + incomingLink: lnwire.NewShortChanIDFromInt(123), + outgoingLink: lnwire.NewShortChanIDFromInt(321), + expectedErr: nil, + }, + { + name: "circular, allowed in config", + allowCircular: true, + incomingLink: lnwire.NewShortChanIDFromInt(123), + outgoingLink: lnwire.NewShortChanIDFromInt(123), + expectedErr: nil, + }, + { + name: "circular, not allowed in config", + allowCircular: false, + incomingLink: lnwire.NewShortChanIDFromInt(123), + outgoingLink: lnwire.NewShortChanIDFromInt(123), + expectedErr: NewDetailedLinkError( + lnwire.NewTemporaryChannelFailure(nil), + OutgoingFailureCircularRoute, + ), + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + // Check for a circular forward, the hash passed can + // be nil because it is 
only used for logging. + err := checkCircularForward( + test.incomingLink, test.outgoingLink, + test.allowCircular, lntypes.Hash{}, + ) + if !reflect.DeepEqual(err, test.expectedErr) { + t.Fatalf("expected: %v, got: %v", + test.expectedErr, err) + } + }) + } +} + // TestSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes // along, then we won't attempt to froward it down al ink that isn't yet able // to forward any HTLC's. func TestSkipIneligibleLinksMultiHopForward(t *testing.T) { + tests := []multiHopFwdTest{ + // None of the channels is eligible. + { + name: "not eligible", + expectedReply: lnwire.CodeUnknownNextPeer, + }, + + // Channel one has a policy failure and the other channel isn't + // available. + { + name: "policy fail", + eligible1: true, + failure1: NewLinkError( + lnwire.NewFinalIncorrectCltvExpiry(0), + ), + expectedReply: lnwire.CodeFinalIncorrectCltvExpiry, + }, + + // The requested channel is not eligible, but the packet is + // forwarded through the other channel. + { + name: "non-strict success", + eligible2: true, + expectedReply: lnwire.CodeNone, + }, + + // The requested channel has insufficient bandwidth and the + // other channel's policy isn't satisfied. + { + name: "non-strict policy fail", + eligible1: true, + failure1: NewDetailedLinkError( + lnwire.NewTemporaryChannelFailure(nil), + OutgoingFailureInsufficientBalance, + ), + eligible2: true, + failure2: NewLinkError( + lnwire.NewFinalIncorrectCltvExpiry(0), + ), + expectedReply: lnwire.CodeTemporaryChannelFailure, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + testSkipIneligibleLinksMultiHopForward(t, &test) + }) + } +} + +// testSkipIneligibleLinksMultiHopForward tests that if a multi-hop HTLC comes +// along, then we won't attempt to froward it down al ink that isn't yet able +// to forward any HTLC's. 
+func testSkipIneligibleLinksMultiHopForward(t *testing.T, + testCase *multiHopFwdTest) { + t.Parallel() var packet *htlcPacket - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } - bobPeer, err := newMockServer(t, "bob", testStartingHeight, nil, 6) + bobPeer, err := newMockServer( + t, "bob", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create bob server: %v", err) } @@ -1313,22 +1591,32 @@ func TestSkipIneligibleLinksMultiHopForward(t *testing.T) { } defer s.Stop() - chanID1, chanID2, aliceChanID, bobChanID := genIDs() - + chanID1, aliceChanID := genID() aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, alicePeer, true, ) // We'll create a link for Bob, but mark the link as unable to forward // any new outgoing HTLC's. - bobChannelLink := newMockChannelLink( - s, chanID2, bobChanID, bobPeer, false, + chanID2, bobChanID2 := genID() + bobChannelLink1 := newMockChannelLink( + s, chanID2, bobChanID2, bobPeer, testCase.eligible1, ) + bobChannelLink1.checkHtlcForwardResult = testCase.failure1 + + chanID3, bobChanID3 := genID() + bobChannelLink2 := newMockChannelLink( + s, chanID3, bobChanID3, bobPeer, testCase.eligible2, + ) + bobChannelLink2.checkHtlcForwardResult = testCase.failure2 if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } - if err := s.AddLink(bobChannelLink); err != nil { + if err := s.AddLink(bobChannelLink1); err != nil { + t.Fatalf("unable to add bob link: %v", err) + } + if err := s.AddLink(bobChannelLink2); err != nil { t.Fatalf("unable to add bob link: %v", err) } @@ -1336,21 +1624,37 @@ func TestSkipIneligibleLinksMultiHopForward(t *testing.T) { // Alice. 
preimage := [sha256.Size]byte{1} rhash := fastsha256.Sum256(preimage[:]) + obfuscator := NewMockObfuscator() packet = &htlcPacket{ incomingChanID: aliceChannelLink.ShortChanID(), incomingHTLCID: 0, - outgoingChanID: bobChannelLink.ShortChanID(), + outgoingChanID: bobChannelLink1.ShortChanID(), htlc: &lnwire.UpdateAddHTLC{ PaymentHash: rhash, Amount: 1, }, - obfuscator: NewMockObfuscator(), + obfuscator: obfuscator, } // The request to forward should fail as err = s.forward(packet) - if err == nil { - t.Fatalf("forwarding should have failed due to inactive link") + + failure := obfuscator.(*mockObfuscator).failure + if testCase.expectedReply == lnwire.CodeNone { + if err != nil { + t.Fatalf("forwarding should have succeeded") + } + if failure != nil { + t.Fatalf("unexpected failure %T", failure) + } + } else { + if err == nil { + t.Fatalf("forwarding should have failed due to " + + "inactive link") + } + if failure.Code() != testCase.expectedReply { + t.Fatalf("unexpected failure %T", failure) + } } if s.circuits.NumOpen() != 0 { @@ -1380,7 +1684,9 @@ func testSkipLinkLocalForward(t *testing.T, eligible bool, // We'll create a single link for this test, marking it as being unable // to forward form the get go. 
- alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } @@ -1399,7 +1705,9 @@ func testSkipLinkLocalForward(t *testing.T, eligible bool, aliceChannelLink := newMockChannelLink( s, chanID1, aliceChanID, alicePeer, eligible, ) - aliceChannelLink.htlcSatifiesPolicyLocalResult = policyResult + aliceChannelLink.checkHtlcTransitResult = NewLinkError( + policyResult, + ) if err := s.AddLink(aliceChannelLink); err != nil { t.Fatalf("unable to add alice link: %v", err) } @@ -1432,11 +1740,15 @@ func testSkipLinkLocalForward(t *testing.T, eligible bool, func TestSwitchCancel(t *testing.T) { t.Parallel() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } - bobPeer, err := newMockServer(t, "bob", testStartingHeight, nil, 6) + bobPeer, err := newMockServer( + t, "bob", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create bob server: %v", err) } @@ -1545,11 +1857,15 @@ func TestSwitchAddSamePayment(t *testing.T) { chanID1, chanID2, aliceChanID, bobChanID := genIDs() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } - bobPeer, err := newMockServer(t, "bob", testStartingHeight, nil, 6) + bobPeer, err := newMockServer( + t, "bob", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create bob server: %v", err) } @@ -1704,7 +2020,9 @@ func TestSwitchAddSamePayment(t *testing.T) { func TestSwitchSendPayment(t *testing.T) { t.Parallel() - alicePeer, err := 
newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } @@ -1809,6 +2127,9 @@ func TestSwitchSendPayment(t *testing.T) { t.Fatalf("unable obfuscate failure: %v", err) } + if s.IsForwardedHTLC(aliceChannelLink.ShortChanID(), update.ID) { + t.Fatal("htlc should be identified as not forwarded") + } packet := &htlcPacket{ outgoingChanID: aliceChannelLink.ShortChanID(), outgoingHTLCID: 0, @@ -2095,11 +2416,11 @@ func TestUpdateFailMalformedHTLCErrorConversion(t *testing.T) { t.Fatalf("unable to send payment: %v", err) } - fwdingErr := err.(*ForwardingError) - failureMsg := fwdingErr.FailureMessage + routingErr := err.(ClearTextError) + failureMsg := routingErr.WireMessage() if _, ok := failureMsg.(*lnwire.FailInvalidOnionKey); !ok { t.Fatalf("expected onion failure instead got: %v", - fwdingErr.FailureMessage) + routingErr.WireMessage()) } } @@ -2242,7 +2563,9 @@ func TestSwitchGetPaymentResult(t *testing.T) { func TestInvalidFailure(t *testing.T) { t.Parallel() - alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6) + alicePeer, err := newMockServer( + t, "alice", testStartingHeight, nil, testDefaultDelta, + ) if err != nil { t.Fatalf("unable to create alice server: %v", err) } @@ -2358,14 +2681,18 @@ func TestInvalidFailure(t *testing.T) { select { case result := <-resultChan: - fErr, ok := result.Error.(*ForwardingError) + rtErr, ok := result.Error.(ClearTextError) if !ok { - t.Fatal("expected ForwardingError") + t.Fatal("expected ClearTextError") } - if fErr.FailureSourceIdx != 2 { + source, ok := rtErr.(*ForwardingError) + if !ok { + t.Fatalf("expected forwarding error, got: %T", rtErr) + } + if source.FailureSourceIdx != 2 { t.Fatal("unexpected error source index") } - if fErr.FailureMessage != nil { + if rtErr.WireMessage() != nil { t.Fatal("expected empty failure message") } @@ 
-2373,3 +2700,363 @@ func TestInvalidFailure(t *testing.T) { t.Fatal("err wasn't received") } } + +// htlcNotifierEvents is a function that generates a set of expected htlc +// notifier events for each node in a three hop network with the dynamic +// values provided. These functions take dynamic values so that changes to +// external systems (such as our default timelock delta) do not break +// these tests. +type htlcNotifierEvents func(channels *clusterChannels, htlcID uint64, + ts time.Time, htlc *lnwire.UpdateAddHTLC, + hops []*hop.Payload) ([]interface{}, []interface{}, []interface{}) + +// TestHtlcNotifier tests the notifying of htlc events that are routed over a +// three hop network. It sets up an Alice -> Bob -> Carol network and routes +// payments from Alice -> Carol to test events from the perspective of a +// sending (Alice), forwarding (Bob) and receiving (Carol) node. Test cases +// are present for successful and failed payments. +func TestHtlcNotifier(t *testing.T) { + tests := []struct { + name string + + // Options is a set of options to apply to the three hop + // network's servers. + options []serverOption + + // expectedEvents is a function which returns an expected set + // of events for the test. + expectedEvents htlcNotifierEvents + + // iterations is the number of times we will send a payment, + // this is used to send more than one payment to force non- + // zero htlc indexes to make sure we aren't just checking + // default values. + iterations int + }{ + { + name: "successful three hop payment", + options: nil, + expectedEvents: func(channels *clusterChannels, + htlcID uint64, ts time.Time, + htlc *lnwire.UpdateAddHTLC, + hops []*hop.Payload) ([]interface{}, + []interface{}, []interface{}) { + + return getThreeHopEvents( + channels, htlcID, ts, htlc, hops, nil, + ) + }, + iterations: 2, + }, + { + name: "failed at forwarding link", + // Set a functional option which disables bob as a + // forwarding node to force a payment error.
+ options: []serverOption{ + serverOptionRejectHtlc(false, true, false), + }, + expectedEvents: func(channels *clusterChannels, + htlcID uint64, ts time.Time, + htlc *lnwire.UpdateAddHTLC, + hops []*hop.Payload) ([]interface{}, + []interface{}, []interface{}) { + + return getThreeHopEvents( + channels, htlcID, ts, htlc, hops, + &LinkError{ + msg: &lnwire.FailChannelDisabled{}, + FailureDetail: OutgoingFailureForwardsDisabled, + }, + ) + }, + iterations: 1, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + testHtcNotifier( + t, test.options, test.iterations, + test.expectedEvents, + ) + }) + } +} + +// testHtcNotifier runs a htlc notifier test. +func testHtcNotifier(t *testing.T, testOpts []serverOption, iterations int, + getEvents htlcNotifierEvents) { + + t.Parallel() + + // First, we'll create our traditional three hop + // network. + channels, cleanUp, _, err := createClusterChannels( + btcutil.SatoshiPerBitcoin*3, + btcutil.SatoshiPerBitcoin*5) + if err != nil { + t.Fatalf("unable to create channel: %v", err) + } + defer cleanUp() + + // Mock time so that all events are reported with a static timestamp. + now := time.Now() + mockTime := func() time.Time { + return now + } + + // Create htlc notifiers for each server in the three hop network and + // start them. + aliceNotifier := NewHtlcNotifier(mockTime) + if err := aliceNotifier.Start(); err != nil { + t.Fatalf("could not start alice notifier") + } + defer aliceNotifier.Stop() + + bobNotifier := NewHtlcNotifier(mockTime) + if err := bobNotifier.Start(); err != nil { + t.Fatalf("could not start bob notifier") + } + defer bobNotifier.Stop() + + carolNotifier := NewHtlcNotifier(mockTime) + if err := carolNotifier.Start(); err != nil { + t.Fatalf("could not start carol notifier") + } + defer carolNotifier.Stop() + + // Create a notifier server option which will set our htlc notifiers + // for the three hop network. 
+ notifierOption := serverOptionWithHtlcNotifier( + aliceNotifier, bobNotifier, carolNotifier, + ) + + // Add the htlcNotifier option to any other options + // set in the test. + options := append(testOpts, notifierOption) + + n := newThreeHopNetwork( + t, channels.aliceToBob, + channels.bobToAlice, channels.bobToCarol, + channels.carolToBob, testStartingHeight, + options..., + ) + if err := n.start(); err != nil { + t.Fatalf("unable to start three hop "+ + "network: %v", err) + } + defer n.stop() + + // Before we forward anything, subscribe to htlc events + // from each notifier. + aliceEvents, err := aliceNotifier.SubscribeHtlcEvents() + if err != nil { + t.Fatalf("could not subscribe to alice's"+ + " events: %v", err) + } + defer aliceEvents.Cancel() + + bobEvents, err := bobNotifier.SubscribeHtlcEvents() + if err != nil { + t.Fatalf("could not subscribe to bob's"+ + " events: %v", err) + } + defer bobEvents.Cancel() + + carolEvents, err := carolNotifier.SubscribeHtlcEvents() + if err != nil { + t.Fatalf("could not subscribe to carol's"+ + " events: %v", err) + } + defer carolEvents.Cancel() + + // Send multiple payments, as specified by the test to test incrementing + // of htlc ids. + for i := 0; i < iterations; i++ { + // We'll start off by making a payment from + // Alice -> Bob -> Carol. + htlc, hops := n.sendThreeHopPayment(t) + + alice, bob, carol := getEvents( + channels, uint64(i), now, htlc, hops, + ) + + checkHtlcEvents(t, aliceEvents.Updates(), alice) + checkHtlcEvents(t, bobEvents.Updates(), bob) + checkHtlcEvents(t, carolEvents.Updates(), carol) + + } +} + +// checkHtlcEvents checks that a subscription has the set of htlc events +// we expect it to have. 
+func checkHtlcEvents(t *testing.T, events <-chan interface{}, + expectedEvents []interface{}) { + + t.Helper() + + for _, expected := range expectedEvents { + select { + case event := <-events: + if !reflect.DeepEqual(event, expected) { + t.Fatalf("expected %v, got: %v", expected, + event) + } + + case <-time.After(5 * time.Second): + t.Fatalf("expected event: %v", expected) + } + } +} + +// sendThreeHopPayment is a helper function which sends a payment over +// Alice -> Bob -> Carol in a three hop network and returns Alice's first htlc +// and the remainder of the hops. +func (n *threeHopNetwork) sendThreeHopPayment(t *testing.T) (*lnwire.UpdateAddHTLC, + []*hop.Payload) { + + amount := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) + + htlcAmt, totalTimelock, hops := generateHops(amount, testStartingHeight, + n.firstBobChannelLink, n.carolChannelLink) + blob, err := generateRoute(hops...) + if err != nil { + t.Fatal(err) + } + invoice, htlc, pid, err := generatePayment( + amount, htlcAmt, totalTimelock, blob, + ) + if err != nil { + t.Fatal(err) + } + + err = n.carolServer.registry.AddInvoice(*invoice, htlc.PaymentHash) + if err != nil { + t.Fatalf("unable to add invoice in carol registry: %v", err) + } + + if err := n.aliceServer.htlcSwitch.SendHTLC( + n.firstBobChannelLink.ShortChanID(), pid, htlc, + ); err != nil { + t.Fatalf("could not send htlc") + } + + return htlc, hops +} + +// getThreeHopEvents gets the set of htlc events that we expect for a payment +// from Alice -> Bob -> Carol. If a non-nil link error is provided, the set +// of events will fail on Bob's outgoing link. 
+func getThreeHopEvents(channels *clusterChannels, htlcID uint64, + ts time.Time, htlc *lnwire.UpdateAddHTLC, hops []*hop.Payload, + linkError *LinkError) ([]interface{}, []interface{}, []interface{}) { + + aliceKey := HtlcKey{ + IncomingCircuit: zeroCircuit, + OutgoingCircuit: channeldb.CircuitKey{ + ChanID: channels.aliceToBob.ShortChanID(), + HtlcID: htlcID, + }, + } + + // Alice always needs a forwarding event because she initiates the + // send. + aliceEvents := []interface{}{ + &ForwardingEvent{ + HtlcKey: aliceKey, + HtlcInfo: HtlcInfo{ + OutgoingTimeLock: htlc.Expiry, + OutgoingAmt: htlc.Amount, + }, + HtlcEventType: HtlcEventTypeSend, + Timestamp: ts, + }, + } + + bobKey := HtlcKey{ + IncomingCircuit: channeldb.CircuitKey{ + ChanID: channels.bobToAlice.ShortChanID(), + HtlcID: htlcID, + }, + OutgoingCircuit: channeldb.CircuitKey{ + ChanID: channels.bobToCarol.ShortChanID(), + HtlcID: htlcID, + }, + } + + bobInfo := HtlcInfo{ + IncomingTimeLock: htlc.Expiry, + IncomingAmt: htlc.Amount, + OutgoingTimeLock: hops[1].FwdInfo.OutgoingCTLV, + OutgoingAmt: hops[1].FwdInfo.AmountToForward, + } + + // If we expect the payment to fail, we add failures for alice and + // bob, and no events for carol because the payment never reaches her. + if linkError != nil { + aliceEvents = append(aliceEvents, + &ForwardingFailEvent{ + HtlcKey: aliceKey, + HtlcEventType: HtlcEventTypeSend, + Timestamp: ts, + }, + ) + + bobEvents := []interface{}{ + &LinkFailEvent{ + HtlcKey: bobKey, + HtlcInfo: bobInfo, + HtlcEventType: HtlcEventTypeForward, + LinkError: linkError, + Incoming: false, + Timestamp: ts, + }, + } + + return aliceEvents, bobEvents, nil + } + + // If we want to get events for a successful payment, we add a settle + // for alice, a forward and settle for bob and a receive settle for + // carol. 
+ aliceEvents = append( + aliceEvents, + &SettleEvent{ + HtlcKey: aliceKey, + HtlcEventType: HtlcEventTypeSend, + Timestamp: ts, + }, + ) + + bobEvents := []interface{}{ + &ForwardingEvent{ + HtlcKey: bobKey, + HtlcInfo: bobInfo, + HtlcEventType: HtlcEventTypeForward, + Timestamp: ts, + }, + &SettleEvent{ + HtlcKey: bobKey, + HtlcEventType: HtlcEventTypeForward, + Timestamp: ts, + }, + } + + carolEvents := []interface{}{ + &SettleEvent{ + HtlcKey: HtlcKey{ + IncomingCircuit: channeldb.CircuitKey{ + ChanID: channels.carolToBob.ShortChanID(), + HtlcID: htlcID, + }, + OutgoingCircuit: zeroCircuit, + }, + HtlcEventType: HtlcEventTypeReceive, + Timestamp: ts, + }, + } + + return aliceEvents, bobEvents, carolEvents +} diff --git a/htlcswitch/test_utils.go b/htlcswitch/test_utils.go index f2f86b752b..614abce695 100644 --- a/htlcswitch/test_utils.go +++ b/htlcswitch/test_utils.go @@ -21,9 +21,10 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/btcsuite/fastsha256" - "github.com/coreos/bbolt" "github.com/go-errors/errors" + sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/input" @@ -32,6 +33,7 @@ import ( "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/shachain" "github.com/lightningnetwork/lnd/ticker" @@ -92,27 +94,28 @@ var ( var idSeqNum uint64 -func genIDs() (lnwire.ChannelID, lnwire.ChannelID, lnwire.ShortChannelID, - lnwire.ShortChannelID) { - - id := atomic.AddUint64(&idSeqNum, 2) +// genID generates a unique tuple to identify a test channel. 
+func genID() (lnwire.ChannelID, lnwire.ShortChannelID) { + id := atomic.AddUint64(&idSeqNum, 1) var scratch [8]byte binary.BigEndian.PutUint64(scratch[:], id) hash1, _ := chainhash.NewHash(bytes.Repeat(scratch[:], 4)) - binary.BigEndian.PutUint64(scratch[:], id+1) - hash2, _ := chainhash.NewHash(bytes.Repeat(scratch[:], 4)) - chanPoint1 := wire.NewOutPoint(hash1, uint32(id)) - chanPoint2 := wire.NewOutPoint(hash2, uint32(id+1)) - chanID1 := lnwire.NewChanIDFromOutPoint(chanPoint1) - chanID2 := lnwire.NewChanIDFromOutPoint(chanPoint2) - aliceChanID := lnwire.NewShortChanIDFromInt(id) - bobChanID := lnwire.NewShortChanIDFromInt(id + 1) + + return chanID1, aliceChanID +} + +// genIDs generates ids for two test channels. +func genIDs() (lnwire.ChannelID, lnwire.ChannelID, lnwire.ShortChannelID, + lnwire.ShortChannelID) { + + chanID1, aliceChanID := genID() + chanID2, bobChanID := genID() return chanID1, chanID2, aliceChanID, bobChanID } @@ -259,7 +262,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns( aliceAmount, bobAmount, &aliceCfg, &bobCfg, aliceCommitPoint, - bobCommitPoint, *fundingTxIn, true, + bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit, ) if err != nil { return nil, nil, nil, err @@ -285,7 +288,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, return nil, nil, nil, err } - estimator := lnwallet.NewStaticFeeEstimator(6000, 0) + estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { return nil, nil, nil, err @@ -327,7 +330,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, RemoteChanCfg: bobCfg, IdentityPub: aliceKeyPub, FundingOutpoint: *prevOut, - ChanType: channeldb.SingleFunderTweakless, + ChanType: channeldb.SingleFunderTweaklessBit, IsInitiator: true, Capacity: channelCapacity, RemoteCurrentRevocation: bobCommitPoint, @@ -346,7 +349,7 @@ func createTestChannel(alicePrivKey, 
bobPrivKey []byte, RemoteChanCfg: aliceCfg, IdentityPub: bobKeyPub, FundingOutpoint: *prevOut, - ChanType: channeldb.SingleFunderTweakless, + ChanType: channeldb.SingleFunderTweaklessBit, IsInitiator: false, Capacity: channelCapacity, RemoteCurrentRevocation: aliceCommitPoint, @@ -417,7 +420,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, aliceStoredChannels, err := dbAlice.FetchOpenChannels(aliceKeyPub) switch err { case nil: - case bbolt.ErrDatabaseNotOpen: + case kvdb.ErrDatabaseNotOpen: dbAlice, err = channeldb.Open(dbAlice.Path()) if err != nil { return nil, errors.Errorf("unable to reopen alice "+ @@ -461,7 +464,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, bobStoredChannels, err := dbBob.FetchOpenChannels(bobKeyPub) switch err { case nil: - case bbolt.ErrDatabaseNotOpen: + case kvdb.ErrDatabaseNotOpen: dbBob, err = channeldb.Open(dbBob.Path()) if err != nil { return nil, errors.Errorf("unable to reopen bob "+ @@ -557,10 +560,13 @@ func generatePaymentWithPreimage(invoiceAmt, htlcAmt lnwire.MilliSatoshi, invoice := &channeldb.Invoice{ CreationDate: time.Now(), Terms: channeldb.ContractTerm{ + FinalCltvDelta: testInvoiceCltvExpiry, Value: invoiceAmt, PaymentPreimage: preimage, + Features: lnwire.NewFeatureVector( + nil, lnwire.Features, + ), }, - FinalCltvDelta: testInvoiceCltvExpiry, } htlc := &lnwire.UpdateAddHTLC{ @@ -599,7 +605,7 @@ func generatePayment(invoiceAmt, htlcAmt lnwire.MilliSatoshi, timelock uint32, } // generateRoute generates the path blob by given array of peers. -func generateRoute(hops ...hop.ForwardingInfo) ( +func generateRoute(hops ...*hop.Payload) ( [lnwire.OnionPacketSize]byte, error) { var blob [lnwire.OnionPacketSize]byte @@ -640,13 +646,12 @@ type threeHopNetwork struct { // also the time lock value needed to route an HTLC with the target amount over // the specified path. 
func generateHops(payAmt lnwire.MilliSatoshi, startingHeight uint32, - path ...*channelLink) (lnwire.MilliSatoshi, uint32, - []hop.ForwardingInfo) { + path ...*channelLink) (lnwire.MilliSatoshi, uint32, []*hop.Payload) { totalTimelock := startingHeight runningAmt := payAmt - hops := make([]hop.ForwardingInfo, len(path)) + hops := make([]*hop.Payload, len(path)) for i := len(path) - 1; i >= 0; i-- { // If this is the last hop, then the next hop is the special // "exit node". Otherwise, we look to the "prior" hop. @@ -674,7 +679,7 @@ func generateHops(payAmt lnwire.MilliSatoshi, startingHeight uint32, amount := payAmt if i != len(path)-1 { prevHop := hops[i+1] - prevAmount := prevHop.AmountToForward + prevAmount := prevHop.ForwardingInfo().AmountToForward fee := ExpectedFee(path[i].cfg.FwrdingPolicy, prevAmount) runningAmt += fee @@ -685,12 +690,15 @@ func generateHops(payAmt lnwire.MilliSatoshi, startingHeight uint32, amount = runningAmt - fee } - hops[i] = hop.ForwardingInfo{ - Network: hop.BitcoinNetwork, - NextHop: nextHop, - AmountToForward: amount, - OutgoingCTLV: timeLock, - } + var nextHopBytes [8]byte + binary.BigEndian.PutUint64(nextHopBytes[:], nextHop.ToUint64()) + + hops[i] = hop.NewLegacyPayload(&sphinx.HopData{ + Realm: [1]byte{}, // hop.BitcoinNetwork + NextAddress: nextHopBytes, + ForwardAmount: uint64(amount), + OutgoingCltv: timeLock, + }) } return runningAmt, totalTimelock, hops @@ -737,7 +745,7 @@ func waitForPayFuncResult(payFunc func() error, d time.Duration) error { // * from Alice to Carol through the Bob // * from Alice to some another peer through the Bob func makePayment(sendingPeer, receivingPeer lnpeer.Peer, - firstHop lnwire.ShortChannelID, hops []hop.ForwardingInfo, + firstHop lnwire.ShortChannelID, hops []*hop.Payload, invoiceAmt, htlcAmt lnwire.MilliSatoshi, timelock uint32) *paymentResponse { @@ -771,7 +779,7 @@ func makePayment(sendingPeer, receivingPeer lnpeer.Peer, // preparePayment creates an invoice at the receivingPeer and 
returns a function // that, when called, launches the payment from the sendingPeer. func preparePayment(sendingPeer, receivingPeer lnpeer.Peer, - firstHop lnwire.ShortChannelID, hops []hop.ForwardingInfo, + firstHop lnwire.ShortChannelID, hops []*hop.Payload, invoiceAmt, htlcAmt lnwire.MilliSatoshi, timelock uint32) (*channeldb.Invoice, func() error, error) { @@ -958,9 +966,11 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( // alice first bob second bob carol // channel link channel link channel link channel link // +// This function takes server options which can be used to apply custom +// settings to alice, bob and carol. func newThreeHopNetwork(t testing.TB, aliceChannel, firstBobChannel, secondBobChannel, carolChannel *lnwallet.LightningChannel, - startingHeight uint32) *threeHopNetwork { + startingHeight uint32, opts ...serverOption) *threeHopNetwork { aliceDb := aliceChannel.State().Db bobDb := firstBobChannel.State().Db @@ -988,6 +998,12 @@ func newThreeHopNetwork(t testing.TB, aliceChannel, firstBobChannel, t.Fatalf("unable to create carol server: %v", err) } + // Apply all additional functional options to the servers before + // creating any links. + for _, option := range opts { + option(aliceServer, bobServer, carolServer) + } + // Create mock decoder instead of sphinx one in order to mock the route // which htlc should follow. aliceDecoder := newMockIteratorDecoder() @@ -1037,6 +1053,34 @@ func newThreeHopNetwork(t testing.TB, aliceChannel, firstBobChannel, } } +// serverOption is a function which alters the three servers created for +// a three hop network to allow custom settings on each server. +type serverOption func(aliceServer, bobServer, carolServer *mockServer) + +// serverOptionWithHtlcNotifier is a functional option for the creation of +// three hop network servers which allows setting of htlc notifiers. +// Note that these notifiers should be started and stopped by the calling +// function. 
+func serverOptionWithHtlcNotifier(alice, bob, + carol *HtlcNotifier) serverOption { + + return func(aliceServer, bobServer, carolServer *mockServer) { + aliceServer.htlcSwitch.cfg.HtlcNotifier = alice + bobServer.htlcSwitch.cfg.HtlcNotifier = bob + carolServer.htlcSwitch.cfg.HtlcNotifier = carol + } +} + +// serverOptionRejectHtlc is the functional option for setting the reject +// htlc config option in each server's switch. +func serverOptionRejectHtlc(alice, bob, carol bool) serverOption { + return func(aliceServer, bobServer, carolServer *mockServer) { + aliceServer.htlcSwitch.cfg.RejectHTLC = alice + bobServer.htlcSwitch.cfg.RejectHTLC = bob + carolServer.htlcSwitch.cfg.RejectHTLC = carol + } +} + // createTwoClusterChannels creates lightning channels which are needed for // a 2 hop network cluster to be initialized. func createTwoClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( @@ -1070,14 +1114,14 @@ func newHopNetwork() *hopNetwork { defaultDelta := uint32(6) globalPolicy := ForwardingPolicy{ - MinHTLC: lnwire.NewMSatFromSatoshis(5), + MinHTLCOut: lnwire.NewMSatFromSatoshis(5), BaseFee: lnwire.NewMSatFromSatoshis(1), TimeLockDelta: defaultDelta, } obfuscator := NewMockObfuscator() feeEstimator := &mockFeeEstimator{ - byteFeeIn: make(chan lnwallet.SatPerKWeight), + byteFeeIn: make(chan chainfee.SatPerKWeight), quit: make(chan struct{}), } @@ -1123,14 +1167,17 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer, BatchSize: 10, BatchTicker: ticker.NewForce(testBatchTimeout), FwdPkgGCTicker: ticker.NewForce(fwdPkgTimeout), + PendingCommitTicker: ticker.NewForce(2 * time.Minute), MinFeeUpdateTimeout: minFeeUpdateTimeout, MaxFeeUpdateTimeout: maxFeeUpdateTimeout, OnChannelFailure: func(lnwire.ChannelID, lnwire.ShortChannelID, LinkFailureError) {}, OutgoingCltvRejectDelta: 3, MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, MaxFeeAllocation: DefaultMaxLinkFeeAllocation, + NotifyActiveLink: func(wire.OutPoint) {}, NotifyActiveChannel: 
func(wire.OutPoint) {}, NotifyInactiveChannel: func(wire.OutPoint) {}, + HtlcNotifier: server.htlcSwitch.cfg.HtlcNotifier, }, channel, ) @@ -1263,7 +1310,7 @@ func (n *twoHopNetwork) stop() { } func (n *twoHopNetwork) makeHoldPayment(sendingPeer, receivingPeer lnpeer.Peer, - firstHop lnwire.ShortChannelID, hops []hop.ForwardingInfo, + firstHop lnwire.ShortChannelID, hops []*hop.Payload, invoiceAmt, htlcAmt lnwire.MilliSatoshi, timelock uint32, preimage lntypes.Preimage) chan error { @@ -1299,15 +1346,13 @@ func (n *twoHopNetwork) makeHoldPayment(sendingPeer, receivingPeer lnpeer.Peer, } // Send payment and expose err channel. - go func() { - err := sender.htlcSwitch.SendHTLC( - firstHop, pid, htlc, - ) - if err != nil { - paymentErr <- err - return - } + err = sender.htlcSwitch.SendHTLC(firstHop, pid, htlc) + if err != nil { + paymentErr <- err + return paymentErr + } + go func() { resultChan, err := sender.htlcSwitch.GetPaymentResult( pid, rhash, newMockDeobfuscator(), ) @@ -1319,6 +1364,7 @@ func (n *twoHopNetwork) makeHoldPayment(sendingPeer, receivingPeer lnpeer.Peer, result, ok := <-resultChan if !ok { paymentErr <- fmt.Errorf("shutting down") + return } if result.Error != nil { diff --git a/input/input.go b/input/input.go index 10366b705d..38a1651e52 100644 --- a/input/input.go +++ b/input/input.go @@ -44,10 +44,11 @@ type Input interface { } type inputKit struct { - outpoint wire.OutPoint - witnessType WitnessType - signDesc SignDescriptor - heightHint uint32 + outpoint wire.OutPoint + witnessType WitnessType + signDesc SignDescriptor + heightHint uint32 + blockToMaturity uint32 } // OutPoint returns the breached output's identifier that is to be included as @@ -74,6 +75,13 @@ func (i *inputKit) HeightHint() uint32 { return i.heightHint } +// BlocksToMaturity returns the relative timelock, as a number of blocks, that +// must be built on top of the confirmation height before the output can be +// spent. For non-CSV locked inputs this is always zero. 
+func (i *inputKit) BlocksToMaturity() uint32 { + return i.blockToMaturity +} + // BaseInput contains all the information needed to sweep a basic output // (CSV/CLTV/no time lock) type BaseInput struct { @@ -107,6 +115,23 @@ func NewBaseInput(outpoint *wire.OutPoint, witnessType WitnessType, return &input } +// NewCsvInput assembles a new csv-locked input that can be used to +// construct a sweep transaction. +func NewCsvInput(outpoint *wire.OutPoint, witnessType WitnessType, + signDescriptor *SignDescriptor, heightHint uint32, + blockToMaturity uint32) *BaseInput { + + return &BaseInput{ + inputKit{ + outpoint: *outpoint, + witnessType: witnessType, + signDesc: *signDescriptor, + heightHint: heightHint, + blockToMaturity: blockToMaturity, + }, + } +} + // CraftInputScript returns a valid set of input scripts allowing this output // to be spent. The returned input scripts should target the input at location // txIndex within the passed transaction. The input scripts generated by this @@ -114,20 +139,11 @@ func NewBaseInput(outpoint *wire.OutPoint, witnessType WitnessType, func (bi *BaseInput) CraftInputScript(signer Signer, txn *wire.MsgTx, hashCache *txscript.TxSigHashes, txinIdx int) (*Script, error) { - witnessFunc := bi.witnessType.GenWitnessFunc( - signer, bi.SignDesc(), - ) + witnessFunc := bi.witnessType.WitnessGenerator(signer, bi.SignDesc()) return witnessFunc(txn, hashCache, txinIdx) } -// BlocksToMaturity returns the relative timelock, as a number of blocks, that -// must be built on top of the confirmation height before the output can be -// spent. For non-CSV locked inputs this is always zero. -func (bi *BaseInput) BlocksToMaturity() uint32 { - return 0 -} - // HtlcSucceedInput constitutes a sweep input that needs a pre-image. The input // is expected to reside on the commitment tx of the remote party and should // not be a second level tx output. 
@@ -140,15 +156,16 @@ type HtlcSucceedInput struct { // MakeHtlcSucceedInput assembles a new redeem input that can be used to // construct a sweep transaction. func MakeHtlcSucceedInput(outpoint *wire.OutPoint, - signDescriptor *SignDescriptor, - preimage []byte, heightHint uint32) HtlcSucceedInput { + signDescriptor *SignDescriptor, preimage []byte, heightHint, + blocksToMaturity uint32) HtlcSucceedInput { return HtlcSucceedInput{ inputKit: inputKit{ - outpoint: *outpoint, - witnessType: HtlcAcceptedRemoteSuccess, - signDesc: *signDescriptor, - heightHint: heightHint, + outpoint: *outpoint, + witnessType: HtlcAcceptedRemoteSuccess, + signDesc: *signDescriptor, + heightHint: heightHint, + blockToMaturity: blocksToMaturity, }, preimage: preimage, } @@ -177,13 +194,6 @@ func (h *HtlcSucceedInput) CraftInputScript(signer Signer, txn *wire.MsgTx, }, nil } -// BlocksToMaturity returns the relative timelock, as a number of blocks, that -// must be built on top of the confirmation height before the output can be -// spent. -func (h *HtlcSucceedInput) BlocksToMaturity() uint32 { - return 0 -} - // Compile-time constraints to ensure each input struct implement the Input // interface. var _ Input = (*BaseInput)(nil) diff --git a/input/script_utils.go b/input/script_utils.go index 8b47b05681..318d1faed6 100644 --- a/input/script_utils.go +++ b/input/script_utils.go @@ -23,6 +23,17 @@ var ( SequenceLockTimeSeconds = uint32(1 << 22) ) +// Signature is an interface for objects that can populate signatures during +// witness construction. +type Signature interface { + // Serialize returns a DER-encoded ECDSA signature. + Serialize() []byte + + // Verify return true if the ECDSA signature is valid for the passed + // message digest under the provided public key. + Verify([]byte, *btcec.PublicKey) bool +} + // WitnessScriptHash generates a pay-to-witness-script-hash public key script // paying to a version 0 witness program paying to the passed redeem script. 
func WitnessScriptHash(witnessScript []byte) ([]byte, error) { @@ -85,7 +96,9 @@ func GenFundingPkScript(aPub, bPub []byte, amt int64) ([]byte, *wire.TxOut, erro // SpendMultiSig generates the witness stack required to redeem the 2-of-2 p2wsh // multi-sig output. -func SpendMultiSig(witnessScript, pubA, sigA, pubB, sigB []byte) [][]byte { +func SpendMultiSig(witnessScript, pubA []byte, sigA Signature, + pubB []byte, sigB Signature) [][]byte { + witness := make([][]byte, 4) // When spending a p2wsh multi-sig script, rather than an OP_0, we add @@ -97,11 +110,11 @@ func SpendMultiSig(witnessScript, pubA, sigA, pubB, sigB []byte) [][]byte { // ensure the signatures appear on the Script Virtual Machine stack in // the correct order. if bytes.Compare(pubA, pubB) == 1 { - witness[1] = sigB - witness[2] = sigA + witness[1] = append(sigB.Serialize(), byte(txscript.SigHashAll)) + witness[2] = append(sigA.Serialize(), byte(txscript.SigHashAll)) } else { - witness[1] = sigA - witness[2] = sigB + witness[1] = append(sigA.Serialize(), byte(txscript.SigHashAll)) + witness[2] = append(sigB.Serialize(), byte(txscript.SigHashAll)) } // Finally, add the preimage as the last witness element. @@ -150,6 +163,9 @@ func Ripemd160H(d []byte) []byte { // * The receiver of the HTLC sweeping all the funds in the case that a // revoked commitment transaction bearing this HTLC was broadcast. // +// If confirmedSpend=true, a 1 OP_CSV check will be added to the non-revocation +// cases, to allow sweeping only after confirmation. +// // Possible Input Scripts: // SENDR: <0> <0> (spend using HTLC timeout transaction) // RECVR: @@ -168,9 +184,11 @@ func Ripemd160H(d []byte) []byte { // OP_HASH160 OP_EQUALVERIFY // OP_CHECKSIG // OP_ENDIF +// [1 OP_CHECKSEQUENCEVERIFY OP_DROP] <- if allowing confirmed spend only. 
// OP_ENDIF func SenderHTLCScript(senderHtlcKey, receiverHtlcKey, - revocationKey *btcec.PublicKey, paymentHash []byte) ([]byte, error) { + revocationKey *btcec.PublicKey, paymentHash []byte, + confirmedSpend bool) ([]byte, error) { builder := txscript.NewScriptBuilder() @@ -243,6 +261,14 @@ func SenderHTLCScript(senderHtlcKey, receiverHtlcKey, // Close out the OP_IF statement above. builder.AddOp(txscript.OP_ENDIF) + // Add 1 block CSV delay if a confirmation is required for the + // non-revocation clauses. + if confirmedSpend { + builder.AddOp(txscript.OP_1) + builder.AddOp(txscript.OP_CHECKSEQUENCEVERIFY) + builder.AddOp(txscript.OP_DROP) + } + // Close out the OP_IF statement at the top of the script. builder.AddOp(txscript.OP_ENDIF) @@ -270,7 +296,7 @@ func SenderHtlcSpendRevokeWithKey(signer Signer, signDesc *SignDescriptor, // manner in order to encode the revocation contract into a sig+key // pair. witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[1] = revokeKey.SerializeCompressed() witnessStack[2] = signDesc.WitnessScript @@ -319,7 +345,7 @@ func SenderHtlcSpendRedeem(signer Signer, signDesc *SignDescriptor, // generated above under the receiver's public key, and the payment // pre-image. witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[1] = paymentPreimage witnessStack[2] = signDesc.WitnessScript @@ -330,8 +356,10 @@ func SenderHtlcSpendRedeem(signer Signer, signDesc *SignDescriptor, // HTLC to activate the time locked covenant clause of a soon to be expired // HTLC. This script simply spends the multi-sig output using the // pre-generated HTLC timeout transaction. 
-func SenderHtlcSpendTimeout(receiverSig []byte, signer Signer, - signDesc *SignDescriptor, htlcTimeoutTx *wire.MsgTx) (wire.TxWitness, error) { +func SenderHtlcSpendTimeout(receiverSig Signature, + receiverSigHash txscript.SigHashType, signer Signer, + signDesc *SignDescriptor, htlcTimeoutTx *wire.MsgTx) ( + wire.TxWitness, error) { sweepSig, err := signer.SignOutputRaw(htlcTimeoutTx, signDesc) if err != nil { @@ -344,8 +372,8 @@ func SenderHtlcSpendTimeout(receiverSig []byte, signer Signer, // original OP_CHECKMULTISIG. witnessStack := wire.TxWitness(make([][]byte, 5)) witnessStack[0] = nil - witnessStack[1] = append(receiverSig, byte(txscript.SigHashAll)) - witnessStack[2] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[1] = append(receiverSig.Serialize(), byte(receiverSigHash)) + witnessStack[2] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[3] = nil witnessStack[4] = signDesc.WitnessScript @@ -362,6 +390,9 @@ func SenderHtlcSpendTimeout(receiverSig []byte, signer Signer, // * The sender of the HTLC sweeps the HTLC on-chain after the timeout period // of the HTLC has passed. // +// If confirmedSpend=true, a 1 OP_CSV check will be added to the non-revocation +// cases, to allow sweeping only after confirmation. +// // Possible Input Scripts: // RECVR: <0> (spend using HTLC success transaction) // REVOK: @@ -381,10 +412,11 @@ func SenderHtlcSpendTimeout(receiverSig []byte, signer Signer, // OP_DROP OP_CHECKLOCKTIMEVERIFY OP_DROP // OP_CHECKSIG // OP_ENDIF +// [1 OP_CHECKSEQUENCEVERIFY OP_DROP] <- if allowing confirmed spend only. // OP_ENDIF func ReceiverHTLCScript(cltvExpiry uint32, senderHtlcKey, receiverHtlcKey, revocationKey *btcec.PublicKey, - paymentHash []byte) ([]byte, error) { + paymentHash []byte, confirmedSpend bool) ([]byte, error) { builder := txscript.NewScriptBuilder() @@ -467,6 +499,14 @@ func ReceiverHTLCScript(cltvExpiry uint32, senderHtlcKey, // Close out the inner if statement. 
builder.AddOp(txscript.OP_ENDIF) + // Add 1 block CSV delay for non-revocation clauses if confirmation is + // required. + if confirmedSpend { + builder.AddOp(txscript.OP_1) + builder.AddOp(txscript.OP_CHECKSEQUENCEVERIFY) + builder.AddOp(txscript.OP_DROP) + } + // Close out the outer if statement. builder.AddOp(txscript.OP_ENDIF) @@ -481,9 +521,10 @@ func ReceiverHTLCScript(cltvExpiry uint32, senderHtlcKey, // signed has a relative timelock delay enforced by its sequence number. This // delay give the sender of the HTLC enough time to revoke the output if this // is a breach commitment transaction. -func ReceiverHtlcSpendRedeem(senderSig, paymentPreimage []byte, - signer Signer, signDesc *SignDescriptor, - htlcSuccessTx *wire.MsgTx) (wire.TxWitness, error) { +func ReceiverHtlcSpendRedeem(senderSig Signature, + senderSigHash txscript.SigHashType, paymentPreimage []byte, + signer Signer, signDesc *SignDescriptor, htlcSuccessTx *wire.MsgTx) ( + wire.TxWitness, error) { // First, we'll generate a signature for the HTLC success transaction. // The signDesc should be signing with the public key used as the @@ -499,8 +540,8 @@ func ReceiverHtlcSpendRedeem(senderSig, paymentPreimage []byte, // order to consume the extra pop within OP_CHECKMULTISIG. witnessStack := wire.TxWitness(make([][]byte, 5)) witnessStack[0] = nil - witnessStack[1] = append(senderSig, byte(txscript.SigHashAll)) - witnessStack[2] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[1] = append(senderSig.Serialize(), byte(senderSigHash)) + witnessStack[2] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[3] = paymentPreimage witnessStack[4] = signDesc.WitnessScript @@ -527,7 +568,7 @@ func ReceiverHtlcSpendRevokeWithKey(signer Signer, signDesc *SignDescriptor, // witness stack in order to force script execution to the HTLC // revocation clause. 
witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[1] = revokeKey.SerializeCompressed() witnessStack[2] = signDesc.WitnessScript @@ -592,7 +633,7 @@ func ReceiverHtlcSpendTimeout(signer Signer, signDesc *SignDescriptor, } witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[1] = nil witnessStack[2] = signDesc.WitnessScript @@ -697,7 +738,7 @@ func HtlcSpendSuccess(signer Signer, signDesc *SignDescriptor, // witness script), in order to force execution to the second portion // of the if clause. witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[1] = nil witnessStack[2] = signDesc.WitnessScript @@ -722,7 +763,7 @@ func HtlcSpendRevoke(signer Signer, signDesc *SignDescriptor, // witness script), in order to force execution to the revocation // clause in the second level HTLC script. witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[1] = []byte{1} witnessStack[2] = signDesc.WitnessScript @@ -753,7 +794,7 @@ func HtlcSecondLevelSpend(signer Signer, signDesc *SignDescriptor, // witness script), in order to force execution to the second portion // of the if clause. 
witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig, byte(txscript.SigHashAll)) + witnessStack[0] = append(sweepSig.Serialize(), byte(txscript.SigHashAll)) witnessStack[1] = nil witnessStack[2] = signDesc.WitnessScript @@ -828,18 +869,6 @@ func CommitScriptToSelf(csvTimeout uint32, selfKey, revokeKey *btcec.PublicKey) return builder.Script() } -// CommitScriptUnencumbered constructs the public key script on the commitment -// transaction paying to the "other" party. The constructed output is a normal -// p2wkh output spendable immediately, requiring no contestation period. -func CommitScriptUnencumbered(key *btcec.PublicKey) ([]byte, error) { - // This script goes to the "other" party, and is spendable immediately. - builder := txscript.NewScriptBuilder() - builder.AddOp(txscript.OP_0) - builder.AddData(btcutil.Hash160(key.SerializeCompressed())) - - return builder.Script() -} - // CommitSpendTimeout constructs a valid witness allowing the owner of a // particular commitment transaction to spend the output returning settled // funds back to themselves after a relative block timeout. In order to @@ -869,7 +898,7 @@ func CommitSpendTimeout(signer Signer, signDesc *SignDescriptor, // place an empty byte in order to ensure our script is still valid // from the PoV of nodes that are enforcing minimal OP_IF/OP_NOTIF. witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[1] = nil witnessStack[2] = signDesc.WitnessScript @@ -894,7 +923,7 @@ func CommitSpendRevoke(signer Signer, signDesc *SignDescriptor, // Place a 1 as the first item in the evaluated witness stack to // force script execution to the revocation clause. 
witnessStack := wire.TxWitness(make([][]byte, 3)) - witnessStack[0] = append(sweepSig, byte(signDesc.HashType)) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) witnessStack[1] = []byte{1} witnessStack[2] = signDesc.WitnessScript @@ -928,7 +957,7 @@ func CommitSpendNoDelay(signer Signer, signDesc *SignDescriptor, // exact same as a regular p2wkh witness, depending on the value of the // tweakless bool. witness := make([][]byte, 2) - witness[0] = append(sweepSig, byte(signDesc.HashType)) + witness[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) switch tweakless { // If we're tweaking the key, then we use the tweaked public key as the @@ -948,6 +977,137 @@ func CommitSpendNoDelay(signer Signer, signDesc *SignDescriptor, return witness, nil } +// CommitScriptUnencumbered constructs the public key script on the commitment +// transaction paying to the "other" party. The constructed output is a normal +// p2wkh output spendable immediately, requiring no contestation period. +func CommitScriptUnencumbered(key *btcec.PublicKey) ([]byte, error) { + // This script goes to the "other" party, and is spendable immediately. + builder := txscript.NewScriptBuilder() + builder.AddOp(txscript.OP_0) + builder.AddData(btcutil.Hash160(key.SerializeCompressed())) + + return builder.Script() +} + +// CommitScriptToRemoteConfirmed constructs the script for the output on the +// commitment transaction paying to the remote party of said commitment +// transaction. The money can only be spend after one confirmation. +// +// Possible Input Scripts: +// SWEEP: +// +// Output Script: +// OP_CHECKSIGVERIFY +// 1 OP_CHECKSEQUENCEVERIFY +func CommitScriptToRemoteConfirmed(key *btcec.PublicKey) ([]byte, error) { + builder := txscript.NewScriptBuilder() + + // Only the given key can spend the output. + builder.AddData(key.SerializeCompressed()) + builder.AddOp(txscript.OP_CHECKSIGVERIFY) + + // Check that the it has one confirmation. 
+ builder.AddOp(txscript.OP_1) + builder.AddOp(txscript.OP_CHECKSEQUENCEVERIFY) + + return builder.Script() +} + +// CommitSpendToRemoteConfirmed constructs a valid witness allowing a node to +// spend their settled output on the counterparty's commitment transaction when +// it has one confirmetion. This is used for the anchor channel type. The +// spending key will always be non-tweaked for this output type. +func CommitSpendToRemoteConfirmed(signer Signer, signDesc *SignDescriptor, + sweepTx *wire.MsgTx) (wire.TxWitness, error) { + + if signDesc.KeyDesc.PubKey == nil { + return nil, fmt.Errorf("cannot generate witness with nil " + + "KeyDesc pubkey") + } + + // Similar to non delayed output, only a signature is needed. + sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) + if err != nil { + return nil, err + } + + // Finally, we'll manually craft the witness. The witness here is the + // signature and the redeem script. + witnessStack := make([][]byte, 2) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) + witnessStack[1] = signDesc.WitnessScript + + return witnessStack, nil +} + +// CommitScriptAnchor constructs the script for the anchor output spendable by +// the given key immediately, or by anyone after 16 confirmations. +// +// Possible Input Scripts: +// By owner: +// By anyone (after 16 conf): +// +// Output Script: +// OP_CHECKSIG OP_IFDUP +// OP_NOTIF +// OP_16 OP_CSV +// OP_ENDIF +func CommitScriptAnchor(key *btcec.PublicKey) ([]byte, error) { + builder := txscript.NewScriptBuilder() + + // Spend immediately with key. + builder.AddData(key.SerializeCompressed()) + builder.AddOp(txscript.OP_CHECKSIG) + + // Duplicate the value if true, since it will be consumed by the NOTIF. + builder.AddOp(txscript.OP_IFDUP) + + // Otherwise spendable by anyone after 16 confirmations. 
+ builder.AddOp(txscript.OP_NOTIF) + builder.AddOp(txscript.OP_16) + builder.AddOp(txscript.OP_CHECKSEQUENCEVERIFY) + builder.AddOp(txscript.OP_ENDIF) + + return builder.Script() +} + +// CommitSpendAnchor constructs a valid witness allowing a node to spend their +// anchor output on the commitment transaction using their funding key. This is +// used for the anchor channel type. +func CommitSpendAnchor(signer Signer, signDesc *SignDescriptor, + sweepTx *wire.MsgTx) (wire.TxWitness, error) { + + if signDesc.KeyDesc.PubKey == nil { + return nil, fmt.Errorf("cannot generate witness with nil " + + "KeyDesc pubkey") + } + + // Create a signature. + sweepSig, err := signer.SignOutputRaw(sweepTx, signDesc) + if err != nil { + return nil, err + } + + // The witness here is just a signature and the redeem script. + witnessStack := make([][]byte, 2) + witnessStack[0] = append(sweepSig.Serialize(), byte(signDesc.HashType)) + witnessStack[1] = signDesc.WitnessScript + + return witnessStack, nil +} + +// CommitSpendAnchorAnyone constructs a witness allowing anyone to spend the +// anchor output after it has gotten 16 confirmations. Since no signing is +// required, only knowledge of the redeem script is necessary to spend it. +func CommitSpendAnchorAnyone(script []byte) (wire.TxWitness, error) { + // The witness here is just the redeem script. + witnessStack := make([][]byte, 2) + witnessStack[0] = nil + witnessStack[1] = script + + return witnessStack, nil +} + // SingleTweakBytes computes set of bytes we call the single tweak. The purpose // of the single tweak is to randomize all regular delay and payment base // points. 
To do this, we generate a hash that binds the commitment point to diff --git a/input/script_utils_test.go b/input/script_utils_test.go index 1c6c5bc9e5..5978d3eec1 100644 --- a/input/script_utils_test.go +++ b/input/script_utils_test.go @@ -15,6 +15,73 @@ import ( "github.com/lightningnetwork/lnd/keychain" ) +// assertEngineExecution executes the VM returned by the newEngine closure, +// asserting the result matches the validity expectation. In the case where it +// doesn't match the expectation, it executes the script step-by-step and +// prints debug information to stdout. +func assertEngineExecution(t *testing.T, testNum int, valid bool, + newEngine func() (*txscript.Engine, error)) { + t.Helper() + + // Get a new VM to execute. + vm, err := newEngine() + if err != nil { + t.Fatalf("unable to create engine: %v", err) + } + + // Execute the VM, only go on to the step-by-step execution if + // it doesn't validate as expected. + vmErr := vm.Execute() + if valid == (vmErr == nil) { + return + } + + // Now that the execution didn't match what we expected, fetch a new VM + // to step through. + vm, err = newEngine() + if err != nil { + t.Fatalf("unable to create engine: %v", err) + } + + // This buffer will trace execution of the Script, dumping out + // to stdout. 
+ var debugBuf bytes.Buffer + + done := false + for !done { + dis, err := vm.DisasmPC() + if err != nil { + t.Fatalf("stepping (%v)\n", err) + } + debugBuf.WriteString(fmt.Sprintf("stepping %v\n", dis)) + + done, err = vm.Step() + if err != nil && valid { + fmt.Println(debugBuf.String()) + t.Fatalf("spend test case #%v failed, spend "+ + "should be valid: %v", testNum, err) + } else if err == nil && !valid && done { + fmt.Println(debugBuf.String()) + t.Fatalf("spend test case #%v succeed, spend "+ + "should be invalid: %v", testNum, err) + } + + debugBuf.WriteString(fmt.Sprintf("Stack: %v", vm.GetStack())) + debugBuf.WriteString(fmt.Sprintf("AltStack: %v", vm.GetAltStack())) + } + + // If we get to this point the unexpected case was not reached + // during step execution, which happens for some checks, like + // the clean-stack rule. + validity := "invalid" + if valid { + validity = "valid" + } + + fmt.Println(debugBuf.String()) + t.Fatalf("%v spend test case #%v execution ended with: %v", validity, testNum, vmErr) +} + // TestRevocationKeyDerivation tests that given a public key, and a revocation // hash, the homomorphic revocation public and private key derivation work // properly. @@ -145,43 +212,6 @@ func TestHTLCSenderSpendValidation(t *testing.T) { // we'll be using Bob's base point for the revocation key. revocationKey := DeriveRevocationPubkey(bobKeyPub, commitPoint) - // Generate the raw HTLC redemption scripts, and its p2wsh counterpart. - htlcWitnessScript, err := SenderHTLCScript(aliceLocalKey, bobLocalKey, - revocationKey, paymentHash[:]) - if err != nil { - t.Fatalf("unable to create htlc sender script: %v", err) - } - htlcPkScript, err := WitnessScriptHash(htlcWitnessScript) - if err != nil { - t.Fatalf("unable to create p2wsh htlc script: %v", err) - } - - // This will be Alice's commitment transaction. 
In this scenario Alice - // is sending an HTLC to a node she has a path to (could be Bob, could - // be multiple hops down, it doesn't really matter). - htlcOutput := &wire.TxOut{ - Value: int64(paymentAmt), - PkScript: htlcPkScript, - } - senderCommitTx := wire.NewMsgTx(2) - senderCommitTx.AddTxIn(fakeFundingTxIn) - senderCommitTx.AddTxOut(htlcOutput) - - prevOut := &wire.OutPoint{ - Hash: senderCommitTx.TxHash(), - Index: 0, - } - - sweepTx := wire.NewMsgTx(2) - sweepTx.AddTxIn(wire.NewTxIn(prevOut, nil, nil)) - sweepTx.AddTxOut( - &wire.TxOut{ - PkScript: []byte("doesn't matter"), - Value: 1 * 10e8, - }, - ) - sweepTxSigHashes := txscript.NewTxSigHashes(sweepTx) - bobCommitTweak := SingleTweakBytes(commitPoint, bobKeyPub) aliceCommitTweak := SingleTweakBytes(commitPoint, aliceKeyPub) @@ -191,23 +221,99 @@ func TestHTLCSenderSpendValidation(t *testing.T) { bobSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{bobKeyPriv}} aliceSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{aliceKeyPriv}} - // We'll also generate a signature on the sweep transaction above - // that will act as Bob's signature to Alice for the second level HTLC - // transaction. - bobSignDesc := SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: bobKeyPub, - }, - SingleTweak: bobCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: txscript.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - bobRecvrSig, err := bobSigner.SignOutputRaw(sweepTx, &bobSignDesc) - if err != nil { - t.Fatalf("unable to generate alice signature: %v", err) + var ( + htlcWitnessScript, htlcPkScript []byte + htlcOutput *wire.TxOut + sweepTxSigHashes *txscript.TxSigHashes + senderCommitTx, sweepTx *wire.MsgTx + bobRecvrSig *btcec.Signature + bobSigHash txscript.SigHashType + ) + + // genCommitTx generates a commitment tx where the htlc output requires + // confirmation to be spent according to 'confirmed'. 
+ genCommitTx := func(confirmed bool) { + // Generate the raw HTLC redemption scripts, and its p2wsh + // counterpart. + htlcWitnessScript, err = SenderHTLCScript( + aliceLocalKey, bobLocalKey, revocationKey, + paymentHash[:], confirmed, + ) + if err != nil { + t.Fatalf("unable to create htlc sender script: %v", err) + } + htlcPkScript, err = WitnessScriptHash(htlcWitnessScript) + if err != nil { + t.Fatalf("unable to create p2wsh htlc script: %v", err) + } + + // This will be Alice's commitment transaction. In this + // scenario Alice is sending an HTLC to a node she has a path + // to (could be Bob, could be multiple hops down, it doesn't + // really matter). + htlcOutput = &wire.TxOut{ + Value: int64(paymentAmt), + PkScript: htlcPkScript, + } + senderCommitTx = wire.NewMsgTx(2) + senderCommitTx.AddTxIn(fakeFundingTxIn) + senderCommitTx.AddTxOut(htlcOutput) + } + + // genSweepTx generates a sweep of the senderCommitTx, and sets the + // sequence and sighash single|anyonecanspend if confirmed is true. + genSweepTx := func(confirmed bool) { + prevOut := &wire.OutPoint{ + Hash: senderCommitTx.TxHash(), + Index: 0, + } + + sweepTx = wire.NewMsgTx(2) + + sweepTx.AddTxIn(wire.NewTxIn(prevOut, nil, nil)) + if confirmed { + sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 1) + } + + sweepTx.AddTxOut( + &wire.TxOut{ + PkScript: []byte("doesn't matter"), + Value: 1 * 10e8, + }, + ) + + sweepTxSigHashes = txscript.NewTxSigHashes(sweepTx) + + bobSigHash = txscript.SigHashAll + if confirmed { + bobSigHash = txscript.SigHashSingle | txscript.SigHashAnyOneCanPay + } + + // We'll also generate a signature on the sweep transaction above + // that will act as Bob's signature to Alice for the second level HTLC + // transaction. 
+ bobSignDesc := SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + SingleTweak: bobCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: bobSigHash, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + bobSig, err := bobSigner.SignOutputRaw(sweepTx, &bobSignDesc) + if err != nil { + t.Fatalf("unable to generate alice signature: %v", err) + } + + bobRecvrSig, err = btcec.ParseDERSignature( + bobSig.Serialize(), btcec.S256(), + ) + if err != nil { + t.Fatalf("unable to parse signature: %v", err) + } } testCases := []struct { @@ -218,6 +324,9 @@ func TestHTLCSenderSpendValidation(t *testing.T) { // revoke w/ sig // TODO(roasbeef): test invalid revoke makeWitnessTestCase(t, func() (wire.TxWitness, error) { + genCommitTx(false) + genSweepTx(false) + signDesc := &SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: bobKeyPub, @@ -238,6 +347,9 @@ func TestHTLCSenderSpendValidation(t *testing.T) { { // HTLC with invalid preimage size makeWitnessTestCase(t, func() (wire.TxWitness, error) { + genCommitTx(false) + genSweepTx(false) + signDesc := &SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: bobKeyPub, @@ -261,6 +373,66 @@ func TestHTLCSenderSpendValidation(t *testing.T) { // HTLC with valid preimage size + sig // TODO(roasbeef): invalid preimage makeWitnessTestCase(t, func() (wire.TxWitness, error) { + genCommitTx(false) + genSweepTx(false) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + SingleTweak: bobCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return SenderHtlcSpendRedeem(bobSigner, signDesc, + sweepTx, paymentPreimage) + }), + true, + }, + { + // HTLC with valid preimage size + sig, and with + // enforced locktime in HTLC script. 
+ makeWitnessTestCase(t, func() (wire.TxWitness, error) { + // Make a commit tx that needs confirmation for + // HTLC output to be spent. + genCommitTx(true) + + // Generate a sweep with the locktime set. + genSweepTx(true) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + SingleTweak: bobCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return SenderHtlcSpendRedeem(bobSigner, signDesc, + sweepTx, paymentPreimage) + }), + true, + }, + { + // HTLC with valid preimage size + sig, but trying to + // spend CSV output without sequence set. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + // Generate commitment tx with 1 CSV locked + // HTLC. + genCommitTx(true) + + // Generate sweep tx that doesn't have locktime + // enabled. + genSweepTx(false) + signDesc := &SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: bobKeyPub, @@ -276,6 +448,34 @@ func TestHTLCSenderSpendValidation(t *testing.T) { return SenderHtlcSpendRedeem(bobSigner, signDesc, sweepTx, paymentPreimage) }), + false, + }, + + { + // valid spend to the transition the state of the HTLC + // output with the second level HTLC timeout + // transaction. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + genCommitTx(false) + genSweepTx(false) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + SingleTweak: aliceCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return SenderHtlcSpendTimeout( + bobRecvrSig, bobSigHash, aliceSigner, + signDesc, sweepTx, + ) + }), true, }, { @@ -283,6 +483,13 @@ func TestHTLCSenderSpendValidation(t *testing.T) { // output with the second level HTLC timeout // transaction. 
makeWitnessTestCase(t, func() (wire.TxWitness, error) { + // Make a commit tx that needs confirmation for + // HTLC output to be spent. + genCommitTx(true) + + // Generate a sweep with the locktime set. + genSweepTx(true) + signDesc := &SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: aliceKeyPub, @@ -295,11 +502,45 @@ func TestHTLCSenderSpendValidation(t *testing.T) { InputIndex: 0, } - return SenderHtlcSpendTimeout(bobRecvrSig, aliceSigner, - signDesc, sweepTx) + return SenderHtlcSpendTimeout( + bobRecvrSig, bobSigHash, aliceSigner, + signDesc, sweepTx, + ) }), true, }, + { + // valid spend to the transition the state of the HTLC + // output with the second level HTLC timeout + // transaction. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + // Generate commitment tx with 1 CSV locked + // HTLC. + genCommitTx(true) + + // Generate sweep tx that doesn't have locktime + // enabled. + genSweepTx(false) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + SingleTweak: aliceCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return SenderHtlcSpendTimeout( + bobRecvrSig, bobSigHash, aliceSigner, + signDesc, sweepTx, + ) + }), + false, + }, } // TODO(roasbeef): set of cases to ensure able to sign w/ keypath and @@ -308,39 +549,13 @@ func TestHTLCSenderSpendValidation(t *testing.T) { for i, testCase := range testCases { sweepTx.TxIn[0].Witness = testCase.witness() - vm, err := txscript.NewEngine(htlcPkScript, - sweepTx, 0, txscript.StandardVerifyFlags, nil, - nil, int64(paymentAmt)) - if err != nil { - t.Fatalf("unable to create engine: %v", err) + newEngine := func() (*txscript.Engine, error) { + return txscript.NewEngine(htlcPkScript, + sweepTx, 0, txscript.StandardVerifyFlags, nil, + nil, int64(paymentAmt)) } - // This buffer will trace execution of the Script, only dumping - // out to stdout in the 
case that a test fails. - var debugBuf bytes.Buffer - - done := false - for !done { - dis, err := vm.DisasmPC() - if err != nil { - t.Fatalf("stepping (%v)\n", err) - } - debugBuf.WriteString(fmt.Sprintf("stepping %v\n", dis)) - - done, err = vm.Step() - if err != nil && testCase.valid { - fmt.Println(debugBuf.String()) - t.Fatalf("spend test case #%v failed, spend "+ - "should be valid: %v", i, err) - } else if err == nil && !testCase.valid && done { - fmt.Println(debugBuf.String()) - t.Fatalf("spend test case #%v succeed, spend "+ - "should be invalid: %v", i, err) - } - - debugBuf.WriteString(fmt.Sprintf("Stack: %v", vm.GetStack())) - debugBuf.WriteString(fmt.Sprintf("AltStack: %v", vm.GetAltStack())) - } + assertEngineExecution(t, i, testCase.valid, newEngine) } } @@ -400,46 +615,6 @@ func TestHTLCReceiverSpendValidation(t *testing.T) { // be using Alice's base point for the revocation key. revocationKey := DeriveRevocationPubkey(aliceKeyPub, commitPoint) - // Generate the raw HTLC redemption scripts, and its p2wsh counterpart. - htlcWitnessScript, err := ReceiverHTLCScript(cltvTimeout, aliceLocalKey, - bobLocalKey, revocationKey, paymentHash[:]) - if err != nil { - t.Fatalf("unable to create htlc sender script: %v", err) - } - htlcPkScript, err := WitnessScriptHash(htlcWitnessScript) - if err != nil { - t.Fatalf("unable to create p2wsh htlc script: %v", err) - } - - // This will be Bob's commitment transaction. In this scenario Alice is - // sending an HTLC to a node she has a path to (could be Bob, could be - // multiple hops down, it doesn't really matter). 
- htlcOutput := &wire.TxOut{ - Value: int64(paymentAmt), - PkScript: htlcWitnessScript, - } - - receiverCommitTx := wire.NewMsgTx(2) - receiverCommitTx.AddTxIn(fakeFundingTxIn) - receiverCommitTx.AddTxOut(htlcOutput) - - prevOut := &wire.OutPoint{ - Hash: receiverCommitTx.TxHash(), - Index: 0, - } - - sweepTx := wire.NewMsgTx(2) - sweepTx.AddTxIn(&wire.TxIn{ - PreviousOutPoint: *prevOut, - }) - sweepTx.AddTxOut( - &wire.TxOut{ - PkScript: []byte("doesn't matter"), - Value: 1 * 10e8, - }, - ) - sweepTxSigHashes := txscript.NewTxSigHashes(sweepTx) - bobCommitTweak := SingleTweakBytes(commitPoint, bobKeyPub) aliceCommitTweak := SingleTweakBytes(commitPoint, aliceKeyPub) @@ -449,23 +624,95 @@ func TestHTLCReceiverSpendValidation(t *testing.T) { bobSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{bobKeyPriv}} aliceSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{aliceKeyPriv}} - // We'll also generate a signature on the sweep transaction above - // that will act as Alice's signature to Bob for the second level HTLC - // transaction. - aliceSignDesc := SignDescriptor{ - KeyDesc: keychain.KeyDescriptor{ - PubKey: aliceKeyPub, - }, - SingleTweak: aliceCommitTweak, - WitnessScript: htlcWitnessScript, - Output: htlcOutput, - HashType: txscript.SigHashAll, - SigHashes: sweepTxSigHashes, - InputIndex: 0, - } - aliceSenderSig, err := aliceSigner.SignOutputRaw(sweepTx, &aliceSignDesc) - if err != nil { - t.Fatalf("unable to generate alice signature: %v", err) + var ( + htlcWitnessScript, htlcPkScript []byte + htlcOutput *wire.TxOut + receiverCommitTx, sweepTx *wire.MsgTx + sweepTxSigHashes *txscript.TxSigHashes + aliceSenderSig *btcec.Signature + aliceSigHash txscript.SigHashType + ) + + genCommitTx := func(confirmed bool) { + // Generate the raw HTLC redemption scripts, and its p2wsh + // counterpart. 
+ htlcWitnessScript, err = ReceiverHTLCScript( + cltvTimeout, aliceLocalKey, bobLocalKey, revocationKey, + paymentHash[:], confirmed, + ) + if err != nil { + t.Fatalf("unable to create htlc sender script: %v", err) + } + htlcPkScript, err = WitnessScriptHash(htlcWitnessScript) + if err != nil { + t.Fatalf("unable to create p2wsh htlc script: %v", err) + } + + // This will be Bob's commitment transaction. In this scenario Alice is + // sending an HTLC to a node she has a path to (could be Bob, could be + // multiple hops down, it doesn't really matter). + htlcOutput = &wire.TxOut{ + Value: int64(paymentAmt), + PkScript: htlcWitnessScript, + } + + receiverCommitTx = wire.NewMsgTx(2) + receiverCommitTx.AddTxIn(fakeFundingTxIn) + receiverCommitTx.AddTxOut(htlcOutput) + } + + genSweepTx := func(confirmed bool) { + prevOut := &wire.OutPoint{ + Hash: receiverCommitTx.TxHash(), + Index: 0, + } + + sweepTx = wire.NewMsgTx(2) + sweepTx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: *prevOut, + }) + if confirmed { + sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 1) + } + + sweepTx.AddTxOut( + &wire.TxOut{ + PkScript: []byte("doesn't matter"), + Value: 1 * 10e8, + }, + ) + sweepTxSigHashes = txscript.NewTxSigHashes(sweepTx) + + aliceSigHash = txscript.SigHashAll + if confirmed { + aliceSigHash = txscript.SigHashSingle | txscript.SigHashAnyOneCanPay + } + + // We'll also generate a signature on the sweep transaction above + // that will act as Alice's signature to Bob for the second level HTLC + // transaction. 
+ aliceSignDesc := SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + SingleTweak: aliceCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: aliceSigHash, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + aliceSig, err := aliceSigner.SignOutputRaw(sweepTx, &aliceSignDesc) + if err != nil { + t.Fatalf("unable to generate alice signature: %v", err) + } + + aliceSenderSig, err = btcec.ParseDERSignature( + aliceSig.Serialize(), btcec.S256(), + ) + if err != nil { + t.Fatalf("unable to parse signature: %v", err) + } } // TODO(roasbeef): modify valid to check precise script errors? @@ -476,6 +723,9 @@ func TestHTLCReceiverSpendValidation(t *testing.T) { { // HTLC redemption w/ invalid preimage size makeWitnessTestCase(t, func() (wire.TxWitness, error) { + genCommitTx(false) + genSweepTx(false) + signDesc := &SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: bobKeyPub, @@ -488,9 +738,11 @@ func TestHTLCReceiverSpendValidation(t *testing.T) { InputIndex: 0, } - return ReceiverHtlcSpendRedeem(aliceSenderSig, + return ReceiverHtlcSpendRedeem( + aliceSenderSig, aliceSigHash, bytes.Repeat([]byte{1}, 45), bobSigner, - signDesc, sweepTx) + signDesc, sweepTx, + ) }), false, @@ -498,6 +750,9 @@ func TestHTLCReceiverSpendValidation(t *testing.T) { { // HTLC redemption w/ valid preimage size makeWitnessTestCase(t, func() (wire.TxWitness, error) { + genCommitTx(false) + genSweepTx(false) + signDesc := &SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: bobKeyPub, @@ -510,15 +765,20 @@ func TestHTLCReceiverSpendValidation(t *testing.T) { InputIndex: 0, } - return ReceiverHtlcSpendRedeem(aliceSenderSig, - paymentPreimage[:], bobSigner, - signDesc, sweepTx) + return ReceiverHtlcSpendRedeem( + aliceSenderSig, aliceSigHash, + paymentPreimage, bobSigner, + signDesc, sweepTx, + ) }), true, }, { // revoke w/ sig makeWitnessTestCase(t, func() (wire.TxWitness, error) { + genCommitTx(false) + genSweepTx(false) + 
signDesc := &SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: aliceKeyPub, @@ -536,9 +796,76 @@ func TestHTLCReceiverSpendValidation(t *testing.T) { }), true, }, + { + // HTLC redemption w/ valid preimage size, and with + // enforced locktime in HTLC scripts. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + // Make a commit tx that needs confirmation for + // HTLC output to be spent. + genCommitTx(true) + + // Generate a sweep with the locktime set. + genSweepTx(true) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + SingleTweak: bobCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return ReceiverHtlcSpendRedeem( + aliceSenderSig, aliceSigHash, + paymentPreimage, bobSigner, + signDesc, sweepTx, + ) + }), + true, + }, + { + // HTLC redemption w/ valid preimage size, but trying + // to spend CSV output without sequence set. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + // Generate commitment tx with 1 CSV locked + // HTLC. + genCommitTx(true) + + // Generate sweep tx that doesn't have locktime + // enabled. 
+ genSweepTx(false) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: bobKeyPub, + }, + SingleTweak: bobCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return ReceiverHtlcSpendRedeem( + aliceSenderSig, aliceSigHash, + paymentPreimage, bobSigner, signDesc, + sweepTx, + ) + }), + false, + }, + { // refund w/ invalid lock time makeWitnessTestCase(t, func() (wire.TxWitness, error) { + genCommitTx(false) + genSweepTx(false) + signDesc := &SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: aliceKeyPub, @@ -559,6 +886,37 @@ func TestHTLCReceiverSpendValidation(t *testing.T) { { // refund w/ valid lock time makeWitnessTestCase(t, func() (wire.TxWitness, error) { + genCommitTx(false) + genSweepTx(false) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + SingleTweak: aliceCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return ReceiverHtlcSpendTimeout(aliceSigner, signDesc, + sweepTx, int32(cltvTimeout)) + }), + true, + }, + { + // refund w/ valid lock time, and enforced locktime in + // HTLC scripts. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + // Make a commit tx that needs confirmation for + // HTLC output to be spent. + genCommitTx(true) + + // Generate a sweep with the locktime set. + genSweepTx(true) + signDesc := &SignDescriptor{ KeyDesc: keychain.KeyDescriptor{ PubKey: aliceKeyPub, @@ -576,42 +934,47 @@ func TestHTLCReceiverSpendValidation(t *testing.T) { }), true, }, + { + // refund w/ valid lock time, but no sequence set in + // sweep tx trying to spend CSV locked HTLC output. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + // Generate commitment tx with 1 CSV locked + // HTLC. 
+ genCommitTx(true) + + // Generate sweep tx that doesn't have locktime + // enabled. + genSweepTx(false) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + SingleTweak: aliceCommitTweak, + WitnessScript: htlcWitnessScript, + Output: htlcOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return ReceiverHtlcSpendTimeout(aliceSigner, signDesc, + sweepTx, int32(cltvTimeout)) + }), + false, + }, } for i, testCase := range testCases { sweepTx.TxIn[0].Witness = testCase.witness() - vm, err := txscript.NewEngine(htlcPkScript, - sweepTx, 0, txscript.StandardVerifyFlags, nil, - nil, int64(paymentAmt)) - if err != nil { - t.Fatalf("unable to create engine: %v", err) + newEngine := func() (*txscript.Engine, error) { + return txscript.NewEngine(htlcPkScript, + sweepTx, 0, txscript.StandardVerifyFlags, nil, + nil, int64(paymentAmt)) } - // This buffer will trace execution of the Script, only dumping - // out to stdout in the case that a test fails. 
- var debugBuf bytes.Buffer - - done := false - for !done { - dis, err := vm.DisasmPC() - if err != nil { - t.Fatalf("stepping (%v)\n", err) - } - debugBuf.WriteString(fmt.Sprintf("stepping %v\n", dis)) - - done, err = vm.Step() - if err != nil && testCase.valid { - fmt.Println(debugBuf.String()) - t.Fatalf("spend test case #%v failed, spend should be valid: %v", i, err) - } else if err == nil && !testCase.valid && done { - fmt.Println(debugBuf.String()) - t.Fatalf("spend test case #%v succeed, spend should be invalid: %v", i, err) - } - - debugBuf.WriteString(fmt.Sprintf("Stack: %v", vm.GetStack())) - debugBuf.WriteString(fmt.Sprintf("AltStack: %v", vm.GetAltStack())) - } + assertEngineExecution(t, i, testCase.valid, newEngine) } } @@ -811,39 +1174,227 @@ func TestSecondLevelHtlcSpends(t *testing.T) { for i, testCase := range testCases { sweepTx.TxIn[0].Witness = testCase.witness() - vm, err := txscript.NewEngine(htlcPkScript, - sweepTx, 0, txscript.StandardVerifyFlags, nil, - nil, int64(htlcAmt)) - if err != nil { - t.Fatalf("unable to create engine: %v", err) + newEngine := func() (*txscript.Engine, error) { + return txscript.NewEngine(htlcPkScript, + sweepTx, 0, txscript.StandardVerifyFlags, nil, + nil, int64(htlcAmt)) + } + + assertEngineExecution(t, i, testCase.valid, newEngine) + } +} + +// TestCommitSpendToRemoteConfirmed checks that the delayed version of the +// to_remote version can only be spent by the owner, and after one +// confirmation. 
+func TestCommitSpendToRemoteConfirmed(t *testing.T) { + t.Parallel() + + const outputVal = btcutil.Amount(2 * 10e8) + + aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), + testWalletPrivKey) + + txid, err := chainhash.NewHash(testHdSeed.CloneBytes()) + if err != nil { + t.Fatalf("unable to create txid: %v", err) + } + commitOut := &wire.OutPoint{ + Hash: *txid, + Index: 0, + } + commitScript, err := CommitScriptToRemoteConfirmed(aliceKeyPub) + if err != nil { + t.Fatalf("unable to create htlc script: %v", err) + } + commitPkScript, err := WitnessScriptHash(commitScript) + if err != nil { + t.Fatalf("unable to create htlc output: %v", err) + } + + commitOutput := &wire.TxOut{ + PkScript: commitPkScript, + Value: int64(outputVal), + } + + sweepTx := wire.NewMsgTx(2) + sweepTx.AddTxIn(wire.NewTxIn(commitOut, nil, nil)) + sweepTx.AddTxOut( + &wire.TxOut{ + PkScript: []byte("doesn't matter"), + Value: 1 * 10e8, + }, + ) + + aliceSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{aliceKeyPriv}} + + testCases := []struct { + witness func() wire.TxWitness + valid bool + }{ + { + // Alice can spend after the a CSV delay has passed. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 1) + sweepTxSigHashes := txscript.NewTxSigHashes(sweepTx) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + WitnessScript: commitScript, + Output: commitOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return CommitSpendToRemoteConfirmed(aliceSigner, signDesc, + sweepTx) + }), + true, + }, + { + // Alice cannot spend output without sequence set. 
+ makeWitnessTestCase(t, func() (wire.TxWitness, error) { + sweepTx.TxIn[0].Sequence = wire.MaxTxInSequenceNum + sweepTxSigHashes := txscript.NewTxSigHashes(sweepTx) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + WitnessScript: commitScript, + Output: commitOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return CommitSpendToRemoteConfirmed(aliceSigner, signDesc, + sweepTx) + }), + false, + }, + } + + for i, testCase := range testCases { + sweepTx.TxIn[0].Witness = testCase.witness() + + newEngine := func() (*txscript.Engine, error) { + return txscript.NewEngine(commitPkScript, + sweepTx, 0, txscript.StandardVerifyFlags, nil, + nil, int64(outputVal)) } - // This buffer will trace execution of the Script, only dumping - // out to stdout in the case that a test fails. - var debugBuf bytes.Buffer - - done := false - for !done { - dis, err := vm.DisasmPC() - if err != nil { - t.Fatalf("stepping (%v)\n", err) - } - debugBuf.WriteString(fmt.Sprintf("stepping %v\n", dis)) - - done, err = vm.Step() - if err != nil && testCase.valid { - fmt.Println(debugBuf.String()) - t.Fatalf("spend test case #%v failed, spend "+ - "should be valid: %v", i, err) - } else if err == nil && !testCase.valid && done { - fmt.Println(debugBuf.String()) - t.Fatalf("spend test case #%v succeed, spend "+ - "should be invalid: %v", i, err) - } - - debugBuf.WriteString(fmt.Sprintf("Stack: %v", vm.GetStack())) - debugBuf.WriteString(fmt.Sprintf("AltStack: %v", vm.GetAltStack())) + assertEngineExecution(t, i, testCase.valid, newEngine) + } +} + +// TestSpendAnchor checks that we can spend the anchors using the various spend +// paths. +func TestSpendAnchor(t *testing.T) { + t.Parallel() + + const anchorSize = 294 + + // First we'll set up some initial key state for Alice. 
+ aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), + testWalletPrivKey) + + // Create a fake anchor outpoint that we'll use to generate the + // sweeping transaction. + txid, err := chainhash.NewHash(testHdSeed.CloneBytes()) + if err != nil { + t.Fatalf("unable to create txid: %v", err) + } + anchorOutPoint := &wire.OutPoint{ + Hash: *txid, + Index: 0, + } + + sweepTx := wire.NewMsgTx(2) + sweepTx.AddTxIn(wire.NewTxIn(anchorOutPoint, nil, nil)) + sweepTx.AddTxOut( + &wire.TxOut{ + PkScript: []byte("doesn't matter"), + Value: 1 * 10e8, + }, + ) + + // Generate the anchor script that can be spent by Alice immediately, + // or by anyone after 16 blocks. + anchorScript, err := CommitScriptAnchor(aliceKeyPub) + if err != nil { + t.Fatalf("unable to create htlc script: %v", err) + } + anchorPkScript, err := WitnessScriptHash(anchorScript) + if err != nil { + t.Fatalf("unable to create htlc output: %v", err) + } + + anchorOutput := &wire.TxOut{ + PkScript: anchorPkScript, + Value: int64(anchorSize), + } + + // Create mock signer for Alice. + aliceSigner := &MockSigner{Privkeys: []*btcec.PrivateKey{aliceKeyPriv}} + + testCases := []struct { + witness func() wire.TxWitness + valid bool + }{ + { + // Alice can spend immediately. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + sweepTx.TxIn[0].Sequence = wire.MaxTxInSequenceNum + sweepTxSigHashes := txscript.NewTxSigHashes(sweepTx) + + signDesc := &SignDescriptor{ + KeyDesc: keychain.KeyDescriptor{ + PubKey: aliceKeyPub, + }, + WitnessScript: anchorScript, + Output: anchorOutput, + HashType: txscript.SigHashAll, + SigHashes: sweepTxSigHashes, + InputIndex: 0, + } + + return CommitSpendAnchor(aliceSigner, signDesc, + sweepTx) + }), + true, + }, + { + // Anyone can spend after 16 blocks. 
+ makeWitnessTestCase(t, func() (wire.TxWitness, error) { + sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 16) + return CommitSpendAnchorAnyone(anchorScript) + }), + true, + }, + { + // Anyone cannot spend before 16 blocks. + makeWitnessTestCase(t, func() (wire.TxWitness, error) { + sweepTx.TxIn[0].Sequence = LockTimeToSequence(false, 15) + return CommitSpendAnchorAnyone(anchorScript) + }), + false, + }, + } + + for i, testCase := range testCases { + sweepTx.TxIn[0].Witness = testCase.witness() + + newEngine := func() (*txscript.Engine, error) { + return txscript.NewEngine(anchorPkScript, + sweepTx, 0, txscript.StandardVerifyFlags, nil, + nil, int64(anchorSize)) } + + assertEngineExecution(t, i, testCase.valid, newEngine) } } diff --git a/input/signdescriptor.go b/input/signdescriptor.go index 0dfb5d464c..2aa2af9c20 100644 --- a/input/signdescriptor.go +++ b/input/signdescriptor.go @@ -17,9 +17,9 @@ var ( ErrTweakOverdose = errors.New("sign descriptor should only have one tweak") ) -// SignDescriptor houses the necessary information required to successfully sign -// a given output. This struct is used by the Signer interface in order to gain -// access to critical data needed to generate a valid signature. +// SignDescriptor houses the necessary information required to successfully +// sign a given segwit output. This struct is used by the Signer interface in +// order to gain access to critical data needed to generate a valid signature. type SignDescriptor struct { // KeyDesc is a descriptor that precisely describes *which* key to use // for signing. This may provide the raw public key directly, or @@ -56,8 +56,9 @@ type SignDescriptor struct { DoubleTweak *btcec.PrivateKey // WitnessScript is the full script required to properly redeem the - // output. This field will only be populated if a p2wsh or a p2sh - // output is being signed. + // output. This field should be set to the full script if a p2wsh + // output is being signed. 
For p2wkh it should be set to the hashed + // script (PkScript). WitnessScript []byte // Output is the target output which should be signed. The PkScript and diff --git a/input/signer.go b/input/signer.go index cd32025562..86622638ea 100644 --- a/input/signer.go +++ b/input/signer.go @@ -14,7 +14,8 @@ type Signer interface { // according to the data within the passed SignDescriptor. // // NOTE: The resulting signature should be void of a sighash byte. - SignOutputRaw(tx *wire.MsgTx, signDesc *SignDescriptor) ([]byte, error) + SignOutputRaw(tx *wire.MsgTx, + signDesc *SignDescriptor) (Signature, error) // ComputeInputScript generates a complete InputIndex for the passed // transaction with the signature as defined within the passed diff --git a/input/size.go b/input/size.go index 07055dc849..6cebc28241 100644 --- a/input/size.go +++ b/input/size.go @@ -5,15 +5,6 @@ import ( "github.com/btcsuite/btcd/wire" ) -const ( - // CommitWeight is the weight of the base commitment transaction which - // includes: one p2wsh input, out p2wkh output, and one p2wsh output. - CommitWeight int64 = 724 - - // HtlcWeight is the weight of an HTLC output. - HtlcWeight int64 = 172 -) - const ( // witnessScaleFactor determines the level of "discount" witness data // receives compared to "base" data. 
A scale factor of 4, denotes that @@ -34,12 +25,22 @@ const ( // - PublicKeyHASH160: 20 bytes P2WPKHSize = 1 + 1 + 20 + // NestedP2WPKHSize 23 bytes + // - OP_DATA: 1 byte (P2WPKHSize) + // - P2WPKHWitnessProgram: 22 bytes + NestedP2WPKHSize = 1 + P2WPKHSize + // P2WSHSize 34 bytes // - OP_0: 1 byte // - OP_DATA: 1 byte (WitnessScriptSHA256 length) // - WitnessScriptSHA256: 32 bytes P2WSHSize = 1 + 1 + 32 + // NestedP2WSHSize 35 bytes + // - OP_DATA: 1 byte (P2WSHSize) + // - P2WSHWitnessProgram: 34 bytes + NestedP2WSHSize = 1 + P2WSHSize + // P2PKHOutputSize 34 bytes // - value: 8 bytes // - var_int: 1 byte (pkscript_length) @@ -89,7 +90,7 @@ const ( // - OP_CHECKMULTISIG: 1 byte MultiSigSize = 1 + 1 + 33 + 1 + 33 + 1 + 1 - // WitnessSize 222 bytes + // MultiSigWitnessSize 222 bytes // - NumberOfWitnessElements: 1 byte // - NilLength: 1 byte // - sigAliceLength: 1 byte @@ -98,7 +99,7 @@ const ( // - sigBob: 73 bytes // - WitnessScriptLength: 1 byte // - WitnessScript (MultiSig) - WitnessSize = 1 + 1 + 1 + 73 + 1 + 73 + 1 + MultiSigSize + MultiSigWitnessSize = 1 + 1 + 1 + 73 + 1 + 73 + 1 + MultiSigSize // InputSize 41 bytes // - PreviousOutPoint: @@ -131,6 +132,12 @@ const ( // - PkScript (P2WPKH) CommitmentKeyHashOutput = 8 + 1 + P2WPKHSize + // CommitmentAnchorOutput 43 bytes + // - Value: 8 bytes + // - VarInt: 1 byte (PkScript length) + // - PkScript (P2WSH) + CommitmentAnchorOutput = 8 + 1 + P2WSHSize + // HTLCSize 43 bytes // - Value: 8 bytes // - VarInt: 1 byte (PkScript length) @@ -166,7 +173,33 @@ const ( BaseCommitmentTxWeight = witnessScaleFactor * BaseCommitmentTxSize // WitnessCommitmentTxWeight 224 weight - WitnessCommitmentTxWeight = WitnessHeaderSize + WitnessSize + WitnessCommitmentTxWeight = WitnessHeaderSize + MultiSigWitnessSize + + // BaseAnchorCommitmentTxSize 225 + 43 * num-htlc-outputs bytes + // - Version: 4 bytes + // - WitnessHeader <---- part of the witness data + // - CountTxIn: 1 byte + // - TxIn: 41 bytes + // FundingInput + // - 
CountTxOut: 3 byte + // - TxOut: 4*43 + 43 * num-htlc-outputs bytes + // OutputPayingToThem, + // OutputPayingToUs, + // AnchorPayingToThem, + // AnchorPayingToUs, + // ....HTLCOutputs... + // - LockTime: 4 bytes + BaseAnchorCommitmentTxSize = 4 + 1 + FundingInputSize + 3 + + 2*CommitmentDelayOutput + 2*CommitmentAnchorOutput + 4 + + // BaseAnchorCommitmentTxWeight 900 weight + BaseAnchorCommitmentTxWeight = witnessScaleFactor * BaseAnchorCommitmentTxSize + + // CommitWeight 724 weight + CommitWeight = BaseCommitmentTxWeight + WitnessCommitmentTxWeight + + // AnchorCommitWeight 1124 weight + AnchorCommitWeight = BaseAnchorCommitmentTxWeight + WitnessCommitmentTxWeight // HTLCWeight 172 weight HTLCWeight = witnessScaleFactor * HTLCSize @@ -179,6 +212,23 @@ const ( // which will transition an incoming HTLC to the delay-and-claim state. HtlcSuccessWeight = 703 + // HtlcConfirmedScriptOverhead is the extra length of an HTLC script + // that requires confirmation before it can be spent. These extra bytes + // is a result of the extra CSV check. + HtlcConfirmedScriptOverhead = 3 + + // HtlcTimeoutWeightConfirmed is the weight of the HTLC timeout + // transaction which will transition an outgoing HTLC to the + // delay-and-claim state, for the confirmed HTLC outputs. It is 3 bytes + // larger because of the additional CSV check in the input script. + HtlcTimeoutWeightConfirmed = HtlcTimeoutWeight + HtlcConfirmedScriptOverhead + + // HtlcSuccessWeightCOnfirmed is the weight of the HTLC success + // transaction which will transition an incoming HTLC to the + // delay-and-claim state, for the confirmed HTLC outputs. It is 3 bytes + // larger because of the cdditional CSV check in the input script. + HtlcSuccessWeightConfirmed = HtlcSuccessWeight + HtlcConfirmedScriptOverhead + // MaxHTLCNumber is the maximum number HTLCs which can be included in a // commitment transaction. 
This limit was chosen such that, in the case // of a contract breach, the punishment transaction is able to sweep @@ -210,16 +260,33 @@ const ( // - witness_script (to_local_script) ToLocalTimeoutWitnessSize = 1 + 1 + 73 + 1 + 1 + ToLocalScriptSize - // ToLocalPenaltyWitnessSize 156 bytes + // ToLocalPenaltyWitnessSize 157 bytes // - number_of_witness_elements: 1 byte // - revocation_sig_length: 1 byte // - revocation_sig: 73 bytes + // - OP_TRUE_length: 1 byte // - OP_TRUE: 1 byte // - witness_script_length: 1 byte // - witness_script (to_local_script) - ToLocalPenaltyWitnessSize = 1 + 1 + 73 + 1 + 1 + ToLocalScriptSize + ToLocalPenaltyWitnessSize = 1 + 1 + 73 + 1 + 1 + 1 + ToLocalScriptSize + + // ToRemoteConfirmedScriptSize 37 bytes + // - OP_DATA: 1 byte + // - to_remote_key: 33 bytes + // - OP_CHECKSIGVERIFY: 1 byte + // - OP_1: 1 byte + // - OP_CHECKSEQUENCEVERIFY: 1 byte + ToRemoteConfirmedScriptSize = 1 + 33 + 1 + 1 + 1 + + // ToRemoteConfirmedWitnessSize 113 bytes + // - number_of_witness_elements: 1 byte + // - sig_length: 1 byte + // - sig: 73 bytes + // - witness_script_length: 1 byte + // - witness_script (to_remote_delayed_script) + ToRemoteConfirmedWitnessSize = 1 + 1 + 73 + 1 + ToRemoteConfirmedScriptSize - // AcceptedHtlcScriptSize 139 bytes + // AcceptedHtlcScriptSize 143 bytes // - OP_DUP: 1 byte // - OP_HASH160: 1 byte // - OP_DATA: 1 byte (RIPEMD160(SHA256(revocationkey)) length) @@ -232,6 +299,7 @@ const ( // - remotekey: 33 bytes // - OP_SWAP: 1 byte // - OP_SIZE: 1 byte + // - OP_DATA: 1 byte (32 length) // - 32: 1 byte // - OP_EQUAL: 1 byte // - OP_IF: 1 byte @@ -253,11 +321,14 @@ const ( // - OP_DROP: 1 byte // - OP_CHECKSIG: 1 byte // - OP_ENDIF: 1 byte + // - OP_1: 1 byte // These 3 extra bytes are used for both confirmed and regular + // - OP_CSV: 1 byte // HTLC script types. The size won't be correct in all cases, + // - OP_DROP: 1 byte // but it is just an upper bound used for fee estimation in any case. 
// - OP_ENDIF: 1 byte - AcceptedHtlcScriptSize = 3*1 + 20 + 5*1 + 33 + 7*1 + 20 + 4*1 + - 33 + 5*1 + 4 + 5*1 + AcceptedHtlcScriptSize = 3*1 + 20 + 5*1 + 33 + 8*1 + 20 + 4*1 + + 33 + 5*1 + 4 + 8*1 - // AcceptedHtlcTimeoutWitnessSize 216 + // AcceptedHtlcTimeoutWitnessSize 219 // - number_of_witness_elements: 1 byte // - sender_sig_length: 1 byte // - sender_sig: 73 bytes @@ -266,6 +337,16 @@ const ( // - witness_script: (accepted_htlc_script) AcceptedHtlcTimeoutWitnessSize = 1 + 1 + 73 + 1 + 1 + AcceptedHtlcScriptSize + // AcceptedHtlcPenaltyWitnessSize 252 bytes + // - number_of_witness_elements: 1 byte + // - revocation_sig_length: 1 byte + // - revocation_sig: 73 bytes + // - revocation_key_length: 1 byte + // - revocation_key: 33 bytes + // - witness_script_length: 1 byte + // - witness_script (accepted_htlc_script) + AcceptedHtlcPenaltyWitnessSize = 1 + 1 + 73 + 1 + 33 + 1 + AcceptedHtlcScriptSize + // AcceptedHtlcSuccessWitnessSize 322 bytes // - number_of_witness_elements: 1 byte // - nil_length: 1 byte @@ -277,19 +358,10 @@ const ( // - preimage: 32 bytes // - witness_script_length: 1 byte // - witness_script (accepted_htlc_script) - AcceptedHtlcSuccessWitnessSize = 1 + 1 + 73 + 1 + 73 + 1 + 32 + 1 + AcceptedHtlcScriptSize - - // AcceptedHtlcPenaltyWitnessSize 249 bytes - // - number_of_witness_elements: 1 byte - // - revocation_sig_length: 1 byte - // - revocation_sig: 73 bytes - // - revocation_key_length: 1 byte - // - revocation_key: 33 bytes - // - witness_script_length: 1 byte - // - witness_script (accepted_htlc_script) - AcceptedHtlcPenaltyWitnessSize = 1 + 1 + 73 + 1 + 33 + 1 + AcceptedHtlcScriptSize + AcceptedHtlcSuccessWitnessSize = 1 + 1 + 1 + 73 + 1 + 73 + 1 + 32 + 1 + + AcceptedHtlcScriptSize - // OfferedHtlcScriptSize 133 bytes + // OfferedHtlcScriptSize 136 bytes // - OP_DUP: 1 byte // - OP_HASH160: 1 byte // - OP_DATA: 1 byte (RIPEMD160(SHA256(revocationkey)) length) @@ -320,8 +392,21 @@ const ( // - OP_EQUALVERIFY: 1 byte // - OP_CHECKSIG: 
1 byte // - OP_ENDIF: 1 byte + // - OP_1: 1 byte + // - OP_CSV: 1 byte + // - OP_DROP: 1 byte // - OP_ENDIF: 1 byte - OfferedHtlcScriptSize = 3*1 + 20 + 5*1 + 33 + 10*1 + 33 + 5*1 + 20 + 4*1 + OfferedHtlcScriptSize = 3*1 + 20 + 5*1 + 33 + 10*1 + 33 + 5*1 + 20 + 7*1 + + // OfferedHtlcSuccessWitnessSize 245 bytes + // - number_of_witness_elements: 1 byte + // - receiver_sig_length: 1 byte + // - receiver_sig: 73 bytes + // - payment_preimage_length: 1 byte + // - payment_preimage: 32 bytes + // - witness_script_length: 1 byte + // - witness_script (offered_htlc_script) + OfferedHtlcSuccessWitnessSize = 1 + 1 + 73 + 1 + 32 + 1 + OfferedHtlcScriptSize // OfferedHtlcTimeoutWitnessSize 285 bytes // - number_of_witness_elements: 1 byte @@ -335,20 +420,7 @@ const ( // - witness_script (offered_htlc_script) OfferedHtlcTimeoutWitnessSize = 1 + 1 + 1 + 73 + 1 + 73 + 1 + 1 + OfferedHtlcScriptSize - // OfferedHtlcSuccessWitnessSize 317 bytes - // - number_of_witness_elements: 1 byte - // - nil_length: 1 byte - // - receiver_sig_length: 1 byte - // - receiver_sig: 73 bytes - // - sender_sig_length: 1 byte - // - sender_sig: 73 bytes - // - payment_preimage_length: 1 byte - // - payment_preimage: 32 bytes - // - witness_script_length: 1 byte - // - witness_script (offered_htlc_script) - OfferedHtlcSuccessWitnessSize = 1 + 1 + 1 + 73 + 1 + 73 + 1 + 32 + 1 + OfferedHtlcScriptSize - - // OfferedHtlcPenaltyWitnessSize 243 bytes + // OfferedHtlcPenaltyWitnessSize 246 bytes // - number_of_witness_elements: 1 byte // - revocation_sig_length: 1 byte // - revocation_sig: 73 bytes @@ -357,6 +429,25 @@ const ( // - witness_script_length: 1 byte // - witness_script (offered_htlc_script) OfferedHtlcPenaltyWitnessSize = 1 + 1 + 73 + 1 + 33 + 1 + OfferedHtlcScriptSize + + // AnchorScriptSize 40 bytes + // - pubkey_length: 1 byte + // - pubkey: 33 bytes + // - OP_CHECKSIG: 1 byte + // - OP_IFDUP: 1 byte + // - OP_NOTIF: 1 byte + // - OP_16: 1 byte + // - OP_CSV 1 byte + // - OP_ENDIF: 1 byte + 
AnchorScriptSize = 1 + 33 + 6*1 + + // AnchorWitnessSize 116 bytes + // - number_of_witnes_elements: 1 byte + // - signature_length: 1 byte + // - signature: 73 bytes + // - witness_script_length: 1 byte + // - witness_script (anchor_script) + AnchorWitnessSize = 1 + 1 + 73 + 1 + AnchorScriptSize ) // EstimateCommitTxWeight estimate commitment transaction weight depending on @@ -423,9 +514,9 @@ func (twe *TxWeightEstimator) AddWitnessInput(witnessSize int) *TxWeightEstimato // AddNestedP2WKHInput updates the weight estimate to account for an additional // input spending a P2SH output with a nested P2WKH redeem script. func (twe *TxWeightEstimator) AddNestedP2WKHInput() *TxWeightEstimator { - twe.inputSize += InputSize + P2WPKHSize + twe.inputSize += InputSize + NestedP2WPKHSize twe.inputWitnessSize += P2WKHWitnessSize - twe.inputSize++ + twe.inputCount++ twe.hasWitness = true return twe @@ -434,9 +525,9 @@ func (twe *TxWeightEstimator) AddNestedP2WKHInput() *TxWeightEstimator { // AddNestedP2WSHInput updates the weight estimate to account for an additional // input spending a P2SH output with a nested P2WSH redeem script. 
func (twe *TxWeightEstimator) AddNestedP2WSHInput(witnessSize int) *TxWeightEstimator { - twe.inputSize += InputSize + P2WSHSize + twe.inputSize += InputSize + NestedP2WSHSize twe.inputWitnessSize += witnessSize - twe.inputSize++ + twe.inputCount++ twe.hasWitness = true return twe diff --git a/input/size_test.go b/input/size_test.go new file mode 100644 index 0000000000..c7c2dc1b3a --- /dev/null +++ b/input/size_test.go @@ -0,0 +1,847 @@ +package input_test + +import ( + "math/big" + "testing" + + "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/keychain" +) + +const ( + testCSVDelay = (1 << 31) - 1 + + testCLTVExpiry = 500000000 + + // maxDERSignatureSize is the largest possible DER-encoded signature + // without the trailing sighash flag. + maxDERSignatureSize = 72 +) + +var ( + testPubkeyBytes = make([]byte, 33) + + testHash160 = make([]byte, 20) + testPreimage = make([]byte, 32) + + // testPubkey is a pubkey used in script size calculation. + testPubkey = &btcec.PublicKey{ + X: &big.Int{}, + Y: &big.Int{}, + } + + testPrivkey, _ = btcec.PrivKeyFromBytes(btcec.S256(), make([]byte, 32)) + + testTx = wire.NewMsgTx(2) +) + +// TestTxWeightEstimator tests that transaction weight estimates are calculated +// correctly by comparing against an actual (though invalid) transaction +// matching the template. 
+func TestTxWeightEstimator(t *testing.T) { + netParams := &chaincfg.MainNetParams + + p2pkhAddr, err := btcutil.NewAddressPubKeyHash( + make([]byte, 20), netParams) + if err != nil { + t.Fatalf("Failed to generate address: %v", err) + } + p2pkhScript, err := txscript.PayToAddrScript(p2pkhAddr) + if err != nil { + t.Fatalf("Failed to generate scriptPubKey: %v", err) + } + + p2wkhAddr, err := btcutil.NewAddressWitnessPubKeyHash( + make([]byte, 20), netParams) + if err != nil { + t.Fatalf("Failed to generate address: %v", err) + } + p2wkhScript, err := txscript.PayToAddrScript(p2wkhAddr) + if err != nil { + t.Fatalf("Failed to generate scriptPubKey: %v", err) + } + + p2wshAddr, err := btcutil.NewAddressWitnessScriptHash( + make([]byte, 32), netParams) + if err != nil { + t.Fatalf("Failed to generate address: %v", err) + } + p2wshScript, err := txscript.PayToAddrScript(p2wshAddr) + if err != nil { + t.Fatalf("Failed to generate scriptPubKey: %v", err) + } + + p2shAddr, err := btcutil.NewAddressScriptHash([]byte{0}, netParams) + if err != nil { + t.Fatalf("Failed to generate address: %v", err) + } + p2shScript, err := txscript.PayToAddrScript(p2shAddr) + if err != nil { + t.Fatalf("Failed to generate scriptPubKey: %v", err) + } + + testCases := []struct { + numP2PKHInputs int + numP2WKHInputs int + numP2WSHInputs int + numNestedP2WKHInputs int + numNestedP2WSHInputs int + numP2PKHOutputs int + numP2WKHOutputs int + numP2WSHOutputs int + numP2SHOutputs int + }{ + // Assert base txn size. + {}, + + // Assert single input/output sizes. + { + numP2PKHInputs: 1, + }, + { + numP2WKHInputs: 1, + }, + { + numP2WSHInputs: 1, + }, + { + numNestedP2WKHInputs: 1, + }, + { + numNestedP2WSHInputs: 1, + }, + { + numP2WKHOutputs: 1, + }, + { + numP2PKHOutputs: 1, + }, + { + numP2WSHOutputs: 1, + }, + { + numP2SHOutputs: 1, + }, + + // Assert each input/output increments input/output counts. 
+ { + numP2PKHInputs: 253, + }, + { + numP2WKHInputs: 253, + }, + { + numP2WSHInputs: 253, + }, + { + numNestedP2WKHInputs: 253, + }, + { + numNestedP2WSHInputs: 253, + }, + { + numP2WKHOutputs: 253, + }, + { + numP2PKHOutputs: 253, + }, + { + numP2WSHOutputs: 253, + }, + { + numP2SHOutputs: 253, + }, + + // Assert basic combinations of inputs and outputs. + { + numP2PKHInputs: 1, + numP2PKHOutputs: 2, + }, + { + numP2PKHInputs: 1, + numP2WKHInputs: 1, + numP2WKHOutputs: 1, + numP2WSHOutputs: 1, + }, + { + numP2WKHInputs: 1, + numP2WKHOutputs: 1, + numP2WSHOutputs: 1, + }, + { + numP2WKHInputs: 2, + numP2WKHOutputs: 1, + numP2WSHOutputs: 1, + }, + { + numP2WSHInputs: 1, + numP2WKHOutputs: 1, + }, + { + numP2PKHInputs: 1, + numP2SHOutputs: 1, + }, + { + numNestedP2WKHInputs: 1, + numP2WKHOutputs: 1, + }, + { + numNestedP2WSHInputs: 1, + numP2WKHOutputs: 1, + }, + + // Assert disparate input/output types increment total + // input/output counts. + { + numP2PKHInputs: 50, + numP2WKHInputs: 50, + numP2WSHInputs: 51, + numNestedP2WKHInputs: 51, + numNestedP2WSHInputs: 51, + numP2WKHOutputs: 1, + }, + { + numP2WKHInputs: 1, + numP2WKHOutputs: 63, + numP2PKHOutputs: 63, + numP2WSHOutputs: 63, + numP2SHOutputs: 64, + }, + { + numP2PKHInputs: 50, + numP2WKHInputs: 50, + numP2WSHInputs: 51, + numNestedP2WKHInputs: 51, + numNestedP2WSHInputs: 51, + numP2WKHOutputs: 63, + numP2PKHOutputs: 63, + numP2WSHOutputs: 63, + numP2SHOutputs: 64, + }, + } + + for i, test := range testCases { + var weightEstimate input.TxWeightEstimator + tx := wire.NewMsgTx(1) + + for j := 0; j < test.numP2PKHInputs; j++ { + weightEstimate.AddP2PKHInput() + + signature := make([]byte, maxDERSignatureSize+1) + compressedPubKey := make([]byte, 33) + scriptSig, err := txscript.NewScriptBuilder().AddData(signature). 
+ AddData(compressedPubKey).Script() + if err != nil { + t.Fatalf("Failed to generate scriptSig: %v", err) + } + + tx.AddTxIn(&wire.TxIn{SignatureScript: scriptSig}) + } + for j := 0; j < test.numP2WKHInputs; j++ { + weightEstimate.AddP2WKHInput() + + signature := make([]byte, maxDERSignatureSize+1) + compressedPubKey := make([]byte, 33) + witness := wire.TxWitness{signature, compressedPubKey} + tx.AddTxIn(&wire.TxIn{Witness: witness}) + } + for j := 0; j < test.numP2WSHInputs; j++ { + weightEstimate.AddWitnessInput(42) + + witnessScript := make([]byte, 40) + witness := wire.TxWitness{witnessScript} + tx.AddTxIn(&wire.TxIn{Witness: witness}) + } + for j := 0; j < test.numNestedP2WKHInputs; j++ { + weightEstimate.AddNestedP2WKHInput() + + signature := make([]byte, maxDERSignatureSize+1) + compressedPubKey := make([]byte, 33) + witness := wire.TxWitness{signature, compressedPubKey} + scriptSig, err := txscript.NewScriptBuilder().AddData(p2wkhScript). + Script() + if err != nil { + t.Fatalf("Failed to generate scriptSig: %v", err) + } + + tx.AddTxIn(&wire.TxIn{SignatureScript: scriptSig, Witness: witness}) + } + for j := 0; j < test.numNestedP2WSHInputs; j++ { + weightEstimate.AddNestedP2WSHInput(42) + + witnessScript := make([]byte, 40) + witness := wire.TxWitness{witnessScript} + scriptSig, err := txscript.NewScriptBuilder().AddData(p2wshScript). 
+ Script() + if err != nil { + t.Fatalf("Failed to generate scriptSig: %v", err) + } + + tx.AddTxIn(&wire.TxIn{SignatureScript: scriptSig, Witness: witness}) + } + for j := 0; j < test.numP2PKHOutputs; j++ { + weightEstimate.AddP2PKHOutput() + tx.AddTxOut(&wire.TxOut{PkScript: p2pkhScript}) + } + for j := 0; j < test.numP2WKHOutputs; j++ { + weightEstimate.AddP2WKHOutput() + tx.AddTxOut(&wire.TxOut{PkScript: p2wkhScript}) + } + for j := 0; j < test.numP2WSHOutputs; j++ { + weightEstimate.AddP2WSHOutput() + tx.AddTxOut(&wire.TxOut{PkScript: p2wshScript}) + } + for j := 0; j < test.numP2SHOutputs; j++ { + weightEstimate.AddP2SHOutput() + tx.AddTxOut(&wire.TxOut{PkScript: p2shScript}) + } + + expectedWeight := blockchain.GetTransactionWeight(btcutil.NewTx(tx)) + if weightEstimate.Weight() != int(expectedWeight) { + t.Errorf("Case %d: Got wrong weight: expected %d, got %d", + i, expectedWeight, weightEstimate.Weight()) + } + } +} + +type maxDERSignature struct{} + +func (s *maxDERSignature) Serialize() []byte { + // Always return worst-case signature length, excluding the one byte + // sighash flag. + return make([]byte, maxDERSignatureSize) +} + +func (s *maxDERSignature) Verify(_ []byte, _ *btcec.PublicKey) bool { + return true +} + +// dummySigner is a fake signer used for size (upper bound) calculations. +type dummySigner struct { + input.Signer +} + +// SignOutputRaw generates a signature for the passed transaction according to +// the data within the passed SignDescriptor. 
+func (s *dummySigner) SignOutputRaw(tx *wire.MsgTx, + signDesc *input.SignDescriptor) (input.Signature, error) { + + return &maxDERSignature{}, nil +} + +type witnessSizeTest struct { + name string + expSize int + genWitness func(t *testing.T) wire.TxWitness +} + +var witnessSizeTests = []witnessSizeTest{ + { + name: "funding", + expSize: input.MultiSigWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + witnessScript, _, err := input.GenFundingPkScript( + testPubkeyBytes, testPubkeyBytes, 1, + ) + if err != nil { + t.Fatal(err) + } + + return input.SpendMultiSig( + witnessScript, + testPubkeyBytes, &maxDERSignature{}, + testPubkeyBytes, &maxDERSignature{}, + ) + }, + }, + { + name: "to local timeout", + expSize: input.ToLocalTimeoutWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + witnessScript, err := input.CommitScriptToSelf( + testCSVDelay, testPubkey, testPubkey, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witnessScript, + } + + witness, err := input.CommitSpendTimeout( + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "to local revoke", + expSize: input.ToLocalPenaltyWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + witnessScript, err := input.CommitScriptToSelf( + testCSVDelay, testPubkey, testPubkey, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witnessScript, + } + + witness, err := input.CommitSpendRevoke( + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "to remote confirmed", + expSize: input.ToRemoteConfirmedWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.CommitScriptToRemoteConfirmed( + testPubkey, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + KeyDesc: 
keychain.KeyDescriptor{ + PubKey: testPubkey, + }, + } + + witness, err := input.CommitSpendToRemoteConfirmed( + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "anchor", + expSize: input.AnchorWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.CommitScriptAnchor( + testPubkey, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + KeyDesc: keychain.KeyDescriptor{ + PubKey: testPubkey, + }, + } + + witness, err := input.CommitSpendAnchor( + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "anchor anyone", + expSize: 43, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.CommitScriptAnchor( + testPubkey, + ) + if err != nil { + t.Fatal(err) + } + + witness, _ := input.CommitSpendAnchorAnyone(witScript) + + return witness + }, + }, + { + name: "offered htlc revoke", + expSize: input.OfferedHtlcPenaltyWitnessSize - 3, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.SenderHTLCScript( + testPubkey, testPubkey, testPubkey, + testHash160, false, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + KeyDesc: keychain.KeyDescriptor{ + PubKey: testPubkey, + }, + DoubleTweak: testPrivkey, + } + + witness, err := input.SenderHtlcSpendRevoke( + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "offered htlc revoke confirmed", + expSize: input.OfferedHtlcPenaltyWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + hash := make([]byte, 20) + + witScript, err := input.SenderHTLCScript( + testPubkey, testPubkey, testPubkey, + hash, true, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + KeyDesc: 
keychain.KeyDescriptor{ + PubKey: testPubkey, + }, + DoubleTweak: testPrivkey, + } + + witness, err := input.SenderHtlcSpendRevoke( + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "offered htlc timeout", + expSize: input.OfferedHtlcTimeoutWitnessSize - 3, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.SenderHTLCScript( + testPubkey, testPubkey, testPubkey, + testHash160, false, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + } + + witness, err := input.SenderHtlcSpendTimeout( + &maxDERSignature{}, txscript.SigHashAll, + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "offered htlc timeout confirmed", + expSize: input.OfferedHtlcTimeoutWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.SenderHTLCScript( + testPubkey, testPubkey, testPubkey, + testHash160, true, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + } + + witness, err := input.SenderHtlcSpendTimeout( + &maxDERSignature{}, txscript.SigHashAll, + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "offered htlc success", + expSize: input.OfferedHtlcSuccessWitnessSize - 3, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.SenderHTLCScript( + testPubkey, testPubkey, testPubkey, + testHash160, false, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + } + + witness, err := input.SenderHtlcSpendRedeem( + &dummySigner{}, signDesc, testTx, testPreimage, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "offered htlc success confirmed", + expSize: input.OfferedHtlcSuccessWitnessSize, + genWitness: func(t *testing.T) 
wire.TxWitness { + witScript, err := input.SenderHTLCScript( + testPubkey, testPubkey, testPubkey, + testHash160, true, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + } + + witness, err := input.SenderHtlcSpendRedeem( + &dummySigner{}, signDesc, testTx, testPreimage, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "accepted htlc revoke", + expSize: input.AcceptedHtlcPenaltyWitnessSize - 3, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.ReceiverHTLCScript( + testCLTVExpiry, testPubkey, testPubkey, + testPubkey, testHash160, false, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + KeyDesc: keychain.KeyDescriptor{ + PubKey: testPubkey, + }, + DoubleTweak: testPrivkey, + } + + witness, err := input.ReceiverHtlcSpendRevoke( + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "accepted htlc revoke confirmed", + expSize: input.AcceptedHtlcPenaltyWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.ReceiverHTLCScript( + testCLTVExpiry, testPubkey, testPubkey, + testPubkey, testHash160, true, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + KeyDesc: keychain.KeyDescriptor{ + PubKey: testPubkey, + }, + DoubleTweak: testPrivkey, + } + + witness, err := input.ReceiverHtlcSpendRevoke( + &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "accepted htlc timeout", + expSize: input.AcceptedHtlcTimeoutWitnessSize - 3, + genWitness: func(t *testing.T) wire.TxWitness { + + witScript, err := input.ReceiverHTLCScript( + testCLTVExpiry, testPubkey, testPubkey, + testPubkey, testHash160, false, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + 
WitnessScript: witScript, + } + + witness, err := input.ReceiverHtlcSpendTimeout( + &dummySigner{}, signDesc, testTx, + testCLTVExpiry, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "accepted htlc timeout confirmed", + expSize: input.AcceptedHtlcTimeoutWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.ReceiverHTLCScript( + testCLTVExpiry, testPubkey, testPubkey, + testPubkey, testHash160, true, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + } + + witness, err := input.ReceiverHtlcSpendTimeout( + &dummySigner{}, signDesc, testTx, + testCLTVExpiry, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "accepted htlc success", + expSize: input.AcceptedHtlcSuccessWitnessSize - 3, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.ReceiverHTLCScript( + testCLTVExpiry, testPubkey, testPubkey, + testPubkey, testHash160, false, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + KeyDesc: keychain.KeyDescriptor{ + PubKey: testPubkey, + }, + } + + witness, err := input.ReceiverHtlcSpendRedeem( + &maxDERSignature{}, txscript.SigHashAll, + testPreimage, &dummySigner{}, signDesc, testTx, + ) + if err != nil { + t.Fatal(err) + } + + return witness + }, + }, + { + name: "accepted htlc success confirmed", + expSize: input.AcceptedHtlcSuccessWitnessSize, + genWitness: func(t *testing.T) wire.TxWitness { + witScript, err := input.ReceiverHTLCScript( + testCLTVExpiry, testPubkey, testPubkey, + testPubkey, testHash160, true, + ) + if err != nil { + t.Fatal(err) + } + + signDesc := &input.SignDescriptor{ + WitnessScript: witScript, + KeyDesc: keychain.KeyDescriptor{ + PubKey: testPubkey, + }, + } + + witness, err := input.ReceiverHtlcSpendRedeem( + &maxDERSignature{}, txscript.SigHashAll, + testPreimage, &dummySigner{}, signDesc, testTx, + ) 
+ if err != nil { + t.Fatal(err) + } + + return witness + }, + }, +} + +// TestWitnessSizes asserts the correctness of our magic witness constants. +// Witnesses involving signatures will have maxDERSignatures injected so that we +// can determine upper bounds for the witness sizes. These constants are +// predominately used for fee estimation, so we want to be certain that we +// aren't under estimating or our transactions could get stuck. +func TestWitnessSizes(t *testing.T) { + for _, test := range witnessSizeTests { + test := test + t.Run(test.name, func(t *testing.T) { + size := test.genWitness(t).SerializeSize() + if size != test.expSize { + t.Fatalf("size mismatch, want: %v, got: %v", + test.expSize, size) + } + }) + } +} diff --git a/input/test_utils.go b/input/test_utils.go index 45c11f116e..5b00441726 100644 --- a/input/test_utils.go +++ b/input/test_utils.go @@ -50,7 +50,9 @@ type MockSigner struct { // SignOutputRaw generates a signature for the passed transaction according to // the data within the passed SignDescriptor. 
-func (m *MockSigner) SignOutputRaw(tx *wire.MsgTx, signDesc *SignDescriptor) ([]byte, error) { +func (m *MockSigner) SignOutputRaw(tx *wire.MsgTx, + signDesc *SignDescriptor) (Signature, error) { + pubkey := signDesc.KeyDesc.PubKey switch { case signDesc.SingleTweak != nil: @@ -67,12 +69,12 @@ func (m *MockSigner) SignOutputRaw(tx *wire.MsgTx, signDesc *SignDescriptor) ([] sig, err := txscript.RawTxInWitnessSignature(tx, signDesc.SigHashes, signDesc.InputIndex, signDesc.Output.Value, signDesc.WitnessScript, - txscript.SigHashAll, privKey) + signDesc.HashType, privKey) if err != nil { return nil, err } - return sig[:len(sig)-1], nil + return btcec.ParseDERSignature(sig[:len(sig)-1], btcec.S256()) } // ComputeInputScript generates a complete InputIndex for the passed transaction diff --git a/input/witnessgen.go b/input/witnessgen.go index 28a02d086e..1a1fba6ea9 100644 --- a/input/witnessgen.go +++ b/input/witnessgen.go @@ -7,91 +7,144 @@ import ( "github.com/btcsuite/btcd/wire" ) -// WitnessType determines how an output's witness will be generated. The -// default commitmentTimeLock type will generate a witness that will allow -// spending of a time-locked transaction enforced by CheckSequenceVerify. -type WitnessType uint16 +// WitnessGenerator represents a function that is able to generate the final +// witness for a particular public key script. Additionally, if required, this +// function will also return the sigScript for spending nested P2SH witness +// outputs. This function acts as an abstraction layer, hiding the details of +// the underlying script. +type WitnessGenerator func(tx *wire.MsgTx, hc *txscript.TxSigHashes, + inputIndex int) (*Script, error) + +// WitnessType determines how an output's witness will be generated. This +// interface can be implemented to be used for custom sweep scripts if the +// pre-defined StandardWitnessType list doesn't provide a suitable one. 
+type WitnessType interface { + // String returns a human readable version of the WitnessType. + String() string + + // WitnessGenerator will return a WitnessGenerator function that an + // output uses to generate the witness and optionally the sigScript for + // a sweep transaction. + WitnessGenerator(signer Signer, + descriptor *SignDescriptor) WitnessGenerator + + // SizeUpperBound returns the maximum length of the witness of this + // WitnessType if it would be included in a tx. It also returns if the + // output itself is a nested p2sh output, if so then we need to take + // into account the extra sigScript data size. + SizeUpperBound() (int, bool, error) + + // AddWeightEstimation adds the estimated size of the witness in bytes + // to the given weight estimator. + AddWeightEstimation(e *TxWeightEstimator) error +} + +// StandardWitnessType is a numeric representation of standard pre-defined types +// of witness configurations. +type StandardWitnessType uint16 + +// A compile time check to ensure StandardWitnessType implements the +// WitnessType interface. +var _ WitnessType = (StandardWitnessType)(0) const ( - // CommitmentTimeLock is a witness that allows us to spend the output of - // a commitment transaction after a relative lock-time lockout. - CommitmentTimeLock WitnessType = 0 + // CommitmentTimeLock is a witness that allows us to spend our output + // on our local commitment transaction after a relative lock-time + // lockout. + CommitmentTimeLock StandardWitnessType = 0 // CommitmentNoDelay is a witness that allows us to spend a settled // no-delay output immediately on a counterparty's commitment // transaction. - CommitmentNoDelay WitnessType = 1 + CommitmentNoDelay StandardWitnessType = 1 // CommitmentRevoke is a witness that allows us to sweep the settled // output of a malicious counterparty's who broadcasts a revoked // commitment transaction. 
- CommitmentRevoke WitnessType = 2 + CommitmentRevoke StandardWitnessType = 2 // HtlcOfferedRevoke is a witness that allows us to sweep an HTLC which // we offered to the remote party in the case that they broadcast a // revoked commitment state. - HtlcOfferedRevoke WitnessType = 3 + HtlcOfferedRevoke StandardWitnessType = 3 // HtlcAcceptedRevoke is a witness that allows us to sweep an HTLC // output sent to us in the case that the remote party broadcasts a // revoked commitment state. - HtlcAcceptedRevoke WitnessType = 4 + HtlcAcceptedRevoke StandardWitnessType = 4 // HtlcOfferedTimeoutSecondLevel is a witness that allows us to sweep // an HTLC output that we extended to a party, but was never fulfilled. // This HTLC output isn't directly on the commitment transaction, but // is the result of a confirmed second-level HTLC transaction. As a // result, we can only spend this after a CSV delay. - HtlcOfferedTimeoutSecondLevel WitnessType = 5 + HtlcOfferedTimeoutSecondLevel StandardWitnessType = 5 // HtlcAcceptedSuccessSecondLevel is a witness that allows us to sweep // an HTLC output that was offered to us, and for which we have a // payment preimage. This HTLC output isn't directly on our commitment // transaction, but is the result of confirmed second-level HTLC // transaction. As a result, we can only spend this after a CSV delay. - HtlcAcceptedSuccessSecondLevel WitnessType = 6 + HtlcAcceptedSuccessSecondLevel StandardWitnessType = 6 // HtlcOfferedRemoteTimeout is a witness that allows us to sweep an // HTLC that we offered to the remote party which lies in the // commitment transaction of the remote party. We can spend this output // after the absolute CLTV timeout of the HTLC as passed. - HtlcOfferedRemoteTimeout WitnessType = 7 + HtlcOfferedRemoteTimeout StandardWitnessType = 7 // HtlcAcceptedRemoteSuccess is a witness that allows us to sweep an // HTLC that was offered to us by the remote party. 
We use this witness // in the case that the remote party goes to chain, and we know the // pre-image to the HTLC. We can sweep this without any additional // timeout. - HtlcAcceptedRemoteSuccess WitnessType = 8 + HtlcAcceptedRemoteSuccess StandardWitnessType = 8 // HtlcSecondLevelRevoke is a witness that allows us to sweep an HTLC // from the remote party's commitment transaction in the case that the // broadcast a revoked commitment, but then also immediately attempt to // go to the second level to claim the HTLC. - HtlcSecondLevelRevoke WitnessType = 9 + HtlcSecondLevelRevoke StandardWitnessType = 9 // WitnessKeyHash is a witness type that allows us to spend a regular // p2wkh output that's sent to an output which is under complete // control of the backing wallet. - WitnessKeyHash WitnessType = 10 + WitnessKeyHash StandardWitnessType = 10 // NestedWitnessKeyHash is a witness type that allows us to sweep an // output that sends to a nested P2SH script that pays to a key solely // under our control. The witness generated needs to include the - NestedWitnessKeyHash WitnessType = 11 + NestedWitnessKeyHash StandardWitnessType = 11 // CommitSpendNoDelayTweakless is similar to the CommitSpendNoDelay // type, but it omits the tweak that randomizes the key we need to // spend with a channel peer supplied set of randomness. - CommitSpendNoDelayTweakless = 12 + CommitSpendNoDelayTweakless StandardWitnessType = 12 + + // CommitmentToRemoteConfirmed is a witness that allows us to spend our + // output on the counterparty's commitment transaction after a + // confirmation. + CommitmentToRemoteConfirmed StandardWitnessType = 13 + + // CommitmentAnchor is a witness that allows us to spend our anchor on + // the commitment transaction. + CommitmentAnchor StandardWitnessType = 14 ) -// Stirng returns a human readable version of the target WitnessType. -func (wt WitnessType) String() string { +// String returns a human readable version of the target WitnessType. 
+// +// NOTE: This is part of the WitnessType interface. +func (wt StandardWitnessType) String() string { switch wt { case CommitmentTimeLock: return "CommitmentTimeLock" + case CommitmentToRemoteConfirmed: + return "CommitmentToRemoteConfirmed" + + case CommitmentAnchor: + return "CommitmentAnchor" + case CommitmentNoDelay: return "CommitmentNoDelay" @@ -122,24 +175,24 @@ func (wt WitnessType) String() string { case HtlcSecondLevelRevoke: return "HtlcSecondLevelRevoke" + case WitnessKeyHash: + return "WitnessKeyHash" + + case NestedWitnessKeyHash: + return "NestedWitnessKeyHash" + default: return fmt.Sprintf("Unknown WitnessType: %v", uint32(wt)) } } -// WitnessGenerator represents a function which is able to generate the final -// witness for a particular public key script. Additionally, if required, this -// function will also return the sigScript for spending nested P2SH witness -// outputs. This function acts as an abstraction layer, hiding the details of -// the underlying script. -type WitnessGenerator func(tx *wire.MsgTx, hc *txscript.TxSigHashes, - inputIndex int) (*Script, error) - -// GenWitnessFunc will return a WitnessGenerator function that an output uses +// WitnessGenerator will return a WitnessGenerator function that an output uses // to generate the witness and optionally the sigScript for a sweep // transaction. The sigScript will be generated if the witness type warrants // one for spending, such as the NestedWitnessKeyHash witness type. -func (wt WitnessType) GenWitnessFunc(signer Signer, +// +// NOTE: This is part of the WitnessType interface. 
+func (wt StandardWitnessType) WitnessGenerator(signer Signer, descriptor *SignDescriptor) WitnessGenerator { return func(tx *wire.MsgTx, hc *txscript.TxSigHashes, @@ -160,6 +213,28 @@ func (wt WitnessType) GenWitnessFunc(signer Signer, Witness: witness, }, nil + case CommitmentToRemoteConfirmed: + witness, err := CommitSpendToRemoteConfirmed( + signer, desc, tx, + ) + if err != nil { + return nil, err + } + + return &Script{ + Witness: witness, + }, nil + + case CommitmentAnchor: + witness, err := CommitSpendAnchor(signer, desc, tx) + if err != nil { + return nil, err + } + + return &Script{ + Witness: witness, + }, nil + case CommitmentNoDelay: witness, err := CommitSpendNoDelay(signer, desc, tx, false) if err != nil { @@ -262,5 +337,107 @@ func (wt WitnessType) GenWitnessFunc(signer Signer, return nil, fmt.Errorf("unknown witness type: %v", wt) } } +} + +// SizeUpperBound returns the maximum length of the witness of this witness +// type if it would be included in a tx. We also return if the output itself is +// a nested p2sh output, if so then we need to take into account the extra +// sigScript data size. +// +// NOTE: This is part of the WitnessType interface. +func (wt StandardWitnessType) SizeUpperBound() (int, bool, error) { + switch wt { + + // Outputs on a remote commitment transaction that pay directly to us. + case CommitSpendNoDelayTweakless: + fallthrough + case WitnessKeyHash: + fallthrough + case CommitmentNoDelay: + return P2WKHWitnessSize, false, nil + + // Outputs on a past commitment transaction that pay directly + // to us. + case CommitmentTimeLock: + return ToLocalTimeoutWitnessSize, false, nil + + // 1 CSV time locked output to us on remote commitment. + case CommitmentToRemoteConfirmed: + return ToRemoteConfirmedWitnessSize, false, nil + + // Anchor output on the commitment transaction. 
+ case CommitmentAnchor: + return AnchorWitnessSize, false, nil + + // Outgoing second layer HTLC's that have confirmed within the + // chain, and the output they produced is now mature enough to + // sweep. + case HtlcOfferedTimeoutSecondLevel: + return ToLocalTimeoutWitnessSize, false, nil + + // Incoming second layer HTLC's that have confirmed within the + // chain, and the output they produced is now mature enough to + // sweep. + case HtlcAcceptedSuccessSecondLevel: + return ToLocalTimeoutWitnessSize, false, nil + + // An HTLC on the commitment transaction of the remote party, + // that has had its absolute timelock expire. + case HtlcOfferedRemoteTimeout: + return AcceptedHtlcTimeoutWitnessSize, false, nil + + // An HTLC on the commitment transaction of the remote party, + // that can be swept with the preimage. + case HtlcAcceptedRemoteSuccess: + return OfferedHtlcSuccessWitnessSize, false, nil + + // A nested P2SH input that has a p2wkh witness script. We'll mark this + // as nested P2SH so the caller can estimate the weight properly + // including the sigScript. + case NestedWitnessKeyHash: + return P2WKHWitnessSize, true, nil + + // The revocation output on a revoked commitment transaction. + case CommitmentRevoke: + return ToLocalPenaltyWitnessSize, false, nil + + // The revocation output on a revoked HTLC that we offered to the remote + // party. + case HtlcOfferedRevoke: + return OfferedHtlcPenaltyWitnessSize, false, nil + + // The revocation output on a revoked HTLC that was sent to us. + case HtlcAcceptedRevoke: + return AcceptedHtlcPenaltyWitnessSize, false, nil + + // The revocation output of a second level output of an HTLC. + case HtlcSecondLevelRevoke: + return ToLocalPenaltyWitnessSize, false, nil + } + + return 0, false, fmt.Errorf("unexpected witness type: %v", wt) +} + +// AddWeightEstimation adds the estimated size of the witness in bytes to the +// given weight estimator. +// +// NOTE: This is part of the WitnessType interface. 
+func (wt StandardWitnessType) AddWeightEstimation(e *TxWeightEstimator) error { + // For fee estimation purposes, we'll now attempt to obtain an + // upper bound on the weight this input will add when fully + // populated. + size, isNestedP2SH, err := wt.SizeUpperBound() + if err != nil { + return err + } + + // If this is a nested P2SH input, then we'll need to factor in + // the additional data push within the sigScript. + if isNestedP2SH { + e.AddNestedP2WSHInput(size) + } else { + e.AddWitnessInput(size) + } + return nil } diff --git a/invoices/interface.go b/invoices/interface.go new file mode 100644 index 0000000000..c511ddedf3 --- /dev/null +++ b/invoices/interface.go @@ -0,0 +1,17 @@ +package invoices + +import ( + "github.com/lightningnetwork/lnd/record" +) + +// Payload abstracts access to any additional fields provided in the final hop's +// TLV onion payload. +type Payload interface { + // MultiPath returns the record corresponding to the option_mpp parsed from + // the onion payload. + MultiPath() *record.MPP + + // CustomRecords returns the custom tlv type records that were parsed + // from the payload. + CustomRecords() record.CustomSet +} diff --git a/invoices/invoice_expiry_watcher.go b/invoices/invoice_expiry_watcher.go new file mode 100644 index 0000000000..08c57f3964 --- /dev/null +++ b/invoices/invoice_expiry_watcher.go @@ -0,0 +1,240 @@ +package invoices + +import ( + "fmt" + "sync" + "time" + + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/queue" + "github.com/lightningnetwork/lnd/zpay32" +) + +// invoiceExpiry holds an invoice's payment hash and its expiry. This +// is used to order invoices by their expiry for cancellation. 
+type invoiceExpiry struct { + PaymentHash lntypes.Hash + Expiry time.Time +} + +// Less implements PriorityQueueItem.Less such that the top item in the +// priority queue will be the one that expires next. +func (e invoiceExpiry) Less(other queue.PriorityQueueItem) bool { + return e.Expiry.Before(other.(*invoiceExpiry).Expiry) +} + +// InvoiceExpiryWatcher handles automatic invoice cancellation of expired +// invoices. Upon start InvoiceExpiryWatcher will retrieve all pending (not yet +// settled or canceled) invoices to its watching queue. When a new +// invoice is added to the InvoiceRegistry, it'll be forwarded to the +// InvoiceExpiryWatcher and will end up in the watching queue as well. +// If any of the watched invoices expire, they'll be removed from the watching +// queue and will be cancelled through InvoiceRegistry.CancelInvoice(). +type InvoiceExpiryWatcher struct { + sync.Mutex + started bool + + // clock is the clock implementation that InvoiceExpiryWatcher uses. + // It is useful for testing. + clock clock.Clock + + // cancelInvoice is a template method that cancels an expired invoice. + cancelInvoice func(lntypes.Hash) error + + // expiryQueue holds invoiceExpiry items and is used to find the next + // invoice to expire. + expiryQueue queue.PriorityQueue + + // newInvoices channel is used to wake up the main loop when a new invoice + // is added. + newInvoices chan []*invoiceExpiry + + wg sync.WaitGroup + + // quit signals InvoiceExpiryWatcher to stop. + quit chan struct{} +} + +// NewInvoiceExpiryWatcher creates a new InvoiceExpiryWatcher instance. +func NewInvoiceExpiryWatcher(clock clock.Clock) *InvoiceExpiryWatcher { + return &InvoiceExpiryWatcher{ + clock: clock, + newInvoices: make(chan []*invoiceExpiry), + quit: make(chan struct{}), + } +} + +// Start starts the subscription handler and the main loop. Start() will +// return with error if InvoiceExpiryWatcher is already started. 
Start() +// expects a cancellation function passed that will be used to cancel expired +// invoices by their payment hash. +func (ew *InvoiceExpiryWatcher) Start( + cancelInvoice func(lntypes.Hash) error) error { + + ew.Lock() + defer ew.Unlock() + + if ew.started { + return fmt.Errorf("InvoiceExpiryWatcher already started") + } + + ew.started = true + ew.cancelInvoice = cancelInvoice + ew.wg.Add(1) + go ew.mainLoop() + + return nil +} + +// Stop quits the expiry handler loop and waits for InvoiceExpiryWatcher to +// fully stop. +func (ew *InvoiceExpiryWatcher) Stop() { + ew.Lock() + defer ew.Unlock() + + if ew.started { + // Signal the main loop goroutine to quit and wait for it to return. + close(ew.quit) + ew.wg.Wait() + ew.started = false + } +} + +// prepareInvoice checks if the passed invoice may be canceled and calculates +// the expiry time. +func (ew *InvoiceExpiryWatcher) prepareInvoice( + paymentHash lntypes.Hash, invoice *channeldb.Invoice) *invoiceExpiry { + + if invoice.State != channeldb.ContractOpen { + log.Debugf("Invoice not added to expiry watcher: %v", paymentHash) + return nil + } + + realExpiry := invoice.Terms.Expiry + if realExpiry == 0 { + realExpiry = zpay32.DefaultInvoiceExpiry + } + + expiry := invoice.CreationDate.Add(realExpiry) + return &invoiceExpiry{ + PaymentHash: paymentHash, + Expiry: expiry, + } +} + +// AddInvoices adds multiple invoices to the InvoiceExpiryWatcher. 
+func (ew *InvoiceExpiryWatcher) AddInvoices( + invoices []channeldb.InvoiceWithPaymentHash) { + + invoicesWithExpiry := make([]*invoiceExpiry, 0, len(invoices)) + for _, invoiceWithPaymentHash := range invoices { + newInvoiceExpiry := ew.prepareInvoice( + invoiceWithPaymentHash.PaymentHash, &invoiceWithPaymentHash.Invoice, + ) + if newInvoiceExpiry != nil { + invoicesWithExpiry = append(invoicesWithExpiry, newInvoiceExpiry) + } + } + + if len(invoicesWithExpiry) > 0 { + log.Debugf("Added %d invoices to the expiry watcher", + len(invoicesWithExpiry)) + select { + case ew.newInvoices <- invoicesWithExpiry: + // Select on quit too so that callers won't get blocked in case + // of concurrent shutdown. + case <-ew.quit: + } + } +} + +// AddInvoice adds a new invoice to the InvoiceExpiryWatcher. This won't check +// if the invoice is already added and will only add invoices with ContractOpen +// state. +func (ew *InvoiceExpiryWatcher) AddInvoice( + paymentHash lntypes.Hash, invoice *channeldb.Invoice) { + + newInvoiceExpiry := ew.prepareInvoice(paymentHash, invoice) + if newInvoiceExpiry != nil { + log.Debugf("Adding invoice '%v' to expiry watcher, expiration: %v", + paymentHash, newInvoiceExpiry.Expiry) + + select { + case ew.newInvoices <- []*invoiceExpiry{newInvoiceExpiry}: + // Select on quit too so that callers won't get blocked in case + // of concurrent shutdown. + case <-ew.quit: + } + } +} + +// nextExpiry returns a Time chan to wait on until the next invoice expires. +// If there are no active invoices, then it'll simply wait indefinitely. +func (ew *InvoiceExpiryWatcher) nextExpiry() <-chan time.Time { + if !ew.expiryQueue.Empty() { + top := ew.expiryQueue.Top().(*invoiceExpiry) + return ew.clock.TickAfter(top.Expiry.Sub(ew.clock.Now())) + } + + return nil +} + +// cancelNextExpiredInvoice will cancel the next expired invoice and removes +// it from the expiry queue. 
+func (ew *InvoiceExpiryWatcher) cancelNextExpiredInvoice() { + if !ew.expiryQueue.Empty() { + top := ew.expiryQueue.Top().(*invoiceExpiry) + if !top.Expiry.Before(ew.clock.Now()) { + return + } + + err := ew.cancelInvoice(top.PaymentHash) + if err != nil && err != channeldb.ErrInvoiceAlreadySettled && + err != channeldb.ErrInvoiceAlreadyCanceled { + + log.Errorf("Unable to cancel invoice: %v", top.PaymentHash) + } + + ew.expiryQueue.Pop() + } +} + +// mainLoop is a goroutine that receives new invoices and handles cancellation +// of expired invoices. +func (ew *InvoiceExpiryWatcher) mainLoop() { + defer ew.wg.Done() + + for { + // Cancel any invoices that may have expired. + ew.cancelNextExpiredInvoice() + + select { + + case invoicesWithExpiry := <-ew.newInvoices: + // Take newly forwarded invoices with higher priority + // in order to not block the newInvoices channel. + for _, invoiceWithExpiry := range invoicesWithExpiry { + ew.expiryQueue.Push(invoiceWithExpiry) + } + continue + + default: + select { + + case <-ew.nextExpiry(): + // Wait until the next invoice expires. + continue + + case invoicesWithExpiry := <-ew.newInvoices: + for _, invoiceWithExpiry := range invoicesWithExpiry { + ew.expiryQueue.Push(invoiceWithExpiry) + } + + case <-ew.quit: + return + } + } + } +} diff --git a/invoices/invoice_expiry_watcher_test.go b/invoices/invoice_expiry_watcher_test.go new file mode 100644 index 0000000000..e7e28b5002 --- /dev/null +++ b/invoices/invoice_expiry_watcher_test.go @@ -0,0 +1,180 @@ +package invoices + +import ( + "sync" + "testing" + "time" + + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" + "github.com/lightningnetwork/lnd/lntypes" +) + +// invoiceExpiryWatcherTest holds a test fixture and implements checks +// for InvoiceExpiryWatcher tests. 
+type invoiceExpiryWatcherTest struct { + t *testing.T + wg sync.WaitGroup + watcher *InvoiceExpiryWatcher + testData invoiceExpiryTestData + canceledInvoices []lntypes.Hash +} + +// newInvoiceExpiryWatcherTest creates a new InvoiceExpiryWatcher test fixture +// and sets up the test environment. +func newInvoiceExpiryWatcherTest(t *testing.T, now time.Time, + numExpiredInvoices, numPendingInvoices int) *invoiceExpiryWatcherTest { + + test := &invoiceExpiryWatcherTest{ + watcher: NewInvoiceExpiryWatcher(clock.NewTestClock(testTime)), + testData: generateInvoiceExpiryTestData( + t, now, 0, numExpiredInvoices, numPendingInvoices, + ), + } + + test.wg.Add(numExpiredInvoices) + + err := test.watcher.Start(func(paymentHash lntypes.Hash) error { + test.canceledInvoices = append(test.canceledInvoices, paymentHash) + test.wg.Done() + return nil + }) + + if err != nil { + t.Fatalf("cannot start InvoiceExpiryWatcher: %v", err) + } + + return test +} + +func (t *invoiceExpiryWatcherTest) waitForFinish(timeout time.Duration) { + done := make(chan struct{}) + + // Wait for all cancels. + go func() { + t.wg.Wait() + close(done) + }() + + select { + case <-done: + case <-time.After(timeout): + t.t.Fatalf("test timeout") + } +} + +func (t *invoiceExpiryWatcherTest) checkExpectations() { + // Check that invoices that got canceled during the test are the ones + // that expired. + if len(t.canceledInvoices) != len(t.testData.expiredInvoices) { + t.t.Fatalf("expected %v cancellations, got %v", + len(t.testData.expiredInvoices), len(t.canceledInvoices)) + } + + for i := range t.canceledInvoices { + if _, ok := t.testData.expiredInvoices[t.canceledInvoices[i]]; !ok { + t.t.Fatalf("wrong invoice canceled") + } + } +} + +// Tests that InvoiceExpiryWatcher can be started and stopped. 
+func TestInvoiceExpiryWatcherStartStop(t *testing.T) { + watcher := NewInvoiceExpiryWatcher(clock.NewTestClock(testTime)) + cancel := func(lntypes.Hash) error { + t.Fatalf("unexpected call") + return nil + } + + if err := watcher.Start(cancel); err != nil { + t.Fatalf("unexpected error upon start: %v", err) + } + + if err := watcher.Start(cancel); err == nil { + t.Fatalf("expected error upon second start") + } + + watcher.Stop() + + if err := watcher.Start(cancel); err != nil { + t.Fatalf("unexpected error upon start: %v", err) + } +} + +// Tests that no invoices will expire from an empty InvoiceExpiryWatcher. +func TestInvoiceExpiryWithNoInvoices(t *testing.T) { + t.Parallel() + + test := newInvoiceExpiryWatcherTest(t, testTime, 0, 0) + + test.waitForFinish(testTimeout) + test.watcher.Stop() + test.checkExpectations() +} + +// Tests that if all add invoices are expired, then all invoices +// will be canceled. +func TestInvoiceExpiryWithOnlyExpiredInvoices(t *testing.T) { + t.Parallel() + + test := newInvoiceExpiryWatcherTest(t, testTime, 0, 5) + + for paymentHash, invoice := range test.testData.pendingInvoices { + test.watcher.AddInvoice(paymentHash, invoice) + } + + test.waitForFinish(testTimeout) + test.watcher.Stop() + test.checkExpectations() +} + +// Tests that if some invoices are expired, then those invoices +// will be canceled. +func TestInvoiceExpiryWithPendingAndExpiredInvoices(t *testing.T) { + t.Parallel() + + test := newInvoiceExpiryWatcherTest(t, testTime, 5, 5) + + for paymentHash, invoice := range test.testData.expiredInvoices { + test.watcher.AddInvoice(paymentHash, invoice) + } + + for paymentHash, invoice := range test.testData.pendingInvoices { + test.watcher.AddInvoice(paymentHash, invoice) + } + + test.waitForFinish(testTimeout) + test.watcher.Stop() + test.checkExpectations() +} + +// Tests adding multiple invoices at once. 
+func TestInvoiceExpiryWhenAddingMultipleInvoices(t *testing.T) { + t.Parallel() + + test := newInvoiceExpiryWatcherTest(t, testTime, 5, 5) + var invoices []channeldb.InvoiceWithPaymentHash + + for hash, invoice := range test.testData.expiredInvoices { + invoices = append(invoices, + channeldb.InvoiceWithPaymentHash{ + Invoice: *invoice, + PaymentHash: hash, + }, + ) + } + + for hash, invoice := range test.testData.pendingInvoices { + invoices = append(invoices, + channeldb.InvoiceWithPaymentHash{ + Invoice: *invoice, + PaymentHash: hash, + }, + ) + } + + test.watcher.AddInvoices(invoices) + test.waitForFinish(testTimeout) + test.watcher.Stop() + test.checkExpectations() +} diff --git a/invoices/invoiceregistry.go b/invoices/invoiceregistry.go index 3f35954f88..662a8a82ec 100644 --- a/invoices/invoiceregistry.go +++ b/invoices/invoiceregistry.go @@ -2,14 +2,17 @@ package invoices import ( "errors" + "fmt" "sync" "sync/atomic" + "time" - "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/queue" + "github.com/lightningnetwork/lnd/record" ) var ( @@ -24,24 +27,56 @@ var ( // ErrShuttingDown is returned when an operation failed because the // invoice registry is shutting down. ErrShuttingDown = errors.New("invoice registry shutting down") +) - // errNoUpdate is returned when no invoice updated is required. - errNoUpdate = errors.New("no update needed") +const ( + // DefaultHtlcHoldDuration defines the default for how long mpp htlcs + // are held while waiting for the other set members to arrive. + DefaultHtlcHoldDuration = 120 * time.Second ) -// HodlEvent describes how an htlc should be resolved. If HodlEvent.Preimage is -// set, the event indicates a settle event. If Preimage is nil, it is a cancel -// event. -type HodlEvent struct { - // Preimage is the htlc preimage. 
Its value is nil in case of a cancel. - Preimage *lntypes.Preimage +// RegistryConfig contains the configuration parameters for invoice registry. +type RegistryConfig struct { + // FinalCltvRejectDelta defines the number of blocks before the expiry + // of the htlc where we no longer settle it as an exit hop and instead + // cancel it back. Normally this value should be lower than the cltv + // expiry of any invoice we create and the code effectuating this should + // not be hit. + FinalCltvRejectDelta int32 + + // HtlcHoldDuration defines for how long mpp htlcs are held while + // waiting for the other set members to arrive. + HtlcHoldDuration time.Duration + + // Clock holds the clock implementation that is used to provide + // Now() and TickAfter() and is useful to stub out the clock functions + // during testing. + Clock clock.Clock - // CircuitKey is the key of the htlc for which we have a resolution - // decision. - CircuitKey channeldb.CircuitKey + // AcceptKeySend indicates whether we want to accept spontaneous key + // send payments. + AcceptKeySend bool +} + +// htlcReleaseEvent describes an htlc auto-release event. It is used to release +// mpp htlcs for which the complete set didn't arrive in time. +type htlcReleaseEvent struct { + // hash is the payment hash of the htlc to release. + hash lntypes.Hash + + // key is the circuit key of the htlc to release. + key channeldb.CircuitKey - // AcceptHeight is the original height at which the htlc was accepted. - AcceptHeight int32 + // releaseTime is the time at which to release the htlc. + releaseTime time.Time +} + +// Less is used to order PriorityQueueItem's by their release time such that +// items with the older release time are at the top of the queue. +// +// NOTE: Part of the queue.PriorityQueueItem interface. 
+func (r *htlcReleaseEvent) Less(other queue.PriorityQueueItem) bool { + return r.releaseTime.Before(other.(*htlcReleaseEvent).releaseTime) } // InvoiceRegistry is a central registry of all the outstanding invoices @@ -52,6 +87,9 @@ type InvoiceRegistry struct { cdb *channeldb.DB + // cfg contains the registry's configuration parameters. + cfg *RegistryConfig + clientMtx sync.Mutex nextClientID uint32 notificationClients map[uint32]*InvoiceSubscription @@ -72,12 +110,11 @@ type InvoiceRegistry struct { // subscriber. This is used to unsubscribe from all hashes efficiently. hodlReverseSubscriptions map[chan<- interface{}]map[channeldb.CircuitKey]struct{} - // finalCltvRejectDelta defines the number of blocks before the expiry - // of the htlc where we no longer settle it as an exit hop and instead - // cancel it back. Normally this value should be lower than the cltv - // expiry of any invoice we create and the code effectuating this should - // not be hit. - finalCltvRejectDelta int32 + // htlcAutoReleaseChan contains the new htlcs that need to be + // auto-released. + htlcAutoReleaseChan chan *htlcReleaseEvent + + expiryWatcher *InvoiceExpiryWatcher wg sync.WaitGroup quit chan struct{} @@ -87,7 +124,8 @@ type InvoiceRegistry struct { // wraps the persistent on-disk invoice storage with an additional in-memory // layer. The in-memory layer is in place such that debug invoices can be added // which are volatile yet available system wide within the daemon. 
-func NewRegistry(cdb *channeldb.DB, finalCltvRejectDelta int32) *InvoiceRegistry { +func NewRegistry(cdb *channeldb.DB, expiryWatcher *InvoiceExpiryWatcher, + cfg *RegistryConfig) *InvoiceRegistry { return &InvoiceRegistry{ cdb: cdb, @@ -98,22 +136,61 @@ func NewRegistry(cdb *channeldb.DB, finalCltvRejectDelta int32) *InvoiceRegistry invoiceEvents: make(chan interface{}, 100), hodlSubscriptions: make(map[channeldb.CircuitKey]map[chan<- interface{}]struct{}), hodlReverseSubscriptions: make(map[chan<- interface{}]map[channeldb.CircuitKey]struct{}), - finalCltvRejectDelta: finalCltvRejectDelta, + cfg: cfg, + htlcAutoReleaseChan: make(chan *htlcReleaseEvent), + expiryWatcher: expiryWatcher, quit: make(chan struct{}), } } +// populateExpiryWatcher fetches all active invoices and their corresponding +// payment hashes from ChannelDB and adds them to the expiry watcher. +func (i *InvoiceRegistry) populateExpiryWatcher() error { + pendingOnly := true + pendingInvoices, err := i.cdb.FetchAllInvoicesWithPaymentHash(pendingOnly) + if err != nil && err != channeldb.ErrNoInvoicesCreated { + log.Errorf( + "Error while prefetching active invoices from the database: %v", err, + ) + return err + } + + log.Debugf("Adding %d pending invoices to the expiry watcher", + len(pendingInvoices)) + i.expiryWatcher.AddInvoices(pendingInvoices) + return nil +} + // Start starts the registry and all goroutines it needs to carry out its task. func (i *InvoiceRegistry) Start() error { + // Start InvoiceExpiryWatcher and prepopulate it with existing active + // invoices. + err := i.expiryWatcher.Start(func(paymentHash lntypes.Hash) error { + cancelIfAccepted := false + return i.cancelInvoiceImpl(paymentHash, cancelIfAccepted) + }) + + if err != nil { + return err + } + i.wg.Add(1) + go i.invoiceEventLoop() - go i.invoiceEventNotifier() + // Now prefetch all pending invoices to the expiry watcher. 
+ err = i.populateExpiryWatcher() + if err != nil { + i.Stop() + return err + } return nil } // Stop signals the registry for a graceful shutdown. func (i *InvoiceRegistry) Stop() { + i.expiryWatcher.Stop() + close(i.quit) i.wg.Wait() @@ -127,13 +204,31 @@ type invoiceEvent struct { invoice *channeldb.Invoice } -// invoiceEventNotifier is the dedicated goroutine responsible for accepting +// tickAt returns a channel that ticks at the specified time. If the time has +// already passed, it will tick immediately. +func (i *InvoiceRegistry) tickAt(t time.Time) <-chan time.Time { + now := i.cfg.Clock.Now() + return i.cfg.Clock.TickAfter(t.Sub(now)) +} + +// invoiceEventLoop is the dedicated goroutine responsible for accepting // new notification subscriptions, cancelling old subscriptions, and // dispatching new invoice events. -func (i *InvoiceRegistry) invoiceEventNotifier() { +func (i *InvoiceRegistry) invoiceEventLoop() { defer i.wg.Done() + // Set up a heap for htlc auto-releases. + autoReleaseHeap := &queue.PriorityQueue{} + for { + // If there is something to release, set up a release tick + // channel. + var nextReleaseTick <-chan time.Time + if autoReleaseHeap.Len() > 0 { + head := autoReleaseHeap.Top().(*htlcReleaseEvent) + nextReleaseTick = i.tickAt(head.releaseTime) + } + select { // A new invoice subscription for all invoices has just arrived! // We'll query for any backlog notifications, then add it to the @@ -179,7 +274,7 @@ func (i *InvoiceRegistry) invoiceEventNotifier() { // For backwards compatibility, do not notify // all invoice subscribers of cancel and accept // events. - state := e.invoice.Terms.State + state := e.invoice.State if state != channeldb.ContractCanceled && state != channeldb.ContractAccepted { @@ -199,6 +294,29 @@ func (i *InvoiceRegistry) invoiceEventNotifier() { i.singleNotificationClients[e.id] = e } + // A new htlc came in for auto-release. 
+ case event := <-i.htlcAutoReleaseChan: + log.Debugf("Scheduling auto-release for htlc: "+ + "hash=%v, key=%v at %v", + event.hash, event.key, event.releaseTime) + + // We use an independent timer for every htlc rather + // than a set timer that is reset with every htlc coming + // in. Otherwise the sender could keep resetting the + // timer until the broadcast window is entered and our + // channel is force closed. + autoReleaseHeap.Push(event) + + // The htlc at the top of the heap needs to be auto-released. + case <-nextReleaseTick: + event := autoReleaseHeap.Pop().(*htlcReleaseEvent) + err := i.cancelSingleHtlc( + event.hash, event.key, ResultMppTimeout, + ) + if err != nil { + log.Errorf("HTLC timer: %v", err) + } + case <-i.quit: return } @@ -231,7 +349,7 @@ func (i *InvoiceRegistry) dispatchToClients(event *invoiceEvent) { // ensure we don't duplicate any events. // TODO(joostjager): Refactor switches. - state := event.invoice.Terms.State + state := event.invoice.State switch { // If we've already sent this settle event to // the client, then we can skip this. @@ -277,14 +395,14 @@ func (i *InvoiceRegistry) dispatchToClients(event *invoiceEvent) { // the latest add/settle index it has. We'll use this to ensure // we don't send a notification twice, which can happen if a new // event is added while we're catching up a new client. 
- switch event.invoice.Terms.State { + switch event.invoice.State { case channeldb.ContractSettled: client.settleIndex = invoice.SettleIndex case channeldb.ContractOpen: client.addIndex = invoice.AddIndex default: log.Errorf("unexpected invoice state: %v", - event.invoice.Terms.State) + event.invoice.State) } } } @@ -383,22 +501,25 @@ func (i *InvoiceRegistry) AddInvoice(invoice *channeldb.Invoice, paymentHash lntypes.Hash) (uint64, error) { i.Lock() - defer i.Unlock() - log.Debugf("Invoice(%v): added %v", paymentHash, - newLogClosure(func() string { - return spew.Sdump(invoice) - }), - ) + log.Debugf("Invoice(%v): added with terms %v", paymentHash, + invoice.Terms) addIndex, err := i.cdb.AddInvoice(invoice, paymentHash) if err != nil { + i.Unlock() return 0, err } // Now that we've added the invoice, we'll send dispatch a message to // notify the clients of this new invoice. i.notifyClients(paymentHash, invoice, channeldb.ContractOpen) + i.Unlock() + + // InvoiceExpiryWatcher.AddInvoice must not be locked by InvoiceRegistry + // to avoid deadlock when a new invoice is added while an other is being + // canceled. + i.expiryWatcher.AddInvoice(paymentHash, invoice) return addIndex, nil } @@ -415,9 +536,185 @@ func (i *InvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (channeldb.Invoice, return i.cdb.LookupInvoice(rHash) } -// NotifyExitHopHtlc attempts to mark an invoice as settled. If the invoice is a -// debug invoice, then this method is a noop as debug invoices are never fully -// settled. The return value describes how the htlc should be resolved. +// startHtlcTimer starts a new timer via the invoice registry main loop that +// cancels a single htlc on an invoice when the htlc hold duration has passed. 
+func (i *InvoiceRegistry) startHtlcTimer(hash lntypes.Hash, + key channeldb.CircuitKey, acceptTime time.Time) error { + + releaseTime := acceptTime.Add(i.cfg.HtlcHoldDuration) + event := &htlcReleaseEvent{ + hash: hash, + key: key, + releaseTime: releaseTime, + } + + select { + case i.htlcAutoReleaseChan <- event: + return nil + + case <-i.quit: + return ErrShuttingDown + } +} + +// cancelSingleHtlc cancels a single accepted htlc on an invoice. It takes +// a resolution result which will be used to notify subscribed links and +// resolvers of the details of the htlc cancellation. +func (i *InvoiceRegistry) cancelSingleHtlc(hash lntypes.Hash, + key channeldb.CircuitKey, result FailResolutionResult) error { + + i.Lock() + defer i.Unlock() + + updateInvoice := func(invoice *channeldb.Invoice) ( + *channeldb.InvoiceUpdateDesc, error) { + + // Only allow individual htlc cancelation on open invoices. + if invoice.State != channeldb.ContractOpen { + log.Debugf("cancelSingleHtlc: invoice %v no longer "+ + "open", hash) + + return nil, nil + } + + // Lookup the current status of the htlc in the database. + htlc, ok := invoice.Htlcs[key] + if !ok { + return nil, fmt.Errorf("htlc %v not found", key) + } + + // Cancelation is only possible if the htlc wasn't already + // resolved. + if htlc.State != channeldb.HtlcStateAccepted { + log.Debugf("cancelSingleHtlc: htlc %v on invoice %v "+ + "is already resolved", key, hash) + + return nil, nil + } + + log.Debugf("cancelSingleHtlc: cancelling htlc %v on invoice %v", + key, hash) + + // Return an update descriptor that cancels htlc and keeps + // invoice open. + canceledHtlcs := map[channeldb.CircuitKey]struct{}{ + key: {}, + } + + return &channeldb.InvoiceUpdateDesc{ + CancelHtlcs: canceledHtlcs, + }, nil + } + + // Try to mark the specified htlc as canceled in the invoice database. + // Intercept the update descriptor to set the local updated variable. If + // no invoice update is performed, we can return early. 
+ var updated bool + invoice, err := i.cdb.UpdateInvoice(hash, + func(invoice *channeldb.Invoice) ( + *channeldb.InvoiceUpdateDesc, error) { + + updateDesc, err := updateInvoice(invoice) + if err != nil { + return nil, err + } + updated = updateDesc != nil + + return updateDesc, err + }, + ) + if err != nil { + return err + } + if !updated { + return nil + } + + // The invoice has been updated. Notify subscribers of the htlc + // resolution. + htlc, ok := invoice.Htlcs[key] + if !ok { + return fmt.Errorf("htlc %v not found", key) + } + if htlc.State == channeldb.HtlcStateCanceled { + resolution := NewFailResolution( + key, int32(htlc.AcceptHeight), result, + ) + + i.notifyHodlSubscribers(resolution) + } + return nil +} + +// processKeySend just-in-time inserts an invoice if this htlc is a keysend +// htlc. +func (i *InvoiceRegistry) processKeySend(ctx invoiceUpdateCtx) error { + + // Retrieve keysend record if present. + preimageSlice, ok := ctx.customRecords[record.KeySendType] + if !ok { + return nil + } + + // Cancel htlc if preimage is invalid. + preimage, err := lntypes.MakePreimage(preimageSlice) + if err != nil || preimage.Hash() != ctx.hash { + return errors.New("invalid keysend preimage") + } + + // Don't accept zero preimages as those have a special meaning in our + // database for hodl invoices. + if preimage == channeldb.UnknownPreimage { + return errors.New("invalid keysend preimage") + } + + // Only allow keysend for non-mpp payments. + if ctx.mpp != nil { + return errors.New("no mpp keysend supported") + } + + // Create an invoice for the htlc amount. + amt := ctx.amtPaid + + // Set tlv optional feature vector on the invoice. Otherwise we wouldn't + // be able to pay to it with keysend. + rawFeatures := lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadOptional, + ) + features := lnwire.NewFeatureVector(rawFeatures, lnwire.Features) + + // Use the minimum block delta that we require for settling htlcs. 
+ finalCltvDelta := i.cfg.FinalCltvRejectDelta + + // Pre-check expiry here to prevent inserting an invoice that will not + // be settled. + if ctx.expiry < uint32(ctx.currentHeight+finalCltvDelta) { + return errors.New("final expiry too soon") + } + + // Create placeholder invoice. + invoice := &channeldb.Invoice{ + CreationDate: i.cfg.Clock.Now(), + Terms: channeldb.ContractTerm{ + FinalCltvDelta: finalCltvDelta, + Value: amt, + PaymentPreimage: preimage, + Features: features, + }, + } + + // Insert invoice into database. Ignore duplicates, because this + // may be a replay. + _, err = i.AddInvoice(invoice, ctx.hash) + if err != nil && err != channeldb.ErrDuplicateInvoice { + return err + } + + return nil +} + +// NotifyExitHopHtlc attempts to mark an invoice as settled. The return value +// describes how the htlc should be resolved. // // When the preimage of the invoice is not yet known (hodl invoice), this // function moves the invoice to the accepted state. When SettleHoldInvoice is @@ -426,163 +723,215 @@ func (i *InvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (channeldb.Invoice, // to be taken on the htlc (settle or cancel). The caller needs to ensure that // the channel is either buffered or received on from another goroutine to // prevent deadlock. +// +// In the case that the htlc is part of a larger set of htlcs that pay to the +// same invoice (multi-path payment), the htlc is held until the set is +// complete. If the set doesn't fully arrive in time, a timer will cancel the +// held htlc. func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash, amtPaid lnwire.MilliSatoshi, expiry uint32, currentHeight int32, circuitKey channeldb.CircuitKey, hodlChan chan<- interface{}, - eob []byte) (*HodlEvent, error) { + payload Payload) (HtlcResolution, error) { + + mpp := payload.MultiPath() + + // Create the update context containing the relevant details of the + // incoming htlc. 
+ updateCtx := invoiceUpdateCtx{ + hash: rHash, + circuitKey: circuitKey, + amtPaid: amtPaid, + expiry: expiry, + currentHeight: currentHeight, + finalCltvRejectDelta: i.cfg.FinalCltvRejectDelta, + customRecords: payload.CustomRecords(), + mpp: mpp, + } - i.Lock() - defer i.Unlock() + // Process keysend if present. Do this outside of the lock, because + // AddInvoice obtains its own lock. This is no problem, because the + // operation is idempotent. + if i.cfg.AcceptKeySend { + err := i.processKeySend(updateCtx) + if err != nil { + updateCtx.log(fmt.Sprintf("keysend error: %v", err)) + + return NewFailResolution( + circuitKey, currentHeight, ResultKeySendError, + ), nil + } + } - debugLog := func(s string) { - log.Debugf("Invoice(%x): %v, amt=%v, expiry=%v, circuit=%v", - rHash[:], s, amtPaid, expiry, circuitKey) + // Execute locked notify exit hop logic. + i.Lock() + resolution, err := i.notifyExitHopHtlcLocked(&updateCtx, hodlChan) + i.Unlock() + if err != nil { + return nil, err } - // Default is to not update subscribers after the invoice update. - updateSubscribers := false + switch r := resolution.(type) { + // The htlc is held. Start a timer outside the lock if the htlc should + // be auto-released, because otherwise a deadlock may happen with the + // main event loop. + case *htlcAcceptResolution: + if r.autoRelease { + err := i.startHtlcTimer(rHash, circuitKey, r.acceptTime) + if err != nil { + return nil, err + } + } - updateInvoice := func(inv *channeldb.Invoice) ( - *channeldb.InvoiceUpdateDesc, error) { + // We return a nil resolution because htlc acceptances are + // represented as nil resolutions externally. + // TODO(carla) update calling code to handle accept resolutions. + return nil, nil - // Don't update the invoice when this is a replayed htlc. 
- htlc, ok := inv.Htlcs[circuitKey] - if ok { - switch htlc.State { - case channeldb.HtlcStateCanceled: - debugLog("replayed htlc to canceled invoice") + // A direct resolution was received for this htlc. + case HtlcResolution: + return r, nil + + // Fail if an unknown resolution type was received. + default: + return nil, errors.New("invalid resolution type") + } +} - case channeldb.HtlcStateAccepted: - debugLog("replayed htlc to accepted invoice") +// notifyExitHopHtlcLocked is the internal implementation of NotifyExitHopHtlc +// that should be executed inside the registry lock. +func (i *InvoiceRegistry) notifyExitHopHtlcLocked( + ctx *invoiceUpdateCtx, hodlChan chan<- interface{}) ( + HtlcResolution, error) { - case channeldb.HtlcStateSettled: - debugLog("replayed htlc to settled invoice") + // We'll attempt to settle an invoice matching this rHash on disk (if + // one exists). The callback will update the invoice state and/or htlcs. + var ( + resolution HtlcResolution + updateSubscribers bool + ) + invoice, err := i.cdb.UpdateInvoice( + ctx.hash, + func(inv *channeldb.Invoice) ( + *channeldb.InvoiceUpdateDesc, error) { - default: - return nil, errors.New("unexpected htlc state") + updateDesc, res, err := updateInvoice(ctx, inv) + if err != nil { + return nil, err } - return nil, errNoUpdate - } + // Only send an update if the invoice state was changed. + updateSubscribers = updateDesc != nil && + updateDesc.State != nil - // If the invoice is already canceled, there is no further - // checking to do. - if inv.Terms.State == channeldb.ContractCanceled { - debugLog("invoice already canceled") - return nil, errNoUpdate - } + // Assign resolution to outer scope variable. + resolution = res - // If an invoice amount is specified, check that enough - // is paid. Also check this for duplicate payments if - // the invoice is already settled or accepted. 
- if inv.Terms.Value > 0 && amtPaid < inv.Terms.Value { - debugLog("amount too low") - return nil, errNoUpdate - } + return updateDesc, nil + }, + ) + switch err { + case channeldb.ErrInvoiceNotFound: + // If the invoice was not found, return a failure resolution + // with an invoice not found result. + return NewFailResolution( + ctx.circuitKey, ctx.currentHeight, + ResultInvoiceNotFound, + ), nil - // The invoice is still open. Check the expiry. - if expiry < uint32(currentHeight+i.finalCltvRejectDelta) { - debugLog("expiry too soon") - return nil, errNoUpdate - } + case nil: - if expiry < uint32(currentHeight+inv.FinalCltvDelta) { - debugLog("expiry too soon") - return nil, errNoUpdate - } + default: + ctx.log(err.Error()) + return nil, err + } - // Record HTLC in the invoice database. - newHtlcs := map[channeldb.CircuitKey]*channeldb.HtlcAcceptDesc{ - circuitKey: { - Amt: amtPaid, - Expiry: expiry, - AcceptHeight: currentHeight, - }, - } + if updateSubscribers { + i.notifyClients(ctx.hash, invoice, invoice.State) + } - update := channeldb.InvoiceUpdateDesc{ - Htlcs: newHtlcs, + switch res := resolution.(type) { + case *HtlcFailResolution: + // Inspect latest htlc state on the invoice. If it is found, + // we will update the accept height as it was recorded in the + // invoice database (which occurs in the case where the htlc + // reached the database in a previous call). If the htlc was + // not found on the invoice, it was immediately failed so we + // send the failure resolution as is, which has the current + // height set as the accept height. + invoiceHtlc, ok := invoice.Htlcs[ctx.circuitKey] + if ok { + res.AcceptHeight = int32(invoiceHtlc.AcceptHeight) } - // Don't update invoice state if we are accepting a duplicate - // payment. We do accept or settle the HTLC. 
- switch inv.Terms.State { - case channeldb.ContractAccepted: - debugLog("accepting duplicate payment to accepted invoice") - update.State = channeldb.ContractAccepted - return &update, nil + ctx.log(fmt.Sprintf("failure resolution result "+ + "outcome: %v, at accept height: %v", + res.Outcome, res.AcceptHeight)) - case channeldb.ContractSettled: - debugLog("accepting duplicate payment to settled invoice") - update.State = channeldb.ContractSettled - return &update, nil - } + return res, nil - // Check to see if we can settle or this is an hold invoice and - // we need to wait for the preimage. - holdInvoice := inv.Terms.PaymentPreimage == channeldb.UnknownPreimage - if holdInvoice { - debugLog("accepted") - update.State = channeldb.ContractAccepted - } else { - debugLog("settled") - update.Preimage = inv.Terms.PaymentPreimage - update.State = channeldb.ContractSettled - } + // If the htlc was settled, we will settle any previously accepted + // htlcs and notify our peer to settle them. + case *HtlcSettleResolution: + ctx.log(fmt.Sprintf("settle resolution result "+ + "outcome: %v, at accept height: %v", + res.Outcome, res.AcceptHeight)) - updateSubscribers = true + // Also settle any previously accepted htlcs. If a htlc is + // marked as settled, we should follow now and settle the htlc + // with our peer. + for key, htlc := range invoice.Htlcs { + if htlc.State != channeldb.HtlcStateSettled { + continue + } - return &update, nil - } + // Notify subscribers that the htlcs should be settled + // with our peer. Note that the outcome of the + // resolution is set based on the outcome of the single + // htlc that we just settled, so may not be accurate + // for all htlcs. + htlcSettleResolution := NewSettleResolution( + res.Preimage, key, + int32(htlc.AcceptHeight), res.Outcome, + ) + + // Notify subscribers that the htlc should be settled + // with our peer. 
+ i.notifyHodlSubscribers(htlcSettleResolution) + } - // We'll attempt to settle an invoice matching this rHash on disk (if - // one exists). The callback will set the resolution action that is - // returned to the link or contract resolver. - invoice, err := i.cdb.UpdateInvoice(rHash, updateInvoice) - if err != nil && err != errNoUpdate { - debugLog(err.Error()) + return resolution, nil - return nil, err - } + // If we accepted the htlc, subscribe to the hodl invoice and return + // an accept resolution with the htlc's accept time on it. + case *htlcAcceptResolution: + invoiceHtlc, ok := invoice.Htlcs[ctx.circuitKey] + if !ok { + return nil, fmt.Errorf("accepted htlc: %v not"+ + " present on invoice: %x", ctx.circuitKey, + ctx.hash[:]) + } - if updateSubscribers { - i.notifyClients(rHash, invoice, invoice.Terms.State) - } + // Determine accepted height of this htlc. If the htlc reached + // the invoice database (possibly in a previous call to the + // invoice registry), we'll take the original accepted height + // as it was recorded in the database. + acceptHeight := int32(invoiceHtlc.AcceptHeight) - // Inspect latest htlc state on the invoice. - invoiceHtlc, ok := invoice.Htlcs[circuitKey] + ctx.log(fmt.Sprintf("accept resolution result "+ + "outcome: %v, at accept height: %v", + res.outcome, acceptHeight)) - // If it isn't recorded, cancel htlc. - if !ok { - return &HodlEvent{ - CircuitKey: circuitKey, - AcceptHeight: currentHeight, - }, nil - } + // Auto-release the htlc if the invoice is still open. It can + // only happen for mpp payments that there are htlcs in state + // Accepted while the invoice is Open. + if invoice.State == channeldb.ContractOpen { + res.acceptTime = invoiceHtlc.AcceptTime + res.autoRelease = true - // Determine accepted height of this htlc. If the htlc reached the - // invoice database (possibly in a previous call to the invoice - // registry), we'll take the original accepted height as it was recorded - // in the database. 
- acceptHeight := int32(invoiceHtlc.AcceptHeight) - - switch invoiceHtlc.State { - case channeldb.HtlcStateCanceled: - return &HodlEvent{ - CircuitKey: circuitKey, - AcceptHeight: acceptHeight, - }, nil + } - case channeldb.HtlcStateSettled: - return &HodlEvent{ - CircuitKey: circuitKey, - Preimage: &invoice.Terms.PaymentPreimage, - AcceptHeight: acceptHeight, - }, nil - - case channeldb.HtlcStateAccepted: - i.hodlSubscribe(hodlChan, circuitKey) - return nil, nil + i.hodlSubscribe(hodlChan, ctx.circuitKey) + return res, nil default: panic("unknown action") @@ -597,7 +946,7 @@ func (i *InvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) error { updateInvoice := func(invoice *channeldb.Invoice) ( *channeldb.InvoiceUpdateDesc, error) { - switch invoice.Terms.State { + switch invoice.State { case channeldb.ContractOpen: return nil, channeldb.ErrInvoiceStillOpen case channeldb.ContractCanceled: @@ -607,15 +956,19 @@ func (i *InvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) error { } return &channeldb.InvoiceUpdateDesc{ - State: channeldb.ContractSettled, - Preimage: preimage, + State: &channeldb.InvoiceStateUpdateDesc{ + NewState: channeldb.ContractSettled, + Preimage: preimage, + }, }, nil } hash := preimage.Hash() invoice, err := i.cdb.UpdateInvoice(hash, updateInvoice) if err != nil { - log.Errorf("SettleHodlInvoice with preimage %v: %v", preimage, err) + log.Errorf("SettleHodlInvoice with preimage %v: %v", + preimage, err) + return err } @@ -633,13 +986,13 @@ func (i *InvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) error { continue } - i.notifyHodlSubscribers(HodlEvent{ - CircuitKey: key, - Preimage: &preimage, - AcceptHeight: int32(htlc.AcceptHeight), - }) + resolution := NewSettleResolution( + preimage, key, int32(htlc.AcceptHeight), ResultSettled, + ) + + i.notifyHodlSubscribers(resolution) } - i.notifyClients(hash, invoice, invoice.Terms.State) + i.notifyClients(hash, invoice, invoice.State) return nil } @@ -647,6 +1000,16 @@ 
func (i *InvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) error { // CancelInvoice attempts to cancel the invoice corresponding to the passed // payment hash. func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error { + return i.cancelInvoiceImpl(payHash, true) +} + +// cancelInvoice attempts to cancel the invoice corresponding to the passed +// payment hash. Accepted invoices will only be canceled if explicitly +// requested to do so. It notifies subscribing links and resolvers that +// the associated htlcs were canceled if they change state. +func (i *InvoiceRegistry) cancelInvoiceImpl(payHash lntypes.Hash, + cancelAccepted bool) error { + i.Lock() defer i.Unlock() @@ -655,39 +1018,19 @@ func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error { updateInvoice := func(invoice *channeldb.Invoice) ( *channeldb.InvoiceUpdateDesc, error) { - switch invoice.Terms.State { - case channeldb.ContractSettled: - return nil, channeldb.ErrInvoiceAlreadySettled - case channeldb.ContractCanceled: - return nil, channeldb.ErrInvoiceAlreadyCanceled - } - - // Mark individual held htlcs as canceled. - canceledHtlcs := make( - map[channeldb.CircuitKey]*channeldb.HtlcAcceptDesc, - ) - for key, htlc := range invoice.Htlcs { - switch htlc.State { - - // If we get here, there shouldn't be any settled htlcs. - case channeldb.HtlcStateSettled: - return nil, errors.New("cannot cancel " + - "invoice with settled htlc(s)") - - // Don't cancel htlcs that were already canceled, - // because it would incorrectly modify the invoice paid - // amt. - case channeldb.HtlcStateCanceled: - continue - } - - canceledHtlcs[key] = nil + // Only cancel the invoice in ContractAccepted state if explicitly + // requested to do so. + if invoice.State == channeldb.ContractAccepted && !cancelAccepted { + return nil, nil } - // Move invoice to the canceled state. + // Move invoice to the canceled state. 
Rely on validation in + // channeldb to return an error if the invoice is already + // settled or canceled. return &channeldb.InvoiceUpdateDesc{ - Htlcs: canceledHtlcs, - State: channeldb.ContractCanceled, + State: &channeldb.InvoiceStateUpdateDesc{ + NewState: channeldb.ContractCanceled, + }, }, nil } @@ -703,6 +1046,13 @@ func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error { return err } + // Return without cancellation if the invoice state is ContractAccepted. + if invoice.State == channeldb.ContractAccepted { + log.Debugf("Invoice(%v): remains accepted as cancel wasn't"+ + "explicitly requested.", payHash) + return nil + } + log.Debugf("Invoice(%v): canceled", payHash) // In the callback, some htlcs may have been moved to the canceled @@ -715,10 +1065,11 @@ func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error { continue } - i.notifyHodlSubscribers(HodlEvent{ - CircuitKey: key, - AcceptHeight: int32(htlc.AcceptHeight), - }) + i.notifyHodlSubscribers( + NewFailResolution( + key, int32(htlc.AcceptHeight), ResultCanceled, + ), + ) } i.notifyClients(payHash, invoice, channeldb.ContractCanceled) @@ -868,7 +1219,7 @@ func (i *InvoiceRegistry) SubscribeNotifications(addIndex, settleIndex uint64) * invoiceEvent := ntfn.(*invoiceEvent) var targetChan chan *channeldb.Invoice - state := invoiceEvent.invoice.Terms.State + state := invoiceEvent.invoice.State switch state { case channeldb.ContractOpen: targetChan = client.NewInvoices @@ -984,9 +1335,10 @@ func (i *InvoiceRegistry) SubscribeSingleInvoice( return client, nil } -// notifyHodlSubscribers sends out the hodl event to all current subscribers. -func (i *InvoiceRegistry) notifyHodlSubscribers(hodlEvent HodlEvent) { - subscribers, ok := i.hodlSubscriptions[hodlEvent.CircuitKey] +// notifyHodlSubscribers sends out the htlc resolution to all current +// subscribers. 
+func (i *InvoiceRegistry) notifyHodlSubscribers(htlcResolution HtlcResolution) { + subscribers, ok := i.hodlSubscriptions[htlcResolution.CircuitKey()] if !ok { return } @@ -996,18 +1348,18 @@ func (i *InvoiceRegistry) notifyHodlSubscribers(hodlEvent HodlEvent) { // single resolution for each hash. for subscriber := range subscribers { select { - case subscriber <- hodlEvent: + case subscriber <- htlcResolution: case <-i.quit: return } delete( i.hodlReverseSubscriptions[subscriber], - hodlEvent.CircuitKey, + htlcResolution.CircuitKey(), ) } - delete(i.hodlSubscriptions, hodlEvent.CircuitKey) + delete(i.hodlSubscriptions, htlcResolution.CircuitKey()) } // hodlSubscribe adds a new invoice subscription. diff --git a/invoices/invoiceregistry_test.go b/invoices/invoiceregistry_test.go index a137963e0e..319c30cf05 100644 --- a/invoices/invoiceregistry_test.go +++ b/invoices/invoiceregistry_test.go @@ -1,95 +1,37 @@ package invoices import ( - "io/ioutil" - "os" "testing" "time" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" ) -var ( - testTimeout = 5 * time.Second - - preimage = lntypes.Preimage{ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, - } - - hash = preimage.Hash() - - testHtlcExpiry = uint32(5) - - testInvoiceCltvDelta = uint32(4) - - testFinalCltvRejectDelta = int32(4) - - testCurrentHeight = int32(1) -) - -var ( - testInvoice = &channeldb.Invoice{ - Terms: channeldb.ContractTerm{ - PaymentPreimage: preimage, - Value: lnwire.MilliSatoshi(100000), - }, - } -) - -func newTestContext(t *testing.T) (*InvoiceRegistry, func()) { - cdb, cleanup, err := newDB() - if err != nil { - t.Fatal(err) - } - - // Instantiate and start the invoice registry. 
- registry := NewRegistry(cdb, testFinalCltvRejectDelta) - - err = registry.Start() - if err != nil { - cleanup() - t.Fatal(err) - } - - return registry, func() { - registry.Stop() - cleanup() - } -} - -func getCircuitKey(htlcID uint64) channeldb.CircuitKey { - return channeldb.CircuitKey{ - ChanID: lnwire.ShortChannelID{ - BlockHeight: 1, TxIndex: 2, TxPosition: 3, - }, - HtlcID: htlcID, - } -} - // TestSettleInvoice tests settling of an invoice and related notifications. func TestSettleInvoice(t *testing.T) { - registry, cleanup := newTestContext(t) - defer cleanup() + ctx := newTestContext(t) + defer ctx.cleanup() - allSubscriptions := registry.SubscribeNotifications(0, 0) + allSubscriptions := ctx.registry.SubscribeNotifications(0, 0) defer allSubscriptions.Cancel() // Subscribe to the not yet existing invoice. - subscription, err := registry.SubscribeSingleInvoice(hash) + subscription, err := ctx.registry.SubscribeSingleInvoice(testInvoicePaymentHash) if err != nil { t.Fatal(err) } defer subscription.Cancel() - if subscription.hash != hash { + if subscription.hash != testInvoicePaymentHash { t.Fatalf("expected subscription for provided hash") } // Add the invoice. - addIdx, err := registry.AddInvoice(testInvoice, hash) + addIdx, err := ctx.registry.AddInvoice(testInvoice, testInvoicePaymentHash) if err != nil { t.Fatal(err) } @@ -102,9 +44,9 @@ func TestSettleInvoice(t *testing.T) { // We expect the open state to be sent to the single invoice subscriber. select { case update := <-subscription.Updates: - if update.Terms.State != channeldb.ContractOpen { + if update.State != channeldb.ContractOpen { t.Fatalf("expected state ContractOpen, but got %v", - update.Terms.State) + update.State) } case <-time.After(testTimeout): t.Fatal("no update received") @@ -113,9 +55,9 @@ func TestSettleInvoice(t *testing.T) { // We expect a new invoice notification to be sent out. 
select { case newInvoice := <-allSubscriptions.NewInvoices: - if newInvoice.Terms.State != channeldb.ContractOpen { + if newInvoice.State != channeldb.ContractOpen { t.Fatalf("expected state ContractOpen, but got %v", - newInvoice.Terms.State) + newInvoice.State) } case <-time.After(testTimeout): t.Fatal("no update received") @@ -124,39 +66,55 @@ func TestSettleInvoice(t *testing.T) { hodlChan := make(chan interface{}, 1) // Try to settle invoice with an htlc that expires too soon. - event, err := registry.NotifyExitHopHtlc( - hash, testInvoice.Terms.Value, + resolution, err := ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, testInvoice.Terms.Value, uint32(testCurrentHeight)+testInvoiceCltvDelta-1, - testCurrentHeight, getCircuitKey(10), hodlChan, nil, + testCurrentHeight, getCircuitKey(10), hodlChan, testPayload, ) if err != nil { t.Fatal(err) } - if event.Preimage != nil { - t.Fatal("expected cancel event") + failResolution, ok := resolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + resolution) } - if event.AcceptHeight != testCurrentHeight { + if failResolution.AcceptHeight != testCurrentHeight { t.Fatalf("expected acceptHeight %v, but got %v", - testCurrentHeight, event.AcceptHeight) + testCurrentHeight, failResolution.AcceptHeight) + } + if failResolution.Outcome != ResultExpiryTooSoon { + t.Fatalf("expected expiry too soon, got: %v", + failResolution.Outcome) } // Settle invoice with a slightly higher amount. 
amtPaid := lnwire.MilliSatoshi(100500) - _, err = registry.NotifyExitHopHtlc( - hash, amtPaid, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, nil, + resolution, err = ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid, testHtlcExpiry, + testCurrentHeight, getCircuitKey(0), hodlChan, + testPayload, ) if err != nil { t.Fatal(err) } + settleResolution, ok := resolution.(*HtlcSettleResolution) + if !ok { + t.Fatalf("expected settle resolution, got: %T", + resolution) + } + if settleResolution.Outcome != ResultSettled { + t.Fatalf("expected settled, got: %v", + settleResolution.Outcome) + } // We expect the settled state to be sent to the single invoice // subscriber. select { case update := <-subscription.Updates: - if update.Terms.State != channeldb.ContractSettled { + if update.State != channeldb.ContractSettled { t.Fatalf("expected state ContractOpen, but got %v", - update.Terms.State) + update.State) } if update.AmtPaid != amtPaid { t.Fatal("invoice AmtPaid incorrect") @@ -168,9 +126,9 @@ func TestSettleInvoice(t *testing.T) { // We expect a settled notification to be sent out. select { case settledInvoice := <-allSubscriptions.SettledInvoices: - if settledInvoice.Terms.State != channeldb.ContractSettled { + if settledInvoice.State != channeldb.ContractSettled { t.Fatalf("expected state ContractOpen, but got %v", - settledInvoice.Terms.State) + settledInvoice.State) } case <-time.After(testTimeout): t.Fatal("no update received") @@ -178,47 +136,65 @@ func TestSettleInvoice(t *testing.T) { // Try to settle again with the same htlc id. We need this idempotent // behaviour after a restart. 
- event, err = registry.NotifyExitHopHtlc( - hash, amtPaid, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, nil, + resolution, err = ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight, + getCircuitKey(0), hodlChan, testPayload, ) if err != nil { t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err) } - if event.Preimage == nil { - t.Fatal("expected settle event") + settleResolution, ok = resolution.(*HtlcSettleResolution) + if !ok { + t.Fatalf("expected settle resolution, got: %T", + resolution) + } + if settleResolution.Outcome != ResultReplayToSettled { + t.Fatalf("expected replay settled, got: %v", + settleResolution.Outcome) } // Try to settle again with a new higher-valued htlc. This payment // should also be accepted, to prevent any change in behaviour for a // paid invoice that may open up a probe vector. - event, err = registry.NotifyExitHopHtlc( - hash, amtPaid+600, testHtlcExpiry, testCurrentHeight, - getCircuitKey(1), hodlChan, nil, + resolution, err = ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid+600, testHtlcExpiry, testCurrentHeight, + getCircuitKey(1), hodlChan, testPayload, ) if err != nil { t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err) } - if event.Preimage == nil { - t.Fatal("expected settle event") + settleResolution, ok = resolution.(*HtlcSettleResolution) + if !ok { + t.Fatalf("expected settle resolution, got: %T", + resolution) + } + if settleResolution.Outcome != ResultDuplicateToSettled { + t.Fatalf("expected duplicate settled, got: %v", + settleResolution.Outcome) } // Try to settle again with a lower amount. This should fail just as it // would have failed if it were the first payment. 
- event, err = registry.NotifyExitHopHtlc( - hash, amtPaid-600, testHtlcExpiry, testCurrentHeight, - getCircuitKey(2), hodlChan, nil, + resolution, err = ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid-600, testHtlcExpiry, testCurrentHeight, + getCircuitKey(2), hodlChan, testPayload, ) if err != nil { t.Fatalf("unexpected NotifyExitHopHtlc error: %v", err) } - if event.Preimage != nil { - t.Fatal("expected cancel event") + failResolution, ok = resolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + resolution) + } + if failResolution.Outcome != ResultAmountTooLow { + t.Fatalf("expected amount too low, got: %v", + failResolution.Outcome) } // Check that settled amount is equal to the sum of values of the htlcs // 0 and 1. - inv, err := registry.LookupInvoice(hash) + inv, err := ctx.registry.LookupInvoice(testInvoicePaymentHash) if err != nil { t.Fatal(err) } @@ -227,7 +203,7 @@ func TestSettleInvoice(t *testing.T) { } // Try to cancel. - err = registry.CancelInvoice(hash) + err = ctx.registry.CancelInvoice(testInvoicePaymentHash) if err != channeldb.ErrInvoiceAlreadySettled { t.Fatal("expected cancelation of a settled invoice to fail") } @@ -235,39 +211,39 @@ func TestSettleInvoice(t *testing.T) { // As this is a direct sette, we expect nothing on the hodl chan. select { case <-hodlChan: - t.Fatal("unexpected event") + t.Fatal("unexpected resolution") default: } } // TestCancelInvoice tests cancelation of an invoice and related notifications. func TestCancelInvoice(t *testing.T) { - registry, cleanup := newTestContext(t) - defer cleanup() + ctx := newTestContext(t) + defer ctx.cleanup() - allSubscriptions := registry.SubscribeNotifications(0, 0) + allSubscriptions := ctx.registry.SubscribeNotifications(0, 0) defer allSubscriptions.Cancel() // Try to cancel the not yet existing invoice. This should fail. 
- err := registry.CancelInvoice(hash) + err := ctx.registry.CancelInvoice(testInvoicePaymentHash) if err != channeldb.ErrInvoiceNotFound { t.Fatalf("expected ErrInvoiceNotFound, but got %v", err) } // Subscribe to the not yet existing invoice. - subscription, err := registry.SubscribeSingleInvoice(hash) + subscription, err := ctx.registry.SubscribeSingleInvoice(testInvoicePaymentHash) if err != nil { t.Fatal(err) } defer subscription.Cancel() - if subscription.hash != hash { + if subscription.hash != testInvoicePaymentHash { t.Fatalf("expected subscription for provided hash") } // Add the invoice. amt := lnwire.MilliSatoshi(100000) - _, err = registry.AddInvoice(testInvoice, hash) + _, err = ctx.registry.AddInvoice(testInvoice, testInvoicePaymentHash) if err != nil { t.Fatal(err) } @@ -275,10 +251,10 @@ func TestCancelInvoice(t *testing.T) { // We expect the open state to be sent to the single invoice subscriber. select { case update := <-subscription.Updates: - if update.Terms.State != channeldb.ContractOpen { + if update.State != channeldb.ContractOpen { t.Fatalf( "expected state ContractOpen, but got %v", - update.Terms.State, + update.State, ) } case <-time.After(testTimeout): @@ -288,10 +264,10 @@ func TestCancelInvoice(t *testing.T) { // We expect a new invoice notification to be sent out. select { case newInvoice := <-allSubscriptions.NewInvoices: - if newInvoice.Terms.State != channeldb.ContractOpen { + if newInvoice.State != channeldb.ContractOpen { t.Fatalf( "expected state ContractOpen, but got %v", - newInvoice.Terms.State, + newInvoice.State, ) } case <-time.After(testTimeout): @@ -299,7 +275,7 @@ func TestCancelInvoice(t *testing.T) { } // Cancel invoice. - err = registry.CancelInvoice(hash) + err = ctx.registry.CancelInvoice(testInvoicePaymentHash) if err != nil { t.Fatal(err) } @@ -308,10 +284,10 @@ func TestCancelInvoice(t *testing.T) { // subscriber. 
select { case update := <-subscription.Updates: - if update.Terms.State != channeldb.ContractCanceled { + if update.State != channeldb.ContractCanceled { t.Fatalf( "expected state ContractCanceled, but got %v", - update.Terms.State, + update.State, ) } case <-time.After(testTimeout): @@ -322,44 +298,53 @@ func TestCancelInvoice(t *testing.T) { // subscribers (backwards compatibility). // Try to cancel again. - err = registry.CancelInvoice(hash) + err = ctx.registry.CancelInvoice(testInvoicePaymentHash) if err != nil { t.Fatal("expected cancelation of a canceled invoice to succeed") } // Notify arrival of a new htlc paying to this invoice. This should - // result in a cancel event. + // result in a cancel resolution. hodlChan := make(chan interface{}) - event, err := registry.NotifyExitHopHtlc( - hash, amt, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, nil, + resolution, err := ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amt, testHtlcExpiry, testCurrentHeight, + getCircuitKey(0), hodlChan, testPayload, ) if err != nil { t.Fatal("expected settlement of a canceled invoice to succeed") } - - if event.Preimage != nil { - t.Fatal("expected cancel hodl event") + failResolution, ok := resolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + resolution) } - if event.AcceptHeight != testCurrentHeight { + if failResolution.AcceptHeight != testCurrentHeight { t.Fatalf("expected acceptHeight %v, but got %v", - testCurrentHeight, event.AcceptHeight) + testCurrentHeight, failResolution.AcceptHeight) + } + if failResolution.Outcome != ResultInvoiceAlreadyCanceled { + t.Fatalf("expected expiry too soon, got: %v", + failResolution.Outcome) } } // TestSettleHoldInvoice tests settling of a hold invoice and related // notifications. 
func TestSettleHoldInvoice(t *testing.T) { - defer timeout(t)() + defer timeout()() - cdb, cleanup, err := newDB() + cdb, cleanup, err := newTestChannelDB(clock.NewTestClock(time.Time{})) if err != nil { t.Fatal(err) } defer cleanup() - // Instantiate and start the invoice registry. - registry := NewRegistry(cdb, testFinalCltvRejectDelta) + // Instantiate and start the invoice ctx.registry. + cfg := RegistryConfig{ + FinalCltvRejectDelta: testFinalCltvRejectDelta, + Clock: clock.NewTestClock(testTime), + } + registry := NewRegistry(cdb, NewInvoiceExpiryWatcher(cfg.Clock), &cfg) err = registry.Start() if err != nil { @@ -371,41 +356,34 @@ func TestSettleHoldInvoice(t *testing.T) { defer allSubscriptions.Cancel() // Subscribe to the not yet existing invoice. - subscription, err := registry.SubscribeSingleInvoice(hash) + subscription, err := registry.SubscribeSingleInvoice(testInvoicePaymentHash) if err != nil { t.Fatal(err) } defer subscription.Cancel() - if subscription.hash != hash { + if subscription.hash != testInvoicePaymentHash { t.Fatalf("expected subscription for provided hash") } // Add the invoice. - invoice := &channeldb.Invoice{ - Terms: channeldb.ContractTerm{ - PaymentPreimage: channeldb.UnknownPreimage, - Value: lnwire.MilliSatoshi(100000), - }, - } - - _, err = registry.AddInvoice(invoice, hash) + _, err = registry.AddInvoice(testHodlInvoice, testInvoicePaymentHash) if err != nil { t.Fatal(err) } // We expect the open state to be sent to the single invoice subscriber. update := <-subscription.Updates - if update.Terms.State != channeldb.ContractOpen { + if update.State != channeldb.ContractOpen { t.Fatalf("expected state ContractOpen, but got %v", - update.Terms.State) + update.State) } // We expect a new invoice notification to be sent out. 
newInvoice := <-allSubscriptions.NewInvoices - if newInvoice.Terms.State != channeldb.ContractOpen { + if newInvoice.State != channeldb.ContractOpen { t.Fatalf("expected state ContractOpen, but got %v", - newInvoice.Terms.State) + newInvoice.State) } // Use slightly higher amount for accept/settle. @@ -415,88 +393,103 @@ func TestSettleHoldInvoice(t *testing.T) { // NotifyExitHopHtlc without a preimage present in the invoice registry // should be possible. - event, err := registry.NotifyExitHopHtlc( - hash, amtPaid, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, nil, + resolution, err := registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight, + getCircuitKey(0), hodlChan, testPayload, ) if err != nil { t.Fatalf("expected settle to succeed but got %v", err) } - if event != nil { + if resolution != nil { t.Fatalf("expected htlc to be held") } // Test idempotency. - event, err = registry.NotifyExitHopHtlc( - hash, amtPaid, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, nil, + resolution, err = registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight, + getCircuitKey(0), hodlChan, testPayload, ) if err != nil { t.Fatalf("expected settle to succeed but got %v", err) } - if event != nil { + if resolution != nil { t.Fatalf("expected htlc to be held") } // Test replay at a higher height. We expect the same result because it // is a replay. 
- event, err = registry.NotifyExitHopHtlc( - hash, amtPaid, testHtlcExpiry, testCurrentHeight+10, - getCircuitKey(0), hodlChan, nil, + resolution, err = registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight+10, + getCircuitKey(0), hodlChan, testPayload, ) if err != nil { t.Fatalf("expected settle to succeed but got %v", err) } - if event != nil { + if resolution != nil { t.Fatalf("expected htlc to be held") } // Test a new htlc coming in that doesn't meet the final cltv delta // requirement. It should be rejected. - event, err = registry.NotifyExitHopHtlc( - hash, amtPaid, 1, testCurrentHeight, - getCircuitKey(1), hodlChan, nil, + resolution, err = registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid, 1, testCurrentHeight, + getCircuitKey(1), hodlChan, testPayload, ) if err != nil { t.Fatalf("expected settle to succeed but got %v", err) } - if event == nil || event.Preimage != nil { - t.Fatalf("expected htlc to be canceled") + failResolution, ok := resolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + resolution) + } + if failResolution.Outcome != ResultExpiryTooSoon { + t.Fatalf("expected expiry too soon, got: %v", + failResolution.Outcome) } // We expect the accepted state to be sent to the single invoice // subscriber. For all invoice subscribers, we don't expect an update. // Those only get notified on settle. update = <-subscription.Updates - if update.Terms.State != channeldb.ContractAccepted { + if update.State != channeldb.ContractAccepted { t.Fatalf("expected state ContractAccepted, but got %v", - update.Terms.State) + update.State) } if update.AmtPaid != amtPaid { t.Fatal("invoice AmtPaid incorrect") } // Settling with preimage should succeed. 
- err = registry.SettleHodlInvoice(preimage) + err = registry.SettleHodlInvoice(testInvoicePreimage) if err != nil { t.Fatal("expected set preimage to succeed") } - hodlEvent := (<-hodlChan).(HodlEvent) - if *hodlEvent.Preimage != preimage { - t.Fatal("unexpected preimage in hodl event") + htlcResolution := (<-hodlChan).(HtlcResolution) + settleResolution, ok := htlcResolution.(*HtlcSettleResolution) + if !ok { + t.Fatalf("expected settle resolution, got: %T", + htlcResolution) + } + if settleResolution.Preimage != testInvoicePreimage { + t.Fatal("unexpected preimage in hodl resolution") } - if hodlEvent.AcceptHeight != testCurrentHeight { + if settleResolution.AcceptHeight != testCurrentHeight { t.Fatalf("expected acceptHeight %v, but got %v", - testCurrentHeight, event.AcceptHeight) + testCurrentHeight, settleResolution.AcceptHeight) + } + if settleResolution.Outcome != ResultSettled { + t.Fatalf("expected result settled, got: %v", + settleResolution.Outcome) } // We expect a settled notification to be sent out for both all and // single invoice subscribers. settledInvoice := <-allSubscriptions.SettledInvoices - if settledInvoice.Terms.State != channeldb.ContractSettled { + if settledInvoice.State != channeldb.ContractSettled { t.Fatalf("expected state ContractSettled, but got %v", - settledInvoice.Terms.State) + settledInvoice.State) } if settledInvoice.AmtPaid != amtPaid { t.Fatalf("expected amount to be %v, but got %v", @@ -504,19 +497,19 @@ func TestSettleHoldInvoice(t *testing.T) { } update = <-subscription.Updates - if update.Terms.State != channeldb.ContractSettled { + if update.State != channeldb.ContractSettled { t.Fatalf("expected state ContractSettled, but got %v", - update.Terms.State) + update.State) } // Idempotency. - err = registry.SettleHodlInvoice(preimage) + err = registry.SettleHodlInvoice(testInvoicePreimage) if err != channeldb.ErrInvoiceAlreadySettled { t.Fatalf("expected ErrInvoiceAlreadySettled but got %v", err) } // Try to cancel. 
- err = registry.CancelInvoice(hash) + err = registry.CancelInvoice(testInvoicePaymentHash) if err == nil { t.Fatal("expected cancelation of a settled invoice to fail") } @@ -525,16 +518,20 @@ func TestSettleHoldInvoice(t *testing.T) { // TestCancelHoldInvoice tests canceling of a hold invoice and related // notifications. func TestCancelHoldInvoice(t *testing.T) { - defer timeout(t)() + defer timeout()() - cdb, cleanup, err := newDB() + cdb, cleanup, err := newTestChannelDB(clock.NewTestClock(time.Time{})) if err != nil { t.Fatal(err) } defer cleanup() - // Instantiate and start the invoice registry. - registry := NewRegistry(cdb, testFinalCltvRejectDelta) + // Instantiate and start the invoice ctx.registry. + cfg := RegistryConfig{ + FinalCltvRejectDelta: testFinalCltvRejectDelta, + Clock: clock.NewTestClock(testTime), + } + registry := NewRegistry(cdb, NewInvoiceExpiryWatcher(cfg.Clock), &cfg) err = registry.Start() if err != nil { @@ -543,14 +540,7 @@ func TestCancelHoldInvoice(t *testing.T) { defer registry.Stop() // Add the invoice. - invoice := &channeldb.Invoice{ - Terms: channeldb.ContractTerm{ - PaymentPreimage: channeldb.UnknownPreimage, - Value: lnwire.MilliSatoshi(100000), - }, - } - - _, err = registry.AddInvoice(invoice, hash) + _, err = registry.AddInvoice(testHodlInvoice, testInvoicePaymentHash) if err != nil { t.Fatal(err) } @@ -560,88 +550,395 @@ func TestCancelHoldInvoice(t *testing.T) { // NotifyExitHopHtlc without a preimage present in the invoice registry // should be possible. - event, err := registry.NotifyExitHopHtlc( - hash, amtPaid, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, nil, + resolution, err := registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight, + getCircuitKey(0), hodlChan, testPayload, ) if err != nil { t.Fatalf("expected settle to succeed but got %v", err) } - if event != nil { + if resolution != nil { t.Fatalf("expected htlc to be held") } // Cancel invoice. 
- err = registry.CancelInvoice(hash) + err = registry.CancelInvoice(testInvoicePaymentHash) if err != nil { t.Fatal("cancel invoice failed") } - hodlEvent := (<-hodlChan).(HodlEvent) - if hodlEvent.Preimage != nil { - t.Fatal("expected cancel hodl event") + htlcResolution := (<-hodlChan).(HtlcResolution) + _, ok := htlcResolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + htlcResolution) } // Offering the same htlc again at a higher height should still result // in a rejection. The accept height is expected to be the original // accept height. - event, err = registry.NotifyExitHopHtlc( - hash, amtPaid, testHtlcExpiry, testCurrentHeight+1, - getCircuitKey(0), hodlChan, nil, + resolution, err = registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amtPaid, testHtlcExpiry, testCurrentHeight+1, + getCircuitKey(0), hodlChan, testPayload, ) if err != nil { t.Fatalf("expected settle to succeed but got %v", err) } - if event.Preimage != nil { - t.Fatalf("expected htlc to be canceled") + failResolution, ok := resolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + resolution) } - if event.AcceptHeight != testCurrentHeight { + if failResolution.AcceptHeight != testCurrentHeight { t.Fatalf("expected acceptHeight %v, but got %v", - testCurrentHeight, event.AcceptHeight) + testCurrentHeight, failResolution.AcceptHeight) + } + if failResolution.Outcome != ResultReplayToCanceled { + t.Fatalf("expected replay to canceled, got %v", + failResolution.Outcome) + } +} + +// TestUnknownInvoice tests that invoice registry returns an error when the +// invoice is unknown. This is to guard against returning a cancel htlc +// resolution for forwarded htlcs. In the link, NotifyExitHopHtlc is only called +// if we are the exit hop, but in htlcIncomingContestResolver it is called with +// forwarded htlc hashes as well. 
+func TestUnknownInvoice(t *testing.T) { + ctx := newTestContext(t) + defer ctx.cleanup() + + // Notify arrival of a new htlc paying to this invoice. This should + // succeed. + hodlChan := make(chan interface{}) + amt := lnwire.MilliSatoshi(100000) + resolution, err := ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, amt, testHtlcExpiry, testCurrentHeight, + getCircuitKey(0), hodlChan, testPayload, + ) + if err != nil { + t.Fatal("unexpected error") + } + failResolution, ok := resolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + resolution) + } + if failResolution.Outcome != ResultInvoiceNotFound { + t.Fatalf("expected ResultInvoiceNotFound, got: %v", + failResolution.Outcome) + } +} + +// TestKeySend tests receiving a spontaneous payment with and without keysend +// enabled. +func TestKeySend(t *testing.T) { + t.Run("enabled", func(t *testing.T) { + testKeySend(t, true) + }) + t.Run("disabled", func(t *testing.T) { + testKeySend(t, false) + }) +} + +// testKeySend is the inner test function that tests keysend for a particular +// enabled state on the receiver end. +func testKeySend(t *testing.T, keySendEnabled bool) { + defer timeout()() + + ctx := newTestContext(t) + defer ctx.cleanup() + + ctx.registry.cfg.AcceptKeySend = keySendEnabled + + allSubscriptions := ctx.registry.SubscribeNotifications(0, 0) + defer allSubscriptions.Cancel() + + hodlChan := make(chan interface{}, 1) + + amt := lnwire.MilliSatoshi(1000) + expiry := uint32(testCurrentHeight + 20) + + // Create key for keysend. + preimage := lntypes.Preimage{1, 2, 3} + hash := preimage.Hash() + + // Try to settle invoice with an invalid keysend htlc. 
+ invalidKeySendPayload := &mockPayload{ + customRecords: map[uint64][]byte{ + record.KeySendType: {1, 2, 3}, + }, + } + + resolution, err := ctx.registry.NotifyExitHopHtlc( + hash, amt, expiry, + testCurrentHeight, getCircuitKey(10), hodlChan, + invalidKeySendPayload, + ) + if err != nil { + t.Fatal(err) + } + failResolution, ok := resolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + resolution) + } + + switch { + case !keySendEnabled && failResolution.Outcome != ResultInvoiceNotFound: + t.Fatal("expected invoice not found outcome") + + case keySendEnabled && failResolution.Outcome != ResultKeySendError: + t.Fatal("expected keysend error") + } + + // Try to settle invoice with a valid keysend htlc. + keySendPayload := &mockPayload{ + customRecords: map[uint64][]byte{ + record.KeySendType: preimage[:], + }, + } + + resolution, err = ctx.registry.NotifyExitHopHtlc( + hash, amt, expiry, + testCurrentHeight, getCircuitKey(10), hodlChan, keySendPayload, + ) + if err != nil { + t.Fatal(err) + } + + // Expect a cancel resolution if keysend is disabled. + if !keySendEnabled { + failResolution, ok = resolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + resolution) + } + if failResolution.Outcome != ResultInvoiceNotFound { + t.Fatal("expected keysend payment not to be accepted") + } + return + } + + // Otherwise we expect no error and a settle resolution for the htlc. + settleResolution, ok := resolution.(*HtlcSettleResolution) + if !ok { + t.Fatalf("expected settle resolution, got: %T", + resolution) + } + if settleResolution.Preimage != preimage { + t.Fatalf("expected settle with matching preimage") + } + + // We expect a new invoice notification to be sent out. + newInvoice := <-allSubscriptions.NewInvoices + if newInvoice.State != channeldb.ContractOpen { + t.Fatalf("expected state ContractOpen, but got %v", + newInvoice.State) + } + + // We expect a settled notification to be sent out. 
+ settledInvoice := <-allSubscriptions.SettledInvoices + if settledInvoice.State != channeldb.ContractSettled { + t.Fatalf("expected state ContractOpen, but got %v", + settledInvoice.State) } } -func newDB() (*channeldb.DB, func(), error) { - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, err := ioutil.TempDir("", "channeldb") +// TestMppPayment tests settling of an invoice with multiple partial payments. +// It covers the case where there is a mpp timeout before the whole invoice is +// paid and the case where the invoice is settled in time. +func TestMppPayment(t *testing.T) { + defer timeout()() + + ctx := newTestContext(t) + defer ctx.cleanup() + + // Add the invoice. + _, err := ctx.registry.AddInvoice(testInvoice, testInvoicePaymentHash) + if err != nil { + t.Fatal(err) + } + + mppPayload := &mockPayload{ + mpp: record.NewMPP(testInvoiceAmt, [32]byte{}), + } + + // Send htlc 1. + hodlChan1 := make(chan interface{}, 1) + resolution, err := ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, testInvoice.Terms.Value/2, + testHtlcExpiry, + testCurrentHeight, getCircuitKey(10), hodlChan1, mppPayload, + ) if err != nil { - return nil, nil, err + t.Fatal(err) + } + if resolution != nil { + t.Fatal("expected no direct resolution") + } + + // Simulate mpp timeout releasing htlc 1. + ctx.clock.SetTime(testTime.Add(30 * time.Second)) + + htlcResolution := (<-hodlChan1).(HtlcResolution) + failResolution, ok := htlcResolution.(*HtlcFailResolution) + if !ok { + t.Fatalf("expected fail resolution, got: %T", + resolution) + } + if failResolution.Outcome != ResultMppTimeout { + t.Fatalf("expected mpp timeout, got: %v", + failResolution.Outcome) } - // Next, create channeldb for the first time. - cdb, err := channeldb.Open(tempDirName) + // Send htlc 2. 
+ hodlChan2 := make(chan interface{}, 1) + resolution, err = ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, testInvoice.Terms.Value/2, + testHtlcExpiry, + testCurrentHeight, getCircuitKey(11), hodlChan2, mppPayload, + ) if err != nil { - os.RemoveAll(tempDirName) - return nil, nil, err + t.Fatal(err) + } + if resolution != nil { + t.Fatal("expected no direct resolution") } - cleanUp := func() { - cdb.Close() - os.RemoveAll(tempDirName) + // Send htlc 3. + hodlChan3 := make(chan interface{}, 1) + resolution, err = ctx.registry.NotifyExitHopHtlc( + testInvoicePaymentHash, testInvoice.Terms.Value/2, + testHtlcExpiry, + testCurrentHeight, getCircuitKey(12), hodlChan3, mppPayload, + ) + if err != nil { + t.Fatal(err) + } + settleResolution, ok := resolution.(*HtlcSettleResolution) + if !ok { + t.Fatalf("expected settle resolution, got: %T", + htlcResolution) + } + if settleResolution.Outcome != ResultSettled { + t.Fatalf("expected result settled, got: %v", + settleResolution.Outcome) } - return cdb, cleanUp, nil + // Check that settled amount is equal to the sum of values of the htlcs + // 2 and 3. + inv, err := ctx.registry.LookupInvoice(testInvoicePaymentHash) + if err != nil { + t.Fatal(err) + } + if inv.State != channeldb.ContractSettled { + t.Fatal("expected invoice to be settled") + } + if inv.AmtPaid != testInvoice.Terms.Value { + t.Fatalf("amount incorrect, expected %v but got %v", + testInvoice.Terms.Value, inv.AmtPaid) + } } -// TestUnknownInvoice tests that invoice registry returns an error when the -// invoice is unknown. This is to guard against returning a cancel hodl event -// for forwarded htlcs. In the link, NotifyExitHopHtlc is only called if we are -// the exit hop, but in htlcIncomingContestResolver it is called with forwarded -// htlc hashes as well. -func TestUnknownInvoice(t *testing.T) { - registry, cleanup := newTestContext(t) +// Tests that invoices are canceled after expiration. 
+func TestInvoiceExpiryWithRegistry(t *testing.T) { + t.Parallel() + + cdb, cleanup, err := newTestChannelDB(clock.NewTestClock(time.Time{})) defer cleanup() - // Notify arrival of a new htlc paying to this invoice. This should - // succeed. - hodlChan := make(chan interface{}) - amt := lnwire.MilliSatoshi(100000) - _, err := registry.NotifyExitHopHtlc( - hash, amt, testHtlcExpiry, testCurrentHeight, - getCircuitKey(0), hodlChan, nil, + if err != nil { + t.Fatal(err) + } + + testClock := clock.NewTestClock(testTime) + + cfg := RegistryConfig{ + FinalCltvRejectDelta: testFinalCltvRejectDelta, + Clock: testClock, + } + + expiryWatcher := NewInvoiceExpiryWatcher(cfg.Clock) + registry := NewRegistry(cdb, expiryWatcher, &cfg) + + // First prefill the Channel DB with some pre-existing invoices, + // half of them still pending, half of them expired. + const numExpired = 5 + const numPending = 5 + existingInvoices := generateInvoiceExpiryTestData( + t, testTime, 0, numExpired, numPending, ) - if err != channeldb.ErrInvoiceNotFound { - t.Fatal("expected invoice not found error") + + var expectedCancellations []lntypes.Hash + + for paymentHash, expiredInvoice := range existingInvoices.expiredInvoices { + if _, err := cdb.AddInvoice(expiredInvoice, paymentHash); err != nil { + t.Fatalf("cannot add invoice to channel db: %v", err) + } + expectedCancellations = append(expectedCancellations, paymentHash) + } + + for paymentHash, pendingInvoice := range existingInvoices.pendingInvoices { + if _, err := cdb.AddInvoice(pendingInvoice, paymentHash); err != nil { + t.Fatalf("cannot add invoice to channel db: %v", err) + } + } + + if err = registry.Start(); err != nil { + t.Fatalf("cannot start registry: %v", err) + } + + // Now generate pending and invoices and add them to the registry while + // it is up and running. We'll manipulate the clock to let them expire. 
+ newInvoices := generateInvoiceExpiryTestData( + t, testTime, numExpired+numPending, 0, numPending, + ) + + var invoicesThatWillCancel []lntypes.Hash + for paymentHash, pendingInvoice := range newInvoices.pendingInvoices { + _, err := registry.AddInvoice(pendingInvoice, paymentHash) + invoicesThatWillCancel = append(invoicesThatWillCancel, paymentHash) + if err != nil { + t.Fatal(err) + } + } + + // Check that they are really not canceled until before the clock is + // advanced. + for i := range invoicesThatWillCancel { + invoice, err := registry.LookupInvoice(invoicesThatWillCancel[i]) + if err != nil { + t.Fatalf("cannot find invoice: %v", err) + } + + if invoice.State == channeldb.ContractCanceled { + t.Fatalf("expected pending invoice, got canceled") + } + } + + // Fwd time 1 day. + testClock.SetTime(testTime.Add(24 * time.Hour)) + + // Give some time to the watcher to cancel everything. + time.Sleep(500 * time.Millisecond) + registry.Stop() + + // Create the expected cancellation set before the final check. + expectedCancellations = append( + expectedCancellations, invoicesThatWillCancel..., + ) + + // Retrospectively check that all invoices that were expected to be canceled + // are indeed canceled. + for i := range expectedCancellations { + invoice, err := registry.LookupInvoice(expectedCancellations[i]) + if err != nil { + t.Fatalf("cannot find invoice: %v", err) + } + + if invoice.State != channeldb.ContractCanceled { + t.Fatalf("expected canceled invoice, got: %v", invoice.State) + } } } diff --git a/invoices/resolution.go b/invoices/resolution.go new file mode 100644 index 0000000000..e6f3133036 --- /dev/null +++ b/invoices/resolution.go @@ -0,0 +1,125 @@ +package invoices + +import ( + "time" + + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/lntypes" +) + +// HtlcResolution describes how an htlc should be resolved. 
+type HtlcResolution interface { + // CircuitKey returns the circuit key for the htlc that we have a + // resolution for. + CircuitKey() channeldb.CircuitKey +} + +// HtlcFailResolution is an implementation of the HtlcResolution interface +// which is returned when a htlc is failed. +type HtlcFailResolution struct { + // circuitKey is the key of the htlc for which we have a resolution. + circuitKey channeldb.CircuitKey + + // AcceptHeight is the original height at which the htlc was accepted. + AcceptHeight int32 + + // Outcome indicates the outcome of the invoice registry update. + Outcome FailResolutionResult +} + +// NewFailResolution returns a htlc failure resolution. +func NewFailResolution(key channeldb.CircuitKey, + acceptHeight int32, outcome FailResolutionResult) *HtlcFailResolution { + + return &HtlcFailResolution{ + circuitKey: key, + AcceptHeight: acceptHeight, + Outcome: outcome, + } +} + +// CircuitKey returns the circuit key for the htlc that we have a +// resolution for. +// +// Note: it is part of the HtlcResolution interface. +func (f *HtlcFailResolution) CircuitKey() channeldb.CircuitKey { + return f.circuitKey +} + +// HtlcSettleResolution is an implementation of the HtlcResolution interface +// which is returned when a htlc is settled. +type HtlcSettleResolution struct { + // Preimage is the htlc preimage. Its value is nil in case of a cancel. + Preimage lntypes.Preimage + + // circuitKey is the key of the htlc for which we have a resolution. + circuitKey channeldb.CircuitKey + + // acceptHeight is the original height at which the htlc was accepted. + AcceptHeight int32 + + // Outcome indicates the outcome of the invoice registry update. + Outcome SettleResolutionResult +} + +// NewSettleResolution returns a htlc resolution which is associated with a +// settle. 
+func NewSettleResolution(preimage lntypes.Preimage, + key channeldb.CircuitKey, acceptHeight int32, + outcome SettleResolutionResult) *HtlcSettleResolution { + + return &HtlcSettleResolution{ + Preimage: preimage, + circuitKey: key, + AcceptHeight: acceptHeight, + Outcome: outcome, + } +} + +// CircuitKey returns the circuit key for the htlc that we have a +// resolution for. +// +// Note: it is part of the HtlcResolution interface. +func (s *HtlcSettleResolution) CircuitKey() channeldb.CircuitKey { + return s.circuitKey +} + +// htlcAcceptResolution is an implementation of the HtlcResolution interface +// which is returned when a htlc is accepted. This struct is not exported +// because the codebase uses a nil resolution to indicate that a htlc was +// accepted. This struct is used internally in the invoice registry to +// surface accept resolution results. When an invoice update returns an +// acceptResolution, a nil resolution should be surfaced. +type htlcAcceptResolution struct { + // circuitKey is the key of the htlc for which we have a resolution. + circuitKey channeldb.CircuitKey + + // autoRelease signals that the htlc should be automatically released + // after a timeout. + autoRelease bool + + // acceptTime is the time at which this htlc was accepted. + acceptTime time.Time + + // outcome indicates the outcome of the invoice registry update. + outcome acceptResolutionResult +} + +// newAcceptResolution returns a htlc resolution which is associated with a +// htlc accept. +func newAcceptResolution(key channeldb.CircuitKey, + outcome acceptResolutionResult) *htlcAcceptResolution { + + return &htlcAcceptResolution{ + circuitKey: key, + outcome: outcome, + } +} + +// CircuitKey returns the circuit key for the htlc that we have a +// resolution for. +// +// Note: it is part of the HtlcResolution interface. 
+func (a *htlcAcceptResolution) CircuitKey() channeldb.CircuitKey { + return a.circuitKey +} diff --git a/invoices/resolution_result.go b/invoices/resolution_result.go new file mode 100644 index 0000000000..41da902a61 --- /dev/null +++ b/invoices/resolution_result.go @@ -0,0 +1,202 @@ +package invoices + +// acceptResolutionResult provides metadata which about a htlc that was +// accepted by the registry. +type acceptResolutionResult uint8 + +const ( + resultInvalidAccept acceptResolutionResult = iota + + // resultReplayToAccepted is returned when we replay an accepted + // invoice. + resultReplayToAccepted + + // resultDuplicateToAccepted is returned when we accept a duplicate + // htlc. + resultDuplicateToAccepted + + // resultAccepted is returned when we accept a hodl invoice. + resultAccepted + + // resultPartialAccepted is returned when we have partially received + // payment. + resultPartialAccepted +) + +// String returns a string representation of the result. +func (a acceptResolutionResult) String() string { + switch a { + case resultInvalidAccept: + return "invalid accept result" + + case resultReplayToAccepted: + return "replayed htlc to accepted invoice" + + case resultDuplicateToAccepted: + return "accepting duplicate payment to accepted invoice" + + case resultAccepted: + return "accepted" + + case resultPartialAccepted: + return "partial payment accepted" + + default: + return "unknown accept resolution result" + } +} + +// FailResolutionResult provides metadata about a htlc that was failed by +// the registry. It can be used to take custom actions on resolution of the +// htlc. +type FailResolutionResult uint8 + +const ( + resultInvalidFailure FailResolutionResult = iota + + // ResultReplayToCanceled is returned when we replay a canceled invoice. + ResultReplayToCanceled + + // ResultInvoiceAlreadyCanceled is returned when trying to pay an + // invoice that is already canceled. 
+ ResultInvoiceAlreadyCanceled + + // ResultAmountTooLow is returned when an invoice is underpaid. + ResultAmountTooLow + + // ResultExpiryTooSoon is returned when we do not accept an invoice + // payment because it expires too soon. + ResultExpiryTooSoon + + // ResultCanceled is returned when we cancel an invoice and its + // associated htlcs. + ResultCanceled + + // ResultInvoiceNotOpen is returned when a mpp invoice is not open. + ResultInvoiceNotOpen + + // ResultMppTimeout is returned when an invoice paid with multiple + // partial payments times out before it is fully paid. + ResultMppTimeout + + // ResultAddressMismatch is returned when the payment address for a mpp + // invoice does not match. + ResultAddressMismatch + + // ResultHtlcSetTotalMismatch is returned when the amount paid by a + // htlc does not match its set total. + ResultHtlcSetTotalMismatch + + // ResultHtlcSetTotalTooLow is returned when a mpp set total is too low + // for an invoice. + ResultHtlcSetTotalTooLow + + // ResultHtlcSetOverpayment is returned when a mpp set is overpaid. + ResultHtlcSetOverpayment + + // ResultInvoiceNotFound is returned when an attempt is made to pay an + // invoice that is unknown to us. + ResultInvoiceNotFound + + // ResultKeySendError is returned when we receive invalid keysend + // parameters. + ResultKeySendError + + // ResultMppInProgress is returned when we are busy receiving a mpp + // payment. + ResultMppInProgress +) + +// FailureString returns a string representation of the result. +// +// Note: it is part of the FailureDetail interface. 
+func (f FailResolutionResult) FailureString() string { + switch f { + case resultInvalidFailure: + return "invalid failure result" + + case ResultReplayToCanceled: + return "replayed htlc to canceled invoice" + + case ResultInvoiceAlreadyCanceled: + return "invoice already canceled" + + case ResultAmountTooLow: + return "amount too low" + + case ResultExpiryTooSoon: + return "expiry too soon" + + case ResultCanceled: + return "canceled" + + case ResultInvoiceNotOpen: + return "invoice no longer open" + + case ResultMppTimeout: + return "mpp timeout" + + case ResultAddressMismatch: + return "payment address mismatch" + + case ResultHtlcSetTotalMismatch: + return "htlc total amt doesn't match set total" + + case ResultHtlcSetTotalTooLow: + return "set total too low for invoice" + + case ResultHtlcSetOverpayment: + return "mpp is overpaying set total" + + case ResultInvoiceNotFound: + return "invoice not found" + + case ResultKeySendError: + return "invalid keysend parameters" + + case ResultMppInProgress: + return "mpp reception in progress" + + default: + return "unknown failure resolution result" + } +} + +// SettleResolutionResult provides metadata which about a htlc that was failed +// by the registry. It can be used to take custom actions on resolution of the +// htlc. +type SettleResolutionResult uint8 + +const ( + resultInvalidSettle SettleResolutionResult = iota + + // ResultSettled is returned when we settle an invoice. + ResultSettled + + // ResultReplayToSettled is returned when we replay a settled invoice. + ResultReplayToSettled + + // ResultDuplicateToSettled is returned when we settle an invoice which + // has already been settled at least once. + ResultDuplicateToSettled +) + +// String returns a string representation of the result. 
+func (s SettleResolutionResult) String() string { + switch s { + case resultInvalidSettle: + return "invalid settle result" + + case ResultSettled: + return "settled" + + case ResultReplayToSettled: + return "replayed htlc to settled invoice" + + case ResultDuplicateToSettled: + return "accepting duplicate payment to settled invoice" + + default: + return "unknown settle resolution result" + } +} diff --git a/invoices/test_utils_test.go b/invoices/test_utils_test.go new file mode 100644 index 0000000000..cf0f14ea1b --- /dev/null +++ b/invoices/test_utils_test.go @@ -0,0 +1,288 @@ +package invoices + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "runtime/pprof" + "testing" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" + "github.com/lightningnetwork/lnd/zpay32" +) + +type mockPayload struct { + mpp *record.MPP + customRecords record.CustomSet +} + +func (p *mockPayload) MultiPath() *record.MPP { + return p.mpp +} + +func (p *mockPayload) CustomRecords() record.CustomSet { + // This function should always return a map instance, but for mock + // configuration we do accept nil. 
+ if p.customRecords == nil { + return make(record.CustomSet) + } + + return p.customRecords +} + +var ( + testTimeout = 5 * time.Second + + testTime = time.Date(2018, time.February, 2, 14, 0, 0, 0, time.UTC) + + testInvoicePreimage = lntypes.Preimage{1} + + testInvoicePaymentHash = testInvoicePreimage.Hash() + + testHtlcExpiry = uint32(5) + + testInvoiceCltvDelta = uint32(4) + + testFinalCltvRejectDelta = int32(4) + + testCurrentHeight = int32(1) + + testPrivKeyBytes, _ = hex.DecodeString( + "e126f68f7eafcc8b74f54d269fe206be715000f94dac067d1c04a8ca3b2db734") + + testPrivKey, _ = btcec.PrivKeyFromBytes( + btcec.S256(), testPrivKeyBytes) + + testInvoiceDescription = "coffee" + + testInvoiceAmount = lnwire.MilliSatoshi(100000) + + testNetParams = &chaincfg.MainNetParams + + testMessageSigner = zpay32.MessageSigner{ + SignCompact: func(hash []byte) ([]byte, error) { + sig, err := btcec.SignCompact(btcec.S256(), testPrivKey, hash, true) + if err != nil { + return nil, fmt.Errorf("can't sign the message: %v", err) + } + return sig, nil + }, + } + + testFeatures = lnwire.NewFeatureVector( + nil, lnwire.Features, + ) + + testPayload = &mockPayload{} + + testInvoiceCreationDate = testTime +) + +var ( + testInvoiceAmt = lnwire.MilliSatoshi(100000) + testInvoice = &channeldb.Invoice{ + Terms: channeldb.ContractTerm{ + PaymentPreimage: testInvoicePreimage, + Value: testInvoiceAmt, + Expiry: time.Hour, + Features: testFeatures, + }, + CreationDate: testInvoiceCreationDate, + } + + testHodlInvoice = &channeldb.Invoice{ + Terms: channeldb.ContractTerm{ + PaymentPreimage: channeldb.UnknownPreimage, + Value: testInvoiceAmt, + Expiry: time.Hour, + Features: testFeatures, + }, + CreationDate: testInvoiceCreationDate, + } +) + +func newTestChannelDB(clock clock.Clock) (*channeldb.DB, func(), error) { + // First, create a temporary directory to be used for the duration of + // this test. 
+ tempDirName, err := ioutil.TempDir("", "channeldb") + if err != nil { + return nil, nil, err + } + + // Next, create channeldb for the first time. + cdb, err := channeldb.Open( + tempDirName, channeldb.OptionClock(clock), + ) + if err != nil { + os.RemoveAll(tempDirName) + return nil, nil, err + } + + cleanUp := func() { + cdb.Close() + os.RemoveAll(tempDirName) + } + + return cdb, cleanUp, nil +} + +type testContext struct { + cdb *channeldb.DB + registry *InvoiceRegistry + clock *clock.TestClock + + cleanup func() + t *testing.T +} + +func newTestContext(t *testing.T) *testContext { + clock := clock.NewTestClock(testTime) + + cdb, cleanup, err := newTestChannelDB(clock) + if err != nil { + t.Fatal(err) + } + + expiryWatcher := NewInvoiceExpiryWatcher(clock) + + // Instantiate and start the invoice ctx.registry. + cfg := RegistryConfig{ + FinalCltvRejectDelta: testFinalCltvRejectDelta, + HtlcHoldDuration: 30 * time.Second, + Clock: clock, + } + registry := NewRegistry(cdb, expiryWatcher, &cfg) + + err = registry.Start() + if err != nil { + cleanup() + t.Fatal(err) + } + + ctx := testContext{ + cdb: cdb, + registry: registry, + clock: clock, + t: t, + cleanup: func() { + registry.Stop() + cleanup() + }, + } + + return &ctx +} + +func getCircuitKey(htlcID uint64) channeldb.CircuitKey { + return channeldb.CircuitKey{ + ChanID: lnwire.ShortChannelID{ + BlockHeight: 1, TxIndex: 2, TxPosition: 3, + }, + HtlcID: htlcID, + } +} + +func newTestInvoice(t *testing.T, preimage lntypes.Preimage, + timestamp time.Time, expiry time.Duration) *channeldb.Invoice { + + if expiry == 0 { + expiry = time.Hour + } + + rawInvoice, err := zpay32.NewInvoice( + testNetParams, + preimage.Hash(), + timestamp, + zpay32.Amount(testInvoiceAmount), + zpay32.Description(testInvoiceDescription), + zpay32.Expiry(expiry)) + + if err != nil { + t.Fatalf("Error while creating new invoice: %v", err) + } + + paymentRequest, err := rawInvoice.Encode(testMessageSigner) + + if err != nil { + 
t.Fatalf("Error while encoding payment request: %v", err) + } + + return &channeldb.Invoice{ + Terms: channeldb.ContractTerm{ + PaymentPreimage: preimage, + Value: testInvoiceAmount, + Expiry: expiry, + Features: testFeatures, + }, + PaymentRequest: []byte(paymentRequest), + CreationDate: timestamp, + } +} + +// timeout implements a test level timeout. +func timeout() func() { + done := make(chan struct{}) + + go func() { + select { + case <-time.After(5 * time.Second): + err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) + if err != nil { + panic(fmt.Sprintf("error writing to std out after timeout: %v", err)) + } + panic("timeout") + case <-done: + } + }() + + return func() { + close(done) + } +} + +// invoiceExpiryTestData simply holds generated expired and pending invoices. +type invoiceExpiryTestData struct { + expiredInvoices map[lntypes.Hash]*channeldb.Invoice + pendingInvoices map[lntypes.Hash]*channeldb.Invoice +} + +// generateInvoiceExpiryTestData generates the specified number of fake expired +// and pending invoices anchored to the passed now timestamp. 
+func generateInvoiceExpiryTestData( + t *testing.T, now time.Time, + offset, numExpired, numPending int) invoiceExpiryTestData { + + var testData invoiceExpiryTestData + + testData.expiredInvoices = make(map[lntypes.Hash]*channeldb.Invoice) + testData.pendingInvoices = make(map[lntypes.Hash]*channeldb.Invoice) + + expiredCreationDate := now.Add(-24 * time.Hour) + + for i := 1; i <= numExpired; i++ { + var preimage lntypes.Preimage + binary.BigEndian.PutUint32(preimage[:4], uint32(offset+i)) + expiry := time.Duration((i+offset)%24) * time.Hour + invoice := newTestInvoice(t, preimage, expiredCreationDate, expiry) + testData.expiredInvoices[preimage.Hash()] = invoice + } + + for i := 1; i <= numPending; i++ { + var preimage lntypes.Preimage + binary.BigEndian.PutUint32(preimage[4:], uint32(offset+i)) + expiry := time.Duration((i+offset)%24) * time.Hour + invoice := newTestInvoice(t, preimage, now, expiry) + testData.pendingInvoices[preimage.Hash()] = invoice + } + + return testData +} diff --git a/invoices/update.go b/invoices/update.go new file mode 100644 index 0000000000..3226779ce1 --- /dev/null +++ b/invoices/update.go @@ -0,0 +1,285 @@ +package invoices + +import ( + "errors" + + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" +) + +// invoiceUpdateCtx is an object that describes the context for the invoice +// update to be carried out. +type invoiceUpdateCtx struct { + hash lntypes.Hash + circuitKey channeldb.CircuitKey + amtPaid lnwire.MilliSatoshi + expiry uint32 + currentHeight int32 + finalCltvRejectDelta int32 + customRecords record.CustomSet + mpp *record.MPP +} + +// log logs a message specific to this update context. 
+func (i *invoiceUpdateCtx) log(s string) { + log.Debugf("Invoice(%x): %v, amt=%v, expiry=%v, circuit=%v, mpp=%v", + i.hash[:], s, i.amtPaid, i.expiry, i.circuitKey, i.mpp) +} + +// failRes is a helper function which creates a failure resolution with +// the information contained in the invoiceUpdateCtx and the fail resolution +// result provided. +func (i invoiceUpdateCtx) failRes(outcome FailResolutionResult) *HtlcFailResolution { + return NewFailResolution(i.circuitKey, i.currentHeight, outcome) +} + +// settleRes is a helper function which creates a settle resolution with +// the information contained in the invoiceUpdateCtx and the preimage and +// the settle resolution result provided. +func (i invoiceUpdateCtx) settleRes(preimage lntypes.Preimage, + outcome SettleResolutionResult) *HtlcSettleResolution { + + return NewSettleResolution( + preimage, i.circuitKey, i.currentHeight, outcome, + ) +} + +// acceptRes is a helper function which creates an accept resolution with +// the information contained in the invoiceUpdateCtx and the accept resolution +// result provided. +func (i invoiceUpdateCtx) acceptRes(outcome acceptResolutionResult) *htlcAcceptResolution { + return newAcceptResolution(i.circuitKey, outcome) +} + +// updateInvoice is a callback for DB.UpdateInvoice that contains the invoice +// settlement logic. It returns a hltc resolution that indicates what the +// outcome of the update was. +func updateInvoice(ctx *invoiceUpdateCtx, inv *channeldb.Invoice) ( + *channeldb.InvoiceUpdateDesc, HtlcResolution, error) { + + // Don't update the invoice when this is a replayed htlc. 
+ htlc, ok := inv.Htlcs[ctx.circuitKey] + if ok { + switch htlc.State { + case channeldb.HtlcStateCanceled: + return nil, ctx.failRes(ResultReplayToCanceled), nil + + case channeldb.HtlcStateAccepted: + return nil, ctx.acceptRes(resultReplayToAccepted), nil + + case channeldb.HtlcStateSettled: + return nil, ctx.settleRes( + inv.Terms.PaymentPreimage, + ResultReplayToSettled, + ), nil + + default: + return nil, nil, errors.New("unknown htlc state") + } + } + + if ctx.mpp == nil { + return updateLegacy(ctx, inv) + } + + return updateMpp(ctx, inv) +} + +// updateMpp is a callback for DB.UpdateInvoice that contains the invoice +// settlement logic for mpp payments. +func updateMpp(ctx *invoiceUpdateCtx, + inv *channeldb.Invoice) (*channeldb.InvoiceUpdateDesc, + HtlcResolution, error) { + + // Start building the accept descriptor. + acceptDesc := &channeldb.HtlcAcceptDesc{ + Amt: ctx.amtPaid, + Expiry: ctx.expiry, + AcceptHeight: ctx.currentHeight, + MppTotalAmt: ctx.mpp.TotalMsat(), + CustomRecords: ctx.customRecords, + } + + // Only accept payments to open invoices. This behaviour differs from + // non-mpp payments that are accepted even after the invoice is settled. + // Because non-mpp payments don't have a payment address, this is needed + // to thwart probing. + if inv.State != channeldb.ContractOpen { + return nil, ctx.failRes(ResultInvoiceNotOpen), nil + } + + // Check the payment address that authorizes the payment. + if ctx.mpp.PaymentAddr() != inv.Terms.PaymentAddr { + return nil, ctx.failRes(ResultAddressMismatch), nil + } + + // Don't accept zero-valued sets. + if ctx.mpp.TotalMsat() == 0 { + return nil, ctx.failRes(ResultHtlcSetTotalTooLow), nil + } + + // Check that the total amt of the htlc set is high enough. In case this + // is a zero-valued invoice, it will always be enough. + if ctx.mpp.TotalMsat() < inv.Terms.Value { + return nil, ctx.failRes(ResultHtlcSetTotalTooLow), nil + } + + // Check whether total amt matches other htlcs in the set. 
+ var newSetTotal lnwire.MilliSatoshi + for _, htlc := range inv.Htlcs { + // Only consider accepted mpp htlcs. It is possible that there + // are htlcs registered in the invoice database that previously + // timed out and are in the canceled state now. + if htlc.State != channeldb.HtlcStateAccepted { + continue + } + + if ctx.mpp.TotalMsat() != htlc.MppTotalAmt { + return nil, ctx.failRes(ResultHtlcSetTotalMismatch), nil + } + + newSetTotal += htlc.Amt + } + + // Add amount of new htlc. + newSetTotal += ctx.amtPaid + + // Make sure the communicated set total isn't overpaid. + if newSetTotal > ctx.mpp.TotalMsat() { + return nil, ctx.failRes(ResultHtlcSetOverpayment), nil + } + + // The invoice is still open. Check the expiry. + if ctx.expiry < uint32(ctx.currentHeight+ctx.finalCltvRejectDelta) { + return nil, ctx.failRes(ResultExpiryTooSoon), nil + } + + if ctx.expiry < uint32(ctx.currentHeight+inv.Terms.FinalCltvDelta) { + return nil, ctx.failRes(ResultExpiryTooSoon), nil + } + + // Record HTLC in the invoice database. + newHtlcs := map[channeldb.CircuitKey]*channeldb.HtlcAcceptDesc{ + ctx.circuitKey: acceptDesc, + } + + update := channeldb.InvoiceUpdateDesc{ + AddHtlcs: newHtlcs, + } + + // If the invoice cannot be settled yet, only record the htlc. + setComplete := newSetTotal == ctx.mpp.TotalMsat() + if !setComplete { + return &update, ctx.acceptRes(resultPartialAccepted), nil + } + + // Check to see if we can settle or this is an hold invoice and + // we need to wait for the preimage. 
+ holdInvoice := inv.Terms.PaymentPreimage == channeldb.UnknownPreimage + if holdInvoice { + update.State = &channeldb.InvoiceStateUpdateDesc{ + NewState: channeldb.ContractAccepted, + } + return &update, ctx.acceptRes(resultAccepted), nil + } + + update.State = &channeldb.InvoiceStateUpdateDesc{ + NewState: channeldb.ContractSettled, + Preimage: inv.Terms.PaymentPreimage, + } + + return &update, ctx.settleRes( + inv.Terms.PaymentPreimage, ResultSettled, + ), nil +} + +// updateLegacy is a callback for DB.UpdateInvoice that contains the invoice +// settlement logic for legacy payments. +func updateLegacy(ctx *invoiceUpdateCtx, + inv *channeldb.Invoice) (*channeldb.InvoiceUpdateDesc, HtlcResolution, error) { + + // If the invoice is already canceled, there is no further + // checking to do. + if inv.State == channeldb.ContractCanceled { + return nil, ctx.failRes(ResultInvoiceAlreadyCanceled), nil + } + + // If an invoice amount is specified, check that enough is paid. Also + // check this for duplicate payments if the invoice is already settled + // or accepted. In case this is a zero-valued invoice, it will always be + // enough. + if ctx.amtPaid < inv.Terms.Value { + return nil, ctx.failRes(ResultAmountTooLow), nil + } + + // TODO(joostjager): Check invoice mpp required feature + // bit when feature becomes mandatory. + + // Don't allow settling the invoice with an old style + // htlc if we are already in the process of gathering an + // mpp set. + for _, htlc := range inv.Htlcs { + if htlc.State == channeldb.HtlcStateAccepted && + htlc.MppTotalAmt > 0 { + + return nil, ctx.failRes(ResultMppInProgress), nil + } + } + + // The invoice is still open. Check the expiry. + if ctx.expiry < uint32(ctx.currentHeight+ctx.finalCltvRejectDelta) { + return nil, ctx.failRes(ResultExpiryTooSoon), nil + } + + if ctx.expiry < uint32(ctx.currentHeight+inv.Terms.FinalCltvDelta) { + return nil, ctx.failRes(ResultExpiryTooSoon), nil + } + + // Record HTLC in the invoice database. 
+ newHtlcs := map[channeldb.CircuitKey]*channeldb.HtlcAcceptDesc{ + ctx.circuitKey: { + Amt: ctx.amtPaid, + Expiry: ctx.expiry, + AcceptHeight: ctx.currentHeight, + CustomRecords: ctx.customRecords, + }, + } + + update := channeldb.InvoiceUpdateDesc{ + AddHtlcs: newHtlcs, + } + + // Don't update invoice state if we are accepting a duplicate payment. + // We do accept or settle the HTLC. + switch inv.State { + case channeldb.ContractAccepted: + return &update, ctx.acceptRes(resultDuplicateToAccepted), nil + + case channeldb.ContractSettled: + return &update, ctx.settleRes( + inv.Terms.PaymentPreimage, ResultDuplicateToSettled, + ), nil + } + + // Check to see if we can settle or this is an hold invoice and we need + // to wait for the preimage. + holdInvoice := inv.Terms.PaymentPreimage == channeldb.UnknownPreimage + if holdInvoice { + update.State = &channeldb.InvoiceStateUpdateDesc{ + NewState: channeldb.ContractAccepted, + } + + return &update, ctx.acceptRes(resultAccepted), nil + } + + update.State = &channeldb.InvoiceStateUpdateDesc{ + NewState: channeldb.ContractSettled, + Preimage: inv.Terms.PaymentPreimage, + } + + return &update, ctx.settleRes( + inv.Terms.PaymentPreimage, ResultSettled, + ), nil +} diff --git a/invoices/utils_test.go b/invoices/utils_test.go deleted file mode 100644 index f33a93e83b..0000000000 --- a/invoices/utils_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package invoices - -import ( - "os" - "runtime/pprof" - "testing" - "time" -) - -// timeout implements a test level timeout. 
-func timeout(t *testing.T) func() { - done := make(chan struct{}) - go func() { - select { - case <-time.After(5 * time.Second): - pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - - panic("test timeout") - case <-done: - } - }() - - return func() { - close(done) - } -} diff --git a/keychain/interface_test.go b/keychain/interface_test.go index a857185588..345754db50 100644 --- a/keychain/interface_test.go +++ b/keychain/interface_test.go @@ -11,6 +11,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcwallet/snacl" "github.com/btcsuite/btcwallet/waddrmgr" "github.com/btcsuite/btcwallet/wallet" "github.com/btcsuite/btcwallet/walletdb" @@ -41,6 +42,19 @@ var ( ) func createTestBtcWallet(coinType uint32) (func(), *wallet.Wallet, error) { + // Instruct waddrmgr to use the cranked down scrypt parameters when + // creating new wallet encryption keys. + fastScrypt := waddrmgr.FastScryptOptions + keyGen := func(passphrase *[]byte, config *waddrmgr.ScryptOptions) ( + *snacl.SecretKey, error) { + + return snacl.NewSecretKey( + passphrase, fastScrypt.N, fastScrypt.R, fastScrypt.P, + ) + } + waddrmgr.SetSecretKeyGen(keyGen) + + // Create a new test wallet that uses fast scrypt as KDF. tempDir, err := ioutil.TempDir("", "keyring-lnwallet") if err != nil { return nil, nil, err diff --git a/lncfg/address.go b/lncfg/address.go index 5a5a07fd49..7fe2d71d55 100644 --- a/lncfg/address.go +++ b/lncfg/address.go @@ -1,13 +1,13 @@ package lncfg import ( + "context" "crypto/tls" "encoding/hex" "fmt" "net" "strconv" "strings" - "time" "github.com/btcsuite/btcd/btcec" "github.com/lightningnetwork/lnd/lnwire" @@ -274,8 +274,10 @@ func verifyPort(address string, defaultPort string) string { // ClientAddressDialer creates a gRPC dialer that can also dial unix socket // addresses instead of just TCP addresses. 
-func ClientAddressDialer(defaultPort string) func(string, time.Duration) (net.Conn, error) { - return func(addr string, timeout time.Duration) (net.Conn, error) { +func ClientAddressDialer(defaultPort string) func(context.Context, + string) (net.Conn, error) { + + return func(ctx context.Context, addr string) (net.Conn, error) { parsedAddr, err := ParseAddressString( addr, defaultPort, net.ResolveTCPAddr, ) @@ -283,8 +285,9 @@ func ClientAddressDialer(defaultPort string) func(string, time.Duration) (net.Co return nil, err } - return net.DialTimeout( - parsedAddr.Network(), parsedAddr.String(), timeout, + d := net.Dialer{} + return d.DialContext( + ctx, parsedAddr.Network(), parsedAddr.String(), ) } } diff --git a/lncfg/protocol_legacy_off.go b/lncfg/protocol_legacy_off.go index 7e1e47d323..bd589c2451 100644 --- a/lncfg/protocol_legacy_off.go +++ b/lncfg/protocol_legacy_off.go @@ -2,21 +2,27 @@ package lncfg -// LegacyProtocol is a struct that we use to be able to test backwards +// ProtocolOptions is a struct that we use to be able to test backwards // compatibility of protocol additions, while defaulting to the latest within -// lnd. -type LegacyProtocol struct { +// lnd, or to enable experimental protocol changes. +type ProtocolOptions struct { } // LegacyOnion returns true if the old legacy onion format should be used when // we're an intermediate or final hop. This controls if we set the // TLVOnionPayloadOptional bit or not. -func (l *LegacyProtocol) LegacyOnion() bool { +func (l *ProtocolOptions) LegacyOnion() bool { return false } -// LegacyOnion returns true if the old commitment format should be used for new -// funded channels. -func (l *LegacyProtocol) LegacyCommitment() bool { +// NoStaticRemoteKey returns true if the old commitment format with a tweaked +// remote key should be used for new funded channels. 
+func (l *ProtocolOptions) NoStaticRemoteKey() bool { + return false +} + +// AnchorCommitments returns true if support for the the anchor commitment type +// should be signaled. +func (l *ProtocolOptions) AnchorCommitments() bool { return false } diff --git a/lncfg/protocol_legacy_on.go b/lncfg/protocol_legacy_on.go index d384adc8a9..ac388f0200 100644 --- a/lncfg/protocol_legacy_on.go +++ b/lncfg/protocol_legacy_on.go @@ -2,31 +2,41 @@ package lncfg -// LegacyProtocol is a struct that we use to be able to test backwards +// ProtocolOptions is a struct that we use to be able to test backwards // compatibility of protocol additions, while defaulting to the latest within -// lnd. -type LegacyProtocol struct { - // Onion if set to true, then we won't signal TLVOnionPayloadOptional. - // As a result, nodes that include us in the route won't use the new - // modern onion framing. - Onion bool `long:"onion" description:"force node to not advertise the new modern TLV onion format"` +// lnd, or to enable experimental protocol changes. +type ProtocolOptions struct { + // LegacyOnionFormat if set to true, then we won't signal + // TLVOnionPayloadOptional. As a result, nodes that include us in the + // route won't use the new modern onion framing. + LegacyOnionFormat bool `long:"legacyonion" description:"force node to not advertise the new modern TLV onion format"` // CommitmentTweak guards if we should use the old legacy commitment // protocol, or the newer variant that doesn't have a tweak for the // remote party's output in the commitment. If set to true, then we // won't signal StaticRemoteKeyOptional. CommitmentTweak bool `long:"committweak" description:"force node to not advertise the new commitment format"` + + // Anchors should be set if we want to support opening or accepting + // channels having the anchor commitment type. 
+ Anchors bool `long:"anchors" description:"EXPERIMENTAL: enable experimental support for anchor commitments, won't work with watchtowers"` } // LegacyOnion returns true if the old legacy onion format should be used when // we're an intermediate or final hop. This controls if we set the // TLVOnionPayloadOptional bit or not. -func (l *LegacyProtocol) LegacyOnion() bool { - return l.Onion +func (l *ProtocolOptions) LegacyOnion() bool { + return l.LegacyOnionFormat } -// LegacyOnion returns true if the old commitment format should be used for new -// funded channels. -func (l *LegacyProtocol) LegacyCommitment() bool { +// NoStaticRemoteKey returns true if the old commitment format with a tweaked +// remote key should be used for new funded channels. +func (l *ProtocolOptions) NoStaticRemoteKey() bool { return l.CommitmentTweak } + +// AnchorCommitments returns true if support for the anchor commitment type +// should be signaled. +func (l *ProtocolOptions) AnchorCommitments() bool { + return l.Anchors +} diff --git a/lnd.go b/lnd.go index 51f2ca3b63..6dae099323 100644 --- a/lnd.go +++ b/lnd.go @@ -5,18 +5,10 @@ package lnd import ( - "bytes" "context" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" "fmt" "io/ioutil" - "math/big" "net" "net/http" "os" @@ -30,6 +22,7 @@ import ( _ "net/http/pprof" "gopkg.in/macaroon-bakery.v2/bakery" + "gopkg.in/macaroon.v2" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -42,6 +35,7 @@ import ( "github.com/lightningnetwork/lnd/autopilot" "github.com/lightningnetwork/lnd/build" + "github.com/lightningnetwork/lnd/cert" "github.com/lightningnetwork/lnd/chanacceptor" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/keychain" @@ -51,16 +45,12 @@ import ( "github.com/lightningnetwork/lnd/lnwallet/btcwallet" "github.com/lightningnetwork/lnd/macaroons" "github.com/lightningnetwork/lnd/signal" + 
"github.com/lightningnetwork/lnd/tor" "github.com/lightningnetwork/lnd/walletunlocker" "github.com/lightningnetwork/lnd/watchtower" "github.com/lightningnetwork/lnd/watchtower/wtdb" ) -const ( - // Make certificate valid for 14 months. - autogenCertValidity = 14 /*months*/ * 30 /*days*/ * 24 * time.Hour -) - var ( cfg *config registeredChains = newChainRegistry() @@ -69,53 +59,101 @@ var ( // network. This path will hold the files related to each different // network. networkDir string +) + +// WalletUnlockerAuthOptions returns a list of DialOptions that can be used to +// authenticate with the wallet unlocker service. +// +// NOTE: This should only be called after the WalletUnlocker listener has +// signaled it is ready. +func WalletUnlockerAuthOptions() ([]grpc.DialOption, error) { + creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "") + if err != nil { + return nil, fmt.Errorf("unable to read TLS cert: %v", err) + } - // End of ASN.1 time. - endOfTime = time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC) - - // Max serial number. - serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128) - - /* - * These cipher suites fit the following criteria: - * - Don't use outdated algorithms like SHA-1 and 3DES - * - Don't use ECB mode or other insecure symmetric methods - * - Included in the TLS v1.2 suite - * - Are available in the Go 1.7.6 standard library (more are - * available in 1.8.3 and will be added after lnd no longer - * supports 1.7, including suites that support CBC mode) - **/ - tlsCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + // Create a dial options array with the TLS credentials. 
+ opts := []grpc.DialOption{ + grpc.WithTransportCredentials(creds), + } + + return opts, nil +} + +// AdminAuthOptions returns a list of DialOptions that can be used to +// authenticate with the RPC server with admin capabilities. +// +// NOTE: This should only be called after the RPCListener has signaled it is +// ready. +func AdminAuthOptions() ([]grpc.DialOption, error) { + creds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "") + if err != nil { + return nil, fmt.Errorf("unable to read TLS cert: %v", err) + } + + // Create a dial options array. + opts := []grpc.DialOption{ + grpc.WithTransportCredentials(creds), } -) + + // Get the admin macaroon if macaroons are active. + if !cfg.NoMacaroons { + // Load the adming macaroon file. + macBytes, err := ioutil.ReadFile(cfg.AdminMacPath) + if err != nil { + return nil, fmt.Errorf("unable to read macaroon "+ + "path (check the network setting!): %v", err) + } + + mac := &macaroon.Macaroon{} + if err = mac.UnmarshalBinary(macBytes); err != nil { + return nil, fmt.Errorf("unable to decode macaroon: %v", + err) + } + + // Now we append the macaroon credentials to the dial options. + cred := macaroons.NewMacaroonCredential(mac) + opts = append(opts, grpc.WithPerRPCCredentials(cred)) + } + + return opts, nil +} + +// ListenerWithSignal is a net.Listener that has an additional Ready channel that +// will be closed when a server starts listening. +type ListenerWithSignal struct { + net.Listener + + // Ready will be closed by the server listening on Listener. + Ready chan struct{} +} // ListenerCfg is a wrapper around custom listeners that can be passed to lnd // when calling its main method. type ListenerCfg struct { // WalletUnlocker can be set to the listener to use for the wallet // unlocker. If nil a regular network listener will be created. - WalletUnlocker net.Listener + WalletUnlocker *ListenerWithSignal // RPCListener can be set to the listener to use for the RPC server. 
If // nil a regular network listener will be created. - RPCListener net.Listener + RPCListener *ListenerWithSignal } // rpcListeners is a function type used for closures that fetches a set of RPC -// listeners for the current configuration, and the GRPC server options to use -// with these listeners. If no custom listeners are present, this should return -// normal listeners from the RPC endpoints defined in the config, and server -// options specifying TLS. -type rpcListeners func() ([]net.Listener, func(), []grpc.ServerOption, error) +// listeners for the current configuration. If no custom listeners are present, +// this should return normal listeners from the RPC endpoints defined in the +// config. The second return value us a closure that will close the fetched +// listeners. +type rpcListeners func() ([]*ListenerWithSignal, func(), error) // Main is the true entry point for lnd. This function is required since defers // created in the top-level scope of a main method aren't executed if os.Exit() // is called. func Main(lisCfg ListenerCfg) error { + // Hook interceptor for os signals. + signal.Intercept() + // Load the configuration, and parse any command line options. This // function will also set up logging properly. loadedConfig, err := loadConfig() @@ -124,15 +162,17 @@ func Main(lisCfg ListenerCfg) error { } cfg = loadedConfig defer func() { - if logRotator != nil { - ltndLog.Info("Shutdown complete") - logRotator.Close() + ltndLog.Info("Shutdown complete") + err := logWriter.Close() + if err != nil { + ltndLog.Errorf("Could not close log rotator: %v", err) } }() // Show version at startup. 
- ltndLog.Infof("Version: %s, build=%s, logging=%s", - build.Version(), build.Deployment, build.LoggingType) + ltndLog.Infof("Version: %s commit=%s, build=%s, logging=%s", + build.Version(), build.Commit, build.Deployment, + build.LoggingType) var network string switch { @@ -184,8 +224,12 @@ func Main(lisCfg ListenerCfg) error { defaultGraphSubDirname, normalizeNetwork(activeNetParams.Name)) + ltndLog.Infof("Opening the main database, this might take a few " + + "minutes...") + // Open the channeldb, which is dedicated to storing channel, and // network related metadata. + startOpenTime := time.Now() chanDB, err := channeldb.Open( graphDir, channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), @@ -199,6 +243,9 @@ func Main(lisCfg ListenerCfg) error { } defer chanDB.Close() + openTime := time.Since(startOpenTime) + ltndLog.Infof("Database now open (time_to_open=%v)!", openTime) + // Only process macaroons if --no-macaroons isn't set. ctx := context.Background() ctx, cancel := context.WithCancel(ctx) @@ -220,10 +267,12 @@ func Main(lisCfg ListenerCfg) error { // For our REST dial options, we'll still use TLS, but also increase // the max message size that we'll decode to allow clients to hit // endpoints which return more data such as the DescribeGraph call. + // We set this to 200MiB atm. Should be the same value as maxMsgRecvSize + // in cmd/lncli/main.go. restDialOpts := []grpc.DialOption{ grpc.WithTransportCredentials(*restCreds), grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 50), + grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 200), ), } @@ -263,10 +312,8 @@ func Main(lisCfg ListenerCfg) error { // getListeners is a closure that creates listeners from the // RPCListeners defined in the config. It also returns a cleanup // closure and the server options to use for the GRPC server. 
- getListeners := func() ([]net.Listener, func(), []grpc.ServerOption, - error) { - - var grpcListeners []net.Listener + getListeners := func() ([]*ListenerWithSignal, func(), error) { + var grpcListeners []*ListenerWithSignal for _, grpcEndpoint := range cfg.RPCListeners { // Start a gRPC server listening for HTTP/2 // connections. @@ -274,9 +321,13 @@ func Main(lisCfg ListenerCfg) error { if err != nil { ltndLog.Errorf("unable to listen on %s", grpcEndpoint) - return nil, nil, nil, err + return nil, nil, err } - grpcListeners = append(grpcListeners, lis) + grpcListeners = append( + grpcListeners, &ListenerWithSignal{ + Listener: lis, + Ready: make(chan struct{}), + }) } cleanup := func() { @@ -284,23 +335,20 @@ func Main(lisCfg ListenerCfg) error { lis.Close() } } - return grpcListeners, cleanup, serverOpts, nil + return grpcListeners, cleanup, nil } // walletUnlockerListeners is a closure we'll hand to the wallet // unlocker, that will be called when it needs listeners for its GPRC // server. - walletUnlockerListeners := func() ([]net.Listener, func(), - []grpc.ServerOption, error) { + walletUnlockerListeners := func() ([]*ListenerWithSignal, func(), + error) { // If we have chosen to start with a dedicated listener for the - // wallet unlocker, we return it directly, and empty server - // options to deactivate TLS. - // TODO(halseth): any point in adding TLS support for custom - // listeners? + // wallet unlocker, we return it directly. if lisCfg.WalletUnlocker != nil { - return []net.Listener{lisCfg.WalletUnlocker}, func() {}, - []grpc.ServerOption{}, nil + return []*ListenerWithSignal{lisCfg.WalletUnlocker}, + func() {}, nil } // Otherwise we'll return the regular listeners. @@ -312,8 +360,8 @@ func Main(lisCfg ListenerCfg) error { // for wallet encryption. 
if !cfg.NoSeedBackup { params, err := waitForWalletPassword( - cfg.RESTListeners, restDialOpts, restProxyDest, tlsCfg, - walletUnlockerListeners, + cfg.RESTListeners, serverOpts, restDialOpts, + restProxyDest, tlsCfg, walletUnlockerListeners, ) if err != nil { err := fmt.Errorf("Unable to set up wallet password "+ @@ -427,6 +475,28 @@ func Main(lisCfg ListenerCfg) error { defer towerClientDB.Close() } + // If tor is active and either v2 or v3 onion services have been specified, + // make a tor controller and pass it into both the watchtower server and + // the regular lnd server. + var torController *tor.Controller + if cfg.Tor.Active && (cfg.Tor.V2 || cfg.Tor.V3) { + torController = tor.NewController( + cfg.Tor.Control, cfg.Tor.TargetIPAddress, cfg.Tor.Password, + ) + + // Start the tor controller before giving it to any other subsystems. + if err := torController.Start(); err != nil { + err := fmt.Errorf("unable to initialize tor controller: %v", err) + ltndLog.Error(err) + return err + } + defer func() { + if err := torController.Stop(); err != nil { + ltndLog.Errorf("error stopping tor controller: %v", err) + } + }() + } + var tower *watchtower.Standalone if cfg.Watchtower.Active { // Segment the watchtower directory by chain and network. @@ -460,7 +530,7 @@ func Main(lisCfg ListenerCfg) error { return err } - wtConfig, err := cfg.Watchtower.Apply(&watchtower.Config{ + wtCfg := &watchtower.Config{ BlockFetcher: activeChainControl.chainIO, DB: towerDB, EpochRegistrar: activeChainControl.chainNotifier, @@ -473,7 +543,23 @@ func Main(lisCfg ListenerCfg) error { NodePrivKey: towerPrivKey, PublishTx: activeChainControl.wallet.PublishTransaction, ChainHash: *activeNetParams.GenesisHash, - }, lncfg.NormalizeAddresses) + } + + // If there is a tor controller (user wants auto hidden services), then + // store a pointer in the watchtower config. 
+ if torController != nil { + wtCfg.TorController = torController + wtCfg.WatchtowerKeyPath = cfg.Tor.WatchtowerKeyPath + + switch { + case cfg.Tor.V2: + wtCfg.Type = tor.V2 + case cfg.Tor.V3: + wtCfg.Type = tor.V3 + } + } + + wtConfig, err := cfg.Watchtower.Apply(wtCfg, lncfg.NormalizeAddresses) if err != nil { err := fmt.Errorf("Unable to configure watchtower: %v", err) @@ -497,6 +583,7 @@ func Main(lisCfg ListenerCfg) error { server, err := newServer( cfg.Listeners, chanDB, towerClientDB, activeChainControl, idPrivKey, walletInitParams.ChansToRestore, chainedAcceptor, + torController, ) if err != nil { err := fmt.Errorf("Unable to create server: %v", err) @@ -507,7 +594,7 @@ func Main(lisCfg ListenerCfg) error { // Set up an autopilot manager from the current config. This will be // used to manage the underlying autopilot agent, starting and stopping // it at will. - atplCfg, err := initAutoPilot(server, cfg.Autopilot) + atplCfg, err := initAutoPilot(server, cfg.Autopilot, mainChain) if err != nil { err := fmt.Errorf("Unable to initialize autopilot: %v", err) ltndLog.Error(err) @@ -529,17 +616,12 @@ func Main(lisCfg ListenerCfg) error { // rpcListeners is a closure we'll hand to the rpc server, that will be // called when it needs listeners for its GPRC server. - rpcListeners := func() ([]net.Listener, func(), []grpc.ServerOption, - error) { - + rpcListeners := func() ([]*ListenerWithSignal, func(), error) { // If we have chosen to start with a dedicated listener for the - // rpc server, we return it directly, and empty server options - // to deactivate TLS. - // TODO(halseth): any point in adding TLS support for custom - // listeners? + // rpc server, we return it directly. if lisCfg.RPCListener != nil { - return []net.Listener{lisCfg.RPCListener}, func() {}, - []grpc.ServerOption{}, nil + return []*ListenerWithSignal{lisCfg.RPCListener}, + func() {}, nil } // Otherwise we'll return the regular listeners. 
@@ -549,9 +631,9 @@ func Main(lisCfg ListenerCfg) error { // Initialize, and register our implementation of the gRPC interface // exported by the rpcServer. rpcServer, err := newRPCServer( - server, macaroonService, cfg.SubRPCServers, restDialOpts, - restProxyDest, atplManager, server.invoices, tower, tlsCfg, - rpcListeners, chainedAcceptor, + server, macaroonService, cfg.SubRPCServers, serverOpts, + restDialOpts, restProxyDest, atplManager, server.invoices, + tower, tlsCfg, rpcListeners, chainedAcceptor, ) if err != nil { err := fmt.Errorf("Unable to create RPC server: %v", err) @@ -657,29 +739,44 @@ func getTLSConfig(tlsCertPath string, tlsKeyPath string, tlsExtraIPs, tlsExtraDomains []string, rpcListeners []net.Addr) (*tls.Config, *credentials.TransportCredentials, string, error) { - // Ensure we create TLS key and certificate if they don't exist + // Ensure we create TLS key and certificate if they don't exist. if !fileExists(tlsCertPath) && !fileExists(tlsKeyPath) { - err := genCertPair( - tlsCertPath, tlsKeyPath, tlsExtraIPs, tlsExtraDomains, + rpcsLog.Infof("Generating TLS certificates...") + err := cert.GenCertPair( + "lnd autogenerated cert", tlsCertPath, tlsKeyPath, + tlsExtraIPs, tlsExtraDomains, + cert.DefaultAutogenValidity, ) if err != nil { return nil, nil, "", err } + rpcsLog.Infof("Done generating TLS certificates") } - certData, err := tls.LoadX509KeyPair(tlsCertPath, tlsKeyPath) + certData, parsedCert, err := cert.LoadCert(tlsCertPath, tlsKeyPath) if err != nil { return nil, nil, "", err } - cert, err := x509.ParseCertificate(certData.Certificate[0]) - if err != nil { - return nil, nil, "", err + // We check whether the certifcate we have on disk match the IPs and + // domains specified by the config. If the extra IPs or domains have + // changed from when the certificate was created, we will refresh the + // certificate if auto refresh is active. 
+ refresh := false + if cfg.TLSAutoRefresh { + refresh, err = cert.IsOutdated( + parsedCert, tlsExtraIPs, tlsExtraDomains, + ) + if err != nil { + return nil, nil, "", err + } } - // If the certificate expired, delete it and the TLS key and generate a new pair - if time.Now().After(cert.NotAfter) { - ltndLog.Info("TLS certificate is expired, generating a new one") + // If the certificate expired or it was outdated, delete it and the TLS + // key and generate a new pair. + if time.Now().After(parsedCert.NotAfter) || refresh { + ltndLog.Info("TLS certificate is expired or outdated, " + + "generating a new one") err := os.Remove(tlsCertPath) if err != nil { @@ -691,21 +788,25 @@ func getTLSConfig(tlsCertPath string, tlsKeyPath string, tlsExtraIPs, return nil, nil, "", err } - err = genCertPair( - tlsCertPath, tlsKeyPath, tlsExtraIPs, tlsExtraDomains, + rpcsLog.Infof("Renewing TLS certificates...") + err = cert.GenCertPair( + "lnd autogenerated cert", tlsCertPath, tlsKeyPath, + tlsExtraIPs, tlsExtraDomains, + cert.DefaultAutogenValidity, ) if err != nil { return nil, nil, "", err } + rpcsLog.Infof("Done renewing TLS certificates") + // Reload the certificate data. + certData, _, err = cert.LoadCert(tlsCertPath, tlsKeyPath) + if err != nil { + return nil, nil, "", err + } } - tlsCfg := &tls.Config{ - Certificates: []tls.Certificate{certData}, - CipherSuites: tlsCipherSuites, - MinVersion: tls.VersionTLS12, - } - + tlsCfg := cert.TLSConfFromCert(certData) restCreds, err := credentials.NewClientTLSFromFile(tlsCertPath, "") if err != nil { return nil, nil, "", err @@ -738,147 +839,6 @@ func fileExists(name string) bool { return true } -// genCertPair generates a key/cert pair to the paths provided. The -// auto-generated certificates should *not* be used in production for public -// access as they're self-signed and don't necessarily contain all of the -// desired hostnames for the service. For production/public use, consider a -// real PKI. 
-// -// This function is adapted from https://github.com/btcsuite/btcd and -// https://github.com/btcsuite/btcutil -func genCertPair(certFile, keyFile string, tlsExtraIPs, - tlsExtraDomains []string) error { - - rpcsLog.Infof("Generating TLS certificates...") - - org := "lnd autogenerated cert" - now := time.Now() - validUntil := now.Add(autogenCertValidity) - - // Check that the certificate validity isn't past the ASN.1 end of time. - if validUntil.After(endOfTime) { - validUntil = endOfTime - } - - // Generate a serial number that's below the serialNumberLimit. - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return fmt.Errorf("failed to generate serial number: %s", err) - } - - // Collect the host's IP addresses, including loopback, in a slice. - ipAddresses := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")} - - // addIP appends an IP address only if it isn't already in the slice. - addIP := func(ipAddr net.IP) { - for _, ip := range ipAddresses { - if bytes.Equal(ip, ipAddr) { - return - } - } - ipAddresses = append(ipAddresses, ipAddr) - } - - // Add all the interface IPs that aren't already in the slice. - addrs, err := net.InterfaceAddrs() - if err != nil { - return err - } - for _, a := range addrs { - ipAddr, _, err := net.ParseCIDR(a.String()) - if err == nil { - addIP(ipAddr) - } - } - - // Add extra IPs to the slice. - for _, ip := range tlsExtraIPs { - ipAddr := net.ParseIP(ip) - if ipAddr != nil { - addIP(ipAddr) - } - } - - // Collect the host's names into a slice. - host, err := os.Hostname() - if err != nil { - rpcsLog.Errorf("Failed getting hostname, falling back to "+ - "localhost: %v", err) - host = "localhost" - } - - dnsNames := []string{host} - if host != "localhost" { - dnsNames = append(dnsNames, "localhost") - } - dnsNames = append(dnsNames, tlsExtraDomains...) - - // Also add fake hostnames for unix sockets, otherwise hostname - // verification will fail in the client. 
- dnsNames = append(dnsNames, "unix", "unixpacket") - - // Generate a private key for the certificate. - priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return err - } - - // Construct the certificate template. - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - Organization: []string{org}, - CommonName: host, - }, - NotBefore: now.Add(-time.Hour * 24), - NotAfter: validUntil, - - KeyUsage: x509.KeyUsageKeyEncipherment | - x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - IsCA: true, // so can sign self. - BasicConstraintsValid: true, - - DNSNames: dnsNames, - IPAddresses: ipAddresses, - } - - derBytes, err := x509.CreateCertificate(rand.Reader, &template, - &template, &priv.PublicKey, priv) - if err != nil { - return fmt.Errorf("failed to create certificate: %v", err) - } - - certBuf := &bytes.Buffer{} - err = pem.Encode(certBuf, &pem.Block{Type: "CERTIFICATE", - Bytes: derBytes}) - if err != nil { - return fmt.Errorf("failed to encode certificate: %v", err) - } - - keybytes, err := x509.MarshalECPrivateKey(priv) - if err != nil { - return fmt.Errorf("unable to encode privkey: %v", err) - } - keyBuf := &bytes.Buffer{} - err = pem.Encode(keyBuf, &pem.Block{Type: "EC PRIVATE KEY", - Bytes: keybytes}) - if err != nil { - return fmt.Errorf("failed to encode private key: %v", err) - } - - // Write cert and key files. - if err = ioutil.WriteFile(certFile, certBuf.Bytes(), 0644); err != nil { - return err - } - if err = ioutil.WriteFile(keyFile, keyBuf.Bytes(), 0600); err != nil { - os.Remove(certFile) - return err - } - - rpcsLog.Infof("Done generating TLS certificates") - return nil -} - // genMacaroons generates three macaroon files; one admin-level, one for // invoice access and one read-only. These can also be used to generate more // granular macaroons. 
@@ -970,13 +930,13 @@ type WalletUnlockParams struct { // WalletUnlocker server, and block until a password is provided by // the user to this RPC server. func waitForWalletPassword(restEndpoints []net.Addr, - restDialOpts []grpc.DialOption, restProxyDest string, - tlsConf *tls.Config, getListeners rpcListeners) ( - *WalletUnlockParams, error) { + serverOpts []grpc.ServerOption, restDialOpts []grpc.DialOption, + restProxyDest string, tlsConf *tls.Config, + getListeners rpcListeners) (*WalletUnlockParams, error) { // Start a gRPC server listening for HTTP/2 connections, solely used // for getting the encryption password from the client. - listeners, cleanup, serverOpts, err := getListeners() + listeners, cleanup, err := getListeners() if err != nil { return nil, err } @@ -1012,9 +972,13 @@ func waitForWalletPassword(restEndpoints []net.Addr, for _, lis := range listeners { wg.Add(1) - go func(lis net.Listener) { + go func(lis *ListenerWithSignal) { rpcsLog.Infof("password RPC server listening on %s", lis.Addr()) + + // Close the ready chan to indicate we are listening. + close(lis.Ready) + wg.Done() grpcServer.Serve(lis) }(lis) diff --git a/lnpeer/peer.go b/lnpeer/peer.go index e21e27968f..1b118bfb14 100644 --- a/lnpeer/peer.go +++ b/lnpeer/peer.go @@ -30,7 +30,7 @@ type Peer interface { // WipeChannel removes the channel uniquely identified by its channel // point from all indexes associated with the peer. - WipeChannel(*wire.OutPoint) error + WipeChannel(*wire.OutPoint) // PubKey returns the serialized public key of the remote peer. PubKey() [33]byte @@ -47,15 +47,14 @@ type Peer interface { // implementation exits. QuitSignal() <-chan struct{} - // LocalGlobalFeatures returns the set of global features that has been - // advertised by the local peer. This allows sub-systems that use this + // LocalFeatures returns the set of features that has been advertised by + // the us to the remote peer. 
This allows sub-systems that use this // interface to gate their behavior off the set of negotiated feature // bits. - LocalGlobalFeatures() *lnwire.FeatureVector + LocalFeatures() *lnwire.FeatureVector - // RemoteGlobalFeatures returns the set of global features that has - // been advertised by the remote peer. This allows sub-systems that use - // this interface to gate their behavior off the set of negotiated - // feature bits. - RemoteGlobalFeatures() *lnwire.FeatureVector + // RemoteFeatures returns the set of features that has been advertised + // by the remote peer. This allows sub-systems that use this interface + // to gate their behavior off the set of negotiated feature bits. + RemoteFeatures() *lnwire.FeatureVector } diff --git a/lnrpc/.clang-format b/lnrpc/.clang-format new file mode 100644 index 0000000000..f19142787d --- /dev/null +++ b/lnrpc/.clang-format @@ -0,0 +1,7 @@ +--- +Language: Proto +BasedOnStyle: Google +IndentWidth: 4 +AllowShortFunctionsOnASingleLine: None +SpaceBeforeParens: Always +CompactNamespaces: false diff --git a/lnrpc/README.md b/lnrpc/README.md index b3a77a686f..d07e548ee9 100644 --- a/lnrpc/README.md +++ b/lnrpc/README.md @@ -120,7 +120,13 @@ description): enforced by the node globally for each channel. * UpdateChannelPolicy * Allows the caller to update the fee schedule and channel policies for all channels - globally, or a particular channel + globally, or a particular channel. + * ForwardingHistory + * ForwardingHistory allows the caller to query the htlcswitch for a + record of all HTLCs forwarded. + * BakeMacaroon + * Bakes a new macaroon with the provided list of permissions and + restrictions ## Service: WalletUnlocker @@ -140,6 +146,17 @@ $ go get -u github.com/lightningnetwork/lnd/lnrpc ## Generate protobuf definitions +### Linux + +For linux there is an easy install script that is also used for the Travis CI +build. 
Just run the following command (requires `sudo` permissions and the tools +`make`, `go`, `wget` and `unzip` to be installed) from the repository's root +folder: + +`./scripts/install_travis_proto.sh` + +### MacOS / Unix like systems + 1. Download [v.3.4.0](https://github.com/google/protobuf/releases/tag/v3.4.0) of `protoc` for your operating system and add it to your `PATH`. For example, if using macOS: @@ -164,12 +181,32 @@ $ cd $GOPATH/src/google.golang.org/genproto $ git reset --hard a8101f21cf983e773d0c1133ebc5424792003214 ``` -4. Install `grpc-ecosystem/grpc-gateway` at commit `f2862b476edcef83412c7af8687c9cd8e4097c0f`. +4. Install `grpc-ecosystem/grpc-gateway` at version `v1.8.6`. ```bash $ git clone https://github.com/grpc-ecosystem/grpc-gateway $GOPATH/src/github.com/grpc-ecosystem/grpc-gateway $ cd $GOPATH/src/github.com/grpc-ecosystem/grpc-gateway -$ git reset --hard f2862b476edcef83412c7af8687c9cd8e4097c0f +$ git reset --hard v1.8.6 $ go install ./protoc-gen-grpc-gateway ./protoc-gen-swagger ``` -5. Run [`gen_protos.sh`](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/gen_protos.sh) to generate new protobuf definitions. +5. Run [`gen_protos.sh`](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/gen_protos.sh) +or `make rpc` to generate new protobuf definitions. + +## Format .proto files + +We use `clang-format` to make sure the `.proto` files are formatted correctly. +You can install the formatter on Ubuntu by running `apt install clang-format`. + +Consult [this page](http://releases.llvm.org/download.html) to find binaries +for other operating systems or distributions. + +## Makefile commands + +The following commands are available with `make`: + +* `rpc`: Compile `.proto` files (calls `lnrpc/gen_protos.sh`). +* `rpc-format`: Formats all `.proto` files according to our formatting rules. + Requires `clang-format`, see previous chapter. +* `rpc-check`: Runs both previous commands and makes sure the git work tree is + not dirty. 
This can be used to check that the `.proto` files are formatted + and compiled properly. diff --git a/lnrpc/autopilotrpc/autopilot.pb.go b/lnrpc/autopilotrpc/autopilot.pb.go index e5bb41cb0b..7b6c77449b 100644 --- a/lnrpc/autopilotrpc/autopilot.pb.go +++ b/lnrpc/autopilotrpc/autopilot.pb.go @@ -167,7 +167,7 @@ var xxx_messageInfo_ModifyStatusResponse proto.InternalMessageInfo type QueryScoresRequest struct { Pubkeys []string `protobuf:"bytes,1,rep,name=pubkeys,proto3" json:"pubkeys,omitempty"` /// If set, we will ignore the local channel state when calculating scores. - IgnoreLocalState bool `protobuf:"varint,2,opt,name=ignore_local_state,json=no_state,proto3" json:"ignore_local_state,omitempty"` + IgnoreLocalState bool `protobuf:"varint,2,opt,name=ignore_local_state,json=ignoreLocalState,proto3" json:"ignore_local_state,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -397,36 +397,37 @@ func init() { func init() { proto.RegisterFile("autopilotrpc/autopilot.proto", fileDescriptor_e0b9dc347a92e084) } var fileDescriptor_e0b9dc347a92e084 = []byte{ - // 463 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcf, 0x6f, 0xd3, 0x30, - 0x14, 0xc7, 0x95, 0x4c, 0x74, 0xcb, 0xeb, 0x60, 0xc3, 0x9d, 0xa6, 0x28, 0x54, 0xd0, 0x45, 0x1c, - 0x2a, 0x24, 0x52, 0x51, 0x38, 0x00, 0x12, 0x07, 0x86, 0x90, 0x90, 0x80, 0x03, 0x2e, 0xbb, 0x70, - 0x99, 0xd2, 0xcc, 0xb4, 0x56, 0x8d, 0x1d, 0xec, 0xe7, 0xa1, 0xfc, 0x43, 0x5c, 0xf9, 0x1b, 0x38, - 0xf2, 0x5f, 0xa1, 0xc5, 0x49, 0x49, 0xaa, 0x12, 0x84, 0xb4, 0x5b, 0xde, 0x8f, 0x7c, 0x9e, 0xdf, - 0xd7, 0x5f, 0x19, 0x86, 0xa9, 0x45, 0x95, 0x73, 0xa1, 0x50, 0xe7, 0xd9, 0x64, 0x1d, 0x24, 0xb9, - 0x56, 0xa8, 0xc8, 0x7e, 0xb3, 0x1a, 0x1f, 0xc0, 0xcd, 0x19, 0xa6, 0x68, 0x0d, 0x65, 0x5f, 0x2d, - 0x33, 0x18, 0x8f, 0xe1, 0x56, 0x9d, 0x30, 0xb9, 0x92, 0x86, 0x91, 0x63, 0xe8, 0xa5, 0x19, 0xf2, - 0x4b, 0x16, 0x7a, 0x23, 0x6f, 
0xbc, 0x47, 0xab, 0x28, 0x7e, 0x08, 0x83, 0xf7, 0xea, 0x82, 0x7f, - 0x2e, 0x5a, 0x80, 0xab, 0x76, 0x26, 0xd3, 0xb9, 0x58, 0xb7, 0xbb, 0x28, 0x3e, 0x86, 0xa3, 0x76, - 0xbb, 0xc3, 0xc7, 0x1f, 0x81, 0x7c, 0xb0, 0x4c, 0x17, 0xb3, 0x4c, 0x69, 0xb6, 0xa6, 0x84, 0xb0, - 0x9b, 0xdb, 0xf9, 0x8a, 0x15, 0x26, 0xf4, 0x46, 0x3b, 0xe3, 0x80, 0xd6, 0x21, 0xb9, 0x0f, 0x84, - 0x2f, 0xa4, 0xd2, 0xec, 0x5c, 0xa8, 0x2c, 0x15, 0xe7, 0x06, 0x53, 0x64, 0xa1, 0x5f, 0xce, 0xda, - 0x93, 0xca, 0xc5, 0xf1, 0x77, 0x1f, 0x06, 0x2d, 0x6c, 0xb5, 0xcc, 0x5b, 0xd8, 0xd5, 0xcc, 0x58, - 0x81, 0x8e, 0xdb, 0x9f, 0x3e, 0x4a, 0x9a, 0x7a, 0x24, 0x5b, 0xfe, 0x49, 0xde, 0x30, 0xab, 0xb9, - 0x41, 0x9e, 0xd1, 0xf2, 0x4f, 0x5a, 0x13, 0xa2, 0x9f, 0x1e, 0x1c, 0x6c, 0x14, 0xc9, 0x10, 0x82, - 0x65, 0x9d, 0x2a, 0x15, 0x08, 0xe8, 0x9f, 0x04, 0x39, 0x83, 0x9e, 0x29, 0xe1, 0xa1, 0x5f, 0x4e, - 0x7f, 0xf1, 0xdf, 0xd3, 0x13, 0x57, 0x7e, 0x2d, 0x51, 0x17, 0xb4, 0x82, 0x45, 0xcf, 0xa0, 0xdf, - 0x48, 0x93, 0x43, 0xd8, 0x59, 0xb1, 0xa2, 0x9a, 0x7e, 0xf5, 0x49, 0x8e, 0xe0, 0xc6, 0x65, 0x2a, - 0xac, 0xd3, 0xc9, 0xa3, 0x2e, 0x78, 0xee, 0x3f, 0xf5, 0xe2, 0x1f, 0x1e, 0x1c, 0xce, 0x18, 0xb6, - 0xd5, 0xef, 0x5e, 0xe2, 0x74, 0x63, 0x89, 0x07, 0xed, 0x25, 0x36, 0x69, 0xd7, 0x7d, 0xe2, 0x01, - 0xdc, 0x6e, 0x8c, 0x70, 0x2a, 0x4d, 0x7f, 0xf9, 0x10, 0xbc, 0xac, 0x4f, 0x41, 0x5e, 0x41, 0xcf, - 0xb9, 0x8c, 0xdc, 0xd9, 0x38, 0x5b, 0xd3, 0xaa, 0xd1, 0x70, 0x7b, 0xb1, 0xb2, 0xca, 0x19, 0xec, - 0x37, 0x0d, 0x4b, 0x4e, 0xda, 0xdd, 0x5b, 0xbc, 0x1f, 0xc5, 0x5d, 0x2d, 0x15, 0x96, 0x42, 0xbf, - 0x71, 0xcd, 0x64, 0xd4, 0xe1, 0x00, 0x07, 0x3d, 0xf9, 0xa7, 0x47, 0xc8, 0x3b, 0x08, 0xd6, 0x92, - 0x90, 0xbb, 0xdd, 0xd7, 0x11, 0xdd, 0xfb, 0x6b, 0xdd, 0xd1, 0x4e, 0x9f, 0x7c, 0x9a, 0x2e, 0x38, - 0x2e, 0xed, 0x3c, 0xc9, 0xd4, 0x97, 0x89, 0xe0, 0x8b, 0x25, 0x4a, 0x2e, 0x17, 0x92, 0xe1, 0x37, - 0xa5, 0x57, 0x13, 0x21, 0x2f, 0x26, 0x42, 0xb6, 0x9e, 0x16, 0x9d, 0x67, 0xf3, 0x5e, 0xf9, 0xbc, - 0x3c, 0xfe, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x4e, 0x74, 0x27, 
0x7e, 0x04, 0x00, 0x00, + // 468 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xcf, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x49, 0x16, 0xbb, 0xe6, 0x75, 0x75, 0xeb, 0x74, 0x59, 0x42, 0x2c, 0xda, 0xcd, 0xa9, + 0x88, 0xa6, 0x58, 0x3d, 0xa8, 0xe0, 0xc1, 0x15, 0x41, 0x70, 0x3d, 0x38, 0x65, 0x2f, 0x22, 0x2c, + 0x69, 0x76, 0x6c, 0x87, 0x8e, 0x33, 0x71, 0xe6, 0xcd, 0x4a, 0xfe, 0x21, 0xaf, 0xfe, 0x0d, 0x1e, + 0xfd, 0xaf, 0xa4, 0x99, 0xa4, 0x26, 0xa5, 0x56, 0x84, 0xbd, 0xe5, 0xbd, 0xef, 0x9b, 0xcf, 0x9b, + 0xf7, 0x23, 0x03, 0x83, 0xd4, 0xa2, 0xca, 0xb9, 0x50, 0xa8, 0xf3, 0x6c, 0xbc, 0x36, 0x92, 0x5c, + 0x2b, 0x54, 0xe4, 0xa0, 0xa9, 0xc6, 0x87, 0x70, 0x6b, 0x8a, 0x29, 0x5a, 0x43, 0xd9, 0x57, 0xcb, + 0x0c, 0xc6, 0x23, 0xb8, 0x5d, 0x3b, 0x4c, 0xae, 0xa4, 0x61, 0xe4, 0x18, 0x3a, 0x69, 0x86, 0xfc, + 0x8a, 0x85, 0xde, 0xd0, 0x1b, 0xdd, 0xa4, 0x95, 0x15, 0x3f, 0x82, 0xfe, 0x7b, 0x75, 0xc9, 0x3f, + 0x17, 0x2d, 0xc0, 0x2a, 0x9c, 0xc9, 0x74, 0x26, 0xd6, 0xe1, 0xce, 0x8a, 0x8f, 0xe1, 0xa8, 0x1d, + 0xee, 0xf0, 0xf1, 0x27, 0x20, 0x1f, 0x2c, 0xd3, 0xc5, 0x34, 0x53, 0x9a, 0xad, 0x29, 0x21, 0xec, + 0xe7, 0x76, 0xb6, 0x64, 0x85, 0x09, 0xbd, 0xe1, 0xde, 0x28, 0xa0, 0xb5, 0x49, 0x1e, 0x02, 0xe1, + 0x73, 0xa9, 0x34, 0xbb, 0x10, 0x2a, 0x4b, 0xc5, 0x85, 0xc1, 0x14, 0x59, 0xe8, 0x97, 0xb9, 0x7a, + 0x4e, 0x39, 0x5b, 0x09, 0xab, 0x34, 0x2c, 0xfe, 0xee, 0x43, 0xbf, 0x85, 0xaf, 0x8a, 0x7a, 0x07, + 0xfb, 0x9a, 0x19, 0x2b, 0xd0, 0xf1, 0xbb, 0x93, 0xc7, 0x49, 0xb3, 0x2f, 0xc9, 0x96, 0x33, 0xc9, + 0x5b, 0x66, 0x35, 0x37, 0xc8, 0x33, 0x5a, 0x9e, 0xa4, 0x35, 0x21, 0xfa, 0xe9, 0xc1, 0xe1, 0x86, + 0x48, 0x06, 0x10, 0x2c, 0x6a, 0x57, 0xd9, 0x89, 0x80, 0xfe, 0x71, 0x90, 0x73, 0xe8, 0x98, 0x12, + 0x1e, 0xfa, 0x65, 0xf6, 0x97, 0xff, 0x9d, 0x3d, 0x71, 0xf2, 0x1b, 0x89, 0xba, 0xa0, 0x15, 0x2c, + 0x7a, 0x0e, 0xdd, 0x86, 0x9b, 0xf4, 0x60, 0x6f, 0xc9, 0x8a, 0x2a, 0xfb, 0xea, 0x93, 0x1c, 0xc1, + 0x8d, 0xab, 0x54, 0x58, 0xd7, 0x2f, 0x8f, 0x3a, 0xe3, 0x85, 
0xff, 0xcc, 0x8b, 0x7f, 0x78, 0xd0, + 0x9b, 0x32, 0x6c, 0x4f, 0x61, 0x77, 0x11, 0xa7, 0x1b, 0x45, 0x3c, 0x68, 0x17, 0xb1, 0x49, 0xbb, + 0xee, 0x1b, 0xf7, 0xe1, 0x4e, 0x23, 0x85, 0xeb, 0xd2, 0xe4, 0x97, 0x0f, 0xc1, 0xab, 0xfa, 0x16, + 0xe4, 0x35, 0x74, 0xdc, 0xb6, 0x91, 0xbb, 0x1b, 0x77, 0x6b, 0xae, 0x6c, 0x34, 0xd8, 0x2e, 0x56, + 0xab, 0x72, 0x0e, 0x07, 0xcd, 0xc5, 0x25, 0x27, 0xed, 0xe8, 0x2d, 0xff, 0x40, 0x14, 0xef, 0x0a, + 0xa9, 0xb0, 0x14, 0xba, 0x8d, 0x31, 0x93, 0xe1, 0x8e, 0x0d, 0x70, 0xd0, 0x93, 0x7f, 0xee, 0x08, + 0x39, 0x83, 0x60, 0xdd, 0x12, 0x72, 0x6f, 0xf7, 0x38, 0xa2, 0xfb, 0x7f, 0xd5, 0x1d, 0xed, 0xf4, + 0xe9, 0xc7, 0xc9, 0x9c, 0xe3, 0xc2, 0xce, 0x92, 0x4c, 0x7d, 0x19, 0x0b, 0x3e, 0x5f, 0xa0, 0xe4, + 0x72, 0x2e, 0x19, 0x7e, 0x53, 0x7a, 0x39, 0x16, 0xf2, 0x72, 0x2c, 0x64, 0xeb, 0x89, 0xd1, 0x79, + 0x36, 0xeb, 0x94, 0xcf, 0xcc, 0x93, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xb6, 0x25, 0xd7, + 0x86, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/lnrpc/autopilotrpc/autopilot.proto b/lnrpc/autopilotrpc/autopilot.proto index 9570136cf4..614b5e75f8 100644 --- a/lnrpc/autopilotrpc/autopilot.proto +++ b/lnrpc/autopilotrpc/autopilot.proto @@ -11,68 +11,70 @@ service Autopilot { /** Status returns whether the daemon's autopilot agent is active. */ - rpc Status(StatusRequest) returns (StatusResponse); + rpc Status (StatusRequest) returns (StatusResponse); /** ModifyStatus is used to modify the status of the autopilot agent, like enabling or disabling it. */ - rpc ModifyStatus(ModifyStatusRequest) returns (ModifyStatusResponse); + rpc ModifyStatus (ModifyStatusRequest) returns (ModifyStatusResponse); /** QueryScores queries all available autopilot heuristics, in addition to any active combination of these heruristics, for the scores they would give to the given nodes. 
*/ - rpc QueryScores(QueryScoresRequest) returns (QueryScoresResponse); + rpc QueryScores (QueryScoresRequest) returns (QueryScoresResponse); /** SetScores attempts to set the scores used by the running autopilot agent, if the external scoring heuristic is enabled. */ - rpc SetScores(SetScoresRequest) returns (SetScoresResponse); + rpc SetScores (SetScoresRequest) returns (SetScoresResponse); } -message StatusRequest{ +message StatusRequest { } -message StatusResponse{ +message StatusResponse { /// Indicates whether the autopilot is active or not. - bool active = 1 [json_name = "active"]; + bool active = 1; } -message ModifyStatusRequest{ +message ModifyStatusRequest { /// Whether the autopilot agent should be enabled or not. - bool enable = 1 [json_name = "enable"]; + bool enable = 1; } -message ModifyStatusResponse {} +message ModifyStatusResponse { +} -message QueryScoresRequest{ - repeated string pubkeys = 1 [json_name = "pubkeys"]; +message QueryScoresRequest { + repeated string pubkeys = 1; /// If set, we will ignore the local channel state when calculating scores. - bool ignore_local_state = 2 [json_name = "no_state"]; + bool ignore_local_state = 2; } message QueryScoresResponse { message HeuristicResult { - string heuristic = 1 [json_name = "heuristic"]; - map scores= 2 [json_name = "scores"]; + string heuristic = 1; + map scores = 2; } - repeated HeuristicResult results = 1 [json_name = "results"]; + repeated HeuristicResult results = 1; } -message SetScoresRequest{ +message SetScoresRequest { /// The name of the heuristic to provide scores to. - string heuristic = 1 [json_name = "heuristic"]; + string heuristic = 1; /** A map from hex-encoded public keys to scores. Scores must be in the range [0.0, 1.0]. 
*/ - map scores = 2 [json_name = "scores"]; + map scores = 2; } -message SetScoresResponse {} +message SetScoresResponse { +} diff --git a/lnrpc/chainrpc/chainnotifier.pb.go b/lnrpc/chainrpc/chainnotifier.pb.go index 81cc9276f3..db9c13943c 100644 --- a/lnrpc/chainrpc/chainnotifier.pb.go +++ b/lnrpc/chainrpc/chainnotifier.pb.go @@ -106,7 +106,8 @@ type ConfDetails struct { RawTx []byte `protobuf:"bytes,1,opt,name=raw_tx,json=rawTx,proto3" json:"raw_tx,omitempty"` // The hash of the block in which the confirmed transaction was included in. BlockHash []byte `protobuf:"bytes,2,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` - // The height of the block in which the confirmed transaction was included in. + // The height of the block in which the confirmed transaction was included + // in. BlockHeight uint32 `protobuf:"varint,3,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` // The index of the confirmed transaction within the transaction. TxIndex uint32 `protobuf:"varint,4,opt,name=tx_index,json=txIndex,proto3" json:"tx_index,omitempty"` diff --git a/lnrpc/chainrpc/chainnotifier.proto b/lnrpc/chainrpc/chainnotifier.proto index 2014c29081..5cc8a0da26 100644 --- a/lnrpc/chainrpc/chainnotifier.proto +++ b/lnrpc/chainrpc/chainnotifier.proto @@ -39,7 +39,8 @@ message ConfDetails { // The hash of the block in which the confirmed transaction was included in. bytes block_hash = 2; - // The height of the block in which the confirmed transaction was included in. + // The height of the block in which the confirmed transaction was included + // in. uint32 block_height = 3; // The index of the confirmed transaction within the transaction. @@ -150,7 +151,7 @@ service ChainNotifier { particular transaction by its hash or for an output script by specifying a zero hash. 
*/ - rpc RegisterConfirmationsNtfn(ConfRequest) returns (stream ConfEvent); + rpc RegisterConfirmationsNtfn (ConfRequest) returns (stream ConfEvent); /* RegisterSpendNtfn is a synchronous response-streaming RPC that registers an @@ -160,7 +161,7 @@ service ChainNotifier { A client can specify whether the spend request should be for a particular outpoint or for an output script by specifying a zero outpoint. */ - rpc RegisterSpendNtfn(SpendRequest) returns (stream SpendEvent); + rpc RegisterSpendNtfn (SpendRequest) returns (stream SpendEvent); /* RegisterBlockEpochNtfn is a synchronous response-streaming RPC that @@ -173,5 +174,5 @@ service ChainNotifier { point. This allows clients to be idempotent by ensuring that they do not missing processing a single block within the chain. */ - rpc RegisterBlockEpochNtfn(BlockEpoch) returns (stream BlockEpoch); + rpc RegisterBlockEpochNtfn (BlockEpoch) returns (stream BlockEpoch); } diff --git a/lnrpc/gen_protos.sh b/lnrpc/gen_protos.sh index bdc7dde51d..4e54be9966 100755 --- a/lnrpc/gen_protos.sh +++ b/lnrpc/gen_protos.sh @@ -11,7 +11,7 @@ protoc -I/usr/local/include -I. \ # Generate the REST reverse proxy. protoc -I/usr/local/include -I. \ -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \ - --grpc-gateway_out=logtostderr=true:. \ + --grpc-gateway_out=logtostderr=true,paths=source_relative:. \ rpc.proto # Finally, generate the swagger file which describes the REST API in detail. diff --git a/lnrpc/invoicesrpc/addinvoice.go b/lnrpc/invoicesrpc/addinvoice.go index 5685de771e..171e85b7b1 100644 --- a/lnrpc/invoicesrpc/addinvoice.go +++ b/lnrpc/invoicesrpc/addinvoice.go @@ -37,9 +37,6 @@ type AddInvoiceConfig struct { // that's backed by the identity private key of the running lnd node. NodeSigner *netann.NodeSigner - // MaxPaymentMSat is the maximum allowed payment. - MaxPaymentMSat lnwire.MilliSatoshi - // DefaultCLTVExpiry is the default invoice expiry if no values is // specified. 
DefaultCLTVExpiry uint32 @@ -47,6 +44,10 @@ type AddInvoiceConfig struct { // ChanDB is a global boltdb instance which is needed to access the // channel graph. ChanDB *channeldb.DB + + // GenInvoiceFeatures returns a feature containing feature bits that + // should be advertised on freshly generated invoices. + GenInvoiceFeatures func() *lnwire.FeatureVector } // AddInvoiceData contains the required data to create a new invoice. @@ -57,10 +58,6 @@ type AddInvoiceData struct { // description_hash field is not being used. Memo string - // Deprecated. An optional cryptographic receipt of payment which is not - // implemented. - Receipt []byte - // The preimage which will allow settling an incoming HTLC payable to // this preimage. If Preimage is set, Hash should be nil. If both // Preimage and Hash are nil, a random preimage is generated. @@ -71,8 +68,8 @@ type AddInvoiceData struct { // htlc will be accepted and held until the preimage becomes known. Hash *lntypes.Hash - // The value of this invoice in satoshis. - Value btcutil.Amount + // The value of this invoice in millisatoshis. + Value lnwire.MilliSatoshi // Hash (SHA-256) of a description of the payment. Used if the // description of payment (memo) is too long to naturally fit within the @@ -154,31 +151,30 @@ func AddInvoice(ctx context.Context, cfg *AddInvoiceConfig, return nil, nil, fmt.Errorf("memo too large: %v bytes "+ "(maxsize=%v)", len(invoice.Memo), channeldb.MaxMemoSize) } - if len(invoice.Receipt) > channeldb.MaxReceiptSize { - return nil, nil, fmt.Errorf("receipt too large: %v bytes "+ - "(maxsize=%v)", len(invoice.Receipt), channeldb.MaxReceiptSize) - } if len(invoice.DescriptionHash) > 0 && len(invoice.DescriptionHash) != 32 { return nil, nil, fmt.Errorf("description hash is %v bytes, must be 32", len(invoice.DescriptionHash)) } + // We set the max invoice amount to 100k BTC, which itself is several + // multiples off the current block reward. 
+ maxInvoiceAmt := btcutil.Amount(btcutil.SatoshiPerBitcoin * 100000) + + switch { // The value of the invoice must not be negative. - if invoice.Value < 0 { + case int64(invoice.Value) < 0: return nil, nil, fmt.Errorf("payments of negative value "+ - "are not allowed, value is %v", invoice.Value) + "are not allowed, value is %v", int64(invoice.Value)) + + // Also ensure that the invoice is actually realistic, while preventing + // any issues due to underflow. + case invoice.Value.ToSatoshis() > maxInvoiceAmt: + return nil, nil, fmt.Errorf("invoice amount %v is "+ + "too large, max is %v", invoice.Value.ToSatoshis(), + maxInvoiceAmt) } - amtMSat := lnwire.NewMSatFromSatoshis(invoice.Value) - - // The value of the invoice must also not exceed the current soft-limit - // on the largest payment within the network. - if amtMSat > cfg.MaxPaymentMSat { - return nil, nil, fmt.Errorf("payment of %v is too large, max "+ - "payment allowed is %v", invoice.Value, - cfg.MaxPaymentMSat.ToSatoshis(), - ) - } + amtMSat := invoice.Value // We also create an encoded payment request which allows the // caller to compactly send the invoice to the payer. We'll create a @@ -371,6 +367,19 @@ func AddInvoice(ctx context.Context, cfg *AddInvoiceConfig, } + // Set our desired invoice features and add them to our list of options. + invoiceFeatures := cfg.GenInvoiceFeatures() + options = append(options, zpay32.Features(invoiceFeatures)) + + // Generate and set a random payment address for this invoice. If the + // sender understands payment addresses, this can be used to avoid + // intermediaries probing the receiver. + var paymentAddr [32]byte + if _, err := rand.Read(paymentAddr[:]); err != nil { + return nil, nil, err + } + options = append(options, zpay32.PaymentAddr(paymentAddr)) + // Create and encode the payment request as a bech32 (zpay32) string. 
creationDate := time.Now() payReq, err := zpay32.NewInvoice( @@ -392,13 +401,14 @@ func AddInvoice(ctx context.Context, cfg *AddInvoiceConfig, newInvoice := &channeldb.Invoice{ CreationDate: creationDate, Memo: []byte(invoice.Memo), - Receipt: invoice.Receipt, PaymentRequest: []byte(payReqString), - FinalCltvDelta: int32(payReq.MinFinalCLTVExpiry()), - Expiry: payReq.Expiry(), Terms: channeldb.ContractTerm{ + FinalCltvDelta: int32(payReq.MinFinalCLTVExpiry()), + Expiry: payReq.Expiry(), Value: amtMSat, PaymentPreimage: paymentPreimage, + PaymentAddr: paymentAddr, + Features: invoiceFeatures, }, } diff --git a/lnrpc/invoicesrpc/config_active.go b/lnrpc/invoicesrpc/config_active.go index 0bb311a187..9e43ccc893 100644 --- a/lnrpc/invoicesrpc/config_active.go +++ b/lnrpc/invoicesrpc/config_active.go @@ -40,9 +40,6 @@ type Config struct { // that's backed by the identity private key of the running lnd node. NodeSigner *netann.NodeSigner - // MaxPaymentMSat is the maximum allowed payment. - MaxPaymentMSat lnwire.MilliSatoshi - // DefaultCLTVExpiry is the default invoice expiry if no values is // specified. DefaultCLTVExpiry uint32 @@ -50,4 +47,8 @@ type Config struct { // ChanDB is a global boltdb instance which is needed to access the // channel graph. ChanDB *channeldb.DB + + // GenInvoiceFeatures returns a feature containing feature bits that + // should be advertised on freshly generated invoices. 
+ GenInvoiceFeatures func() *lnwire.FeatureVector } diff --git a/lnrpc/invoicesrpc/invoices.pb.go b/lnrpc/invoicesrpc/invoices.pb.go index bed0f86834..1dbc8ccb2d 100644 --- a/lnrpc/invoicesrpc/invoices.pb.go +++ b/lnrpc/invoicesrpc/invoices.pb.go @@ -104,23 +104,31 @@ type AddHoldInvoiceRequest struct { Memo string `protobuf:"bytes,1,opt,name=memo,proto3" json:"memo,omitempty"` /// The hash of the preimage Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - /// The value of this invoice in satoshis + //* + //The value of this invoice in satoshis + // + //The fields value and value_msat are mutually exclusive. Value int64 `protobuf:"varint,3,opt,name=value,proto3" json:"value,omitempty"` //* + //The value of this invoice in millisatoshis + // + //The fields value and value_msat are mutually exclusive. + ValueMsat int64 `protobuf:"varint,10,opt,name=value_msat,json=valueMsat,proto3" json:"value_msat,omitempty"` + //* //Hash (SHA-256) of a description of the payment. Used if the description of //payment (memo) is too long to naturally fit within the description field //of an encoded payment request. - DescriptionHash []byte `protobuf:"bytes,4,opt,name=description_hash,proto3" json:"description_hash,omitempty"` + DescriptionHash []byte `protobuf:"bytes,4,opt,name=description_hash,json=descriptionHash,proto3" json:"description_hash,omitempty"` /// Payment request expiry time in seconds. Default is 3600 (1 hour). Expiry int64 `protobuf:"varint,5,opt,name=expiry,proto3" json:"expiry,omitempty"` /// Fallback on-chain address. - FallbackAddr string `protobuf:"bytes,6,opt,name=fallback_addr,proto3" json:"fallback_addr,omitempty"` + FallbackAddr string `protobuf:"bytes,6,opt,name=fallback_addr,json=fallbackAddr,proto3" json:"fallback_addr,omitempty"` /// Delta to use for the time-lock of the CLTV extended to the final hop. 
- CltvExpiry uint64 `protobuf:"varint,7,opt,name=cltv_expiry,proto3" json:"cltv_expiry,omitempty"` + CltvExpiry uint64 `protobuf:"varint,7,opt,name=cltv_expiry,json=cltvExpiry,proto3" json:"cltv_expiry,omitempty"` //* //Route hints that can each be individually used to assist in reaching the //invoice's destination. - RouteHints []*lnrpc.RouteHint `protobuf:"bytes,8,rep,name=route_hints,proto3" json:"route_hints,omitempty"` + RouteHints []*lnrpc.RouteHint `protobuf:"bytes,8,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` /// Whether this invoice should include routing hints for private channels. Private bool `protobuf:"varint,9,opt,name=private,proto3" json:"private,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -174,6 +182,13 @@ func (m *AddHoldInvoiceRequest) GetValue() int64 { return 0 } +func (m *AddHoldInvoiceRequest) GetValueMsat() int64 { + if m != nil { + return m.ValueMsat + } + return 0 +} + func (m *AddHoldInvoiceRequest) GetDescriptionHash() []byte { if m != nil { return m.DescriptionHash @@ -221,7 +236,7 @@ type AddHoldInvoiceResp struct { //A bare-bones invoice for a payment within the Lightning Network. With the //details of the invoice, the sender has all the data necessary to send a //payment to the recipient. - PaymentRequest string `protobuf:"bytes,1,opt,name=payment_request,proto3" json:"payment_request,omitempty"` + PaymentRequest string `protobuf:"bytes,1,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -260,7 +275,8 @@ func (m *AddHoldInvoiceResp) GetPaymentRequest() string { } type SettleInvoiceMsg struct { - /// Externally discovered pre-image that should be used to settle the hold invoice. + /// Externally discovered pre-image that should be used to settle the hold + /// invoice. 
Preimage []byte `protobuf:"bytes,1,opt,name=preimage,proto3" json:"preimage,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -332,7 +348,7 @@ var xxx_messageInfo_SettleInvoiceResp proto.InternalMessageInfo type SubscribeSingleInvoiceRequest struct { /// Hash corresponding to the (hold) invoice to subscribe to. - RHash []byte `protobuf:"bytes,2,opt,name=r_hash,proto3" json:"r_hash,omitempty"` + RHash []byte `protobuf:"bytes,2,opt,name=r_hash,json=rHash,proto3" json:"r_hash,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -383,39 +399,41 @@ func init() { func init() { proto.RegisterFile("invoicesrpc/invoices.proto", fileDescriptor_090ab9c4958b987d) } var fileDescriptor_090ab9c4958b987d = []byte{ - // 509 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0x41, 0x6f, 0xd3, 0x4c, - 0x10, 0x95, 0x93, 0x34, 0x4d, 0x26, 0x6d, 0xbf, 0x7c, 0x0b, 0x44, 0x96, 0x45, 0xc1, 0x58, 0x1c, - 0xac, 0x1e, 0x6c, 0x48, 0xc5, 0x11, 0x24, 0xe0, 0x12, 0x90, 0xe0, 0xe0, 0x08, 0x0e, 0x5c, 0xa2, - 0x8d, 0xbd, 0xd8, 0xab, 0x6e, 0x76, 0x97, 0xdd, 0x75, 0xa0, 0xbf, 0x8a, 0xdf, 0xc3, 0xbf, 0x41, - 0x5e, 0x3b, 0x95, 0xed, 0x96, 0xde, 0x66, 0xde, 0xcc, 0x3c, 0x8f, 0xdf, 0x9b, 0x05, 0x8f, 0xf2, - 0xbd, 0xa0, 0x29, 0xd1, 0x4a, 0xa6, 0xf1, 0x21, 0x8e, 0xa4, 0x12, 0x46, 0xa0, 0x59, 0xab, 0xe6, - 0x3d, 0xce, 0x85, 0xc8, 0x19, 0x89, 0xb1, 0xa4, 0x31, 0xe6, 0x5c, 0x18, 0x6c, 0xa8, 0xe0, 0x4d, - 0xab, 0x37, 0x55, 0x32, 0xad, 0xc3, 0xe0, 0x15, 0xcc, 0xdf, 0x63, 0x9e, 0x12, 0xf6, 0xa1, 0x9e, - 0xfe, 0xa4, 0x73, 0xf4, 0x0c, 0x4e, 0x24, 0xbe, 0xde, 0x11, 0x6e, 0x36, 0x05, 0xd6, 0x85, 0xeb, - 0xf8, 0x4e, 0x78, 0x92, 0xcc, 0x1a, 0x6c, 0x85, 0x75, 0x11, 0x3c, 0x80, 0xff, 0x3b, 0x63, 0x09, - 0xd1, 0x32, 0xf8, 0x3d, 0x80, 0x47, 0x6f, 0xb3, 0x6c, 0x25, 0x58, 0x76, 0x03, 0xff, 0x28, 0x89, - 0x36, 0x08, 0xc1, 0x68, 0x47, 0x76, 0xc2, 0x32, 0x4d, 
0x13, 0x1b, 0x57, 0x98, 0x65, 0x1f, 0x58, - 0x76, 0x1b, 0xa3, 0x87, 0x70, 0xb4, 0xc7, 0xac, 0x24, 0xee, 0xd0, 0x77, 0xc2, 0x61, 0x52, 0x27, - 0xe8, 0x02, 0xe6, 0x19, 0xd1, 0xa9, 0xa2, 0xb2, 0xfa, 0x89, 0x7a, 0xa7, 0x91, 0x9d, 0xba, 0x85, - 0xa3, 0x05, 0x8c, 0xc9, 0x2f, 0x49, 0xd5, 0xb5, 0x7b, 0x64, 0x29, 0x9a, 0x0c, 0x3d, 0x87, 0xd3, - 0xef, 0x98, 0xb1, 0x2d, 0x4e, 0xaf, 0x36, 0x38, 0xcb, 0x94, 0x3b, 0xb6, 0xab, 0x74, 0x41, 0xe4, - 0xc3, 0x2c, 0x65, 0x66, 0xbf, 0x69, 0x28, 0x8e, 0x7d, 0x27, 0x1c, 0x25, 0x6d, 0x08, 0x2d, 0x61, - 0xa6, 0x44, 0x69, 0xc8, 0xa6, 0xa0, 0xdc, 0x68, 0x77, 0xe2, 0x0f, 0xc3, 0xd9, 0x72, 0x1e, 0x31, - 0x5e, 0x49, 0x9a, 0x54, 0x95, 0x15, 0xe5, 0x26, 0x69, 0x37, 0x21, 0x17, 0x8e, 0xa5, 0xa2, 0x7b, - 0x6c, 0x88, 0x3b, 0xf5, 0x9d, 0x70, 0x92, 0x1c, 0xd2, 0xe0, 0x0d, 0xa0, 0xbe, 0x60, 0x5a, 0xa2, - 0x10, 0xfe, 0x3b, 0xe8, 0xaf, 0x6a, 0x01, 0x1b, 0xe1, 0xfa, 0x70, 0x10, 0xc1, 0x7c, 0x4d, 0x8c, - 0x61, 0xa4, 0xe5, 0x9e, 0x07, 0x13, 0xa9, 0x08, 0xdd, 0xe1, 0x9c, 0x34, 0xce, 0xdd, 0xe4, 0x95, - 0x6d, 0x9d, 0x7e, 0x6b, 0xdb, 0x6b, 0x38, 0x5f, 0x97, 0xdb, 0x4a, 0xc7, 0x2d, 0x59, 0x53, 0x9e, - 0xb7, 0xaa, 0xb5, 0x7b, 0x0b, 0x18, 0xab, 0x4d, 0xcb, 0xab, 0x26, 0xfb, 0x38, 0x9a, 0x38, 0xf3, - 0xc1, 0xf2, 0xcf, 0x00, 0x26, 0xcd, 0x80, 0x46, 0x5f, 0x61, 0x71, 0x37, 0x17, 0xba, 0x88, 0x5a, - 0xf7, 0x19, 0xdd, 0xfb, 0x41, 0xef, 0xac, 0xd1, 0xb3, 0x81, 0x5f, 0x38, 0xe8, 0x33, 0x9c, 0x76, - 0xee, 0x0d, 0x9d, 0x77, 0xe8, 0xfa, 0x27, 0xec, 0x3d, 0xf9, 0x77, 0xd9, 0x4a, 0xfc, 0x05, 0xce, - 0xba, 0xc2, 0xa3, 0xa0, 0x33, 0x71, 0xe7, 0x19, 0x7b, 0x4f, 0xef, 0xed, 0xd1, 0xb2, 0x5a, 0xb3, - 0xa3, 0x6f, 0x6f, 0xcd, 0xbe, 0x57, 0xbd, 0x35, 0x6f, 0x59, 0xf3, 0xee, 0xf2, 0xdb, 0xcb, 0x9c, - 0x9a, 0xa2, 0xdc, 0x46, 0xa9, 0xd8, 0xc5, 0x8c, 0xe6, 0x85, 0xe1, 0x94, 0xe7, 0x9c, 0x98, 0x9f, - 0x42, 0x5d, 0xc5, 0x8c, 0x67, 0xb1, 0x55, 0x2a, 0x6e, 0xd1, 0x6c, 0xc7, 0xf6, 0x65, 0x5f, 0xfe, - 0x0d, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x5f, 0xe2, 0x9a, 0x2d, 0x04, 0x00, 0x00, + // 541 bytes 
of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x53, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x95, 0xd3, 0x34, 0x4d, 0x26, 0x69, 0x1a, 0x16, 0x5a, 0x59, 0x16, 0xa1, 0xc1, 0x1c, 0x08, + 0x1c, 0x6c, 0xda, 0x8a, 0x1b, 0x1c, 0x0a, 0x42, 0x0a, 0x48, 0xe5, 0xe0, 0x08, 0x0e, 0x5c, 0xac, + 0x8d, 0xbd, 0xd8, 0xab, 0xda, 0xbb, 0xcb, 0xee, 0x26, 0xd0, 0x5f, 0xe4, 0x0f, 0xf8, 0x1b, 0xe4, + 0xf5, 0xa6, 0xb2, 0x43, 0xe9, 0x6d, 0xe6, 0xcd, 0xec, 0xcb, 0xcb, 0x7b, 0x63, 0xf0, 0x28, 0xdb, + 0x70, 0x9a, 0x10, 0x25, 0x45, 0x12, 0x6e, 0xeb, 0x40, 0x48, 0xae, 0x39, 0x1a, 0x36, 0x66, 0xde, + 0xe3, 0x8c, 0xf3, 0xac, 0x20, 0x21, 0x16, 0x34, 0xc4, 0x8c, 0x71, 0x8d, 0x35, 0xe5, 0xcc, 0xae, + 0x7a, 0x03, 0x29, 0x92, 0xba, 0xf4, 0x5f, 0xc3, 0xe4, 0x3d, 0x66, 0x09, 0x29, 0x3e, 0xd6, 0xaf, + 0xaf, 0x54, 0x86, 0x9e, 0xc2, 0x48, 0xe0, 0x9b, 0x92, 0x30, 0x1d, 0xe7, 0x58, 0xe5, 0xae, 0x33, + 0x73, 0xe6, 0xa3, 0x68, 0x68, 0xb1, 0x05, 0x56, 0xb9, 0xff, 0x10, 0x1e, 0xb4, 0x9e, 0x45, 0x44, + 0x09, 0xff, 0x77, 0x07, 0x8e, 0x2f, 0xd3, 0x74, 0xc1, 0x8b, 0xf4, 0x16, 0xfe, 0xb1, 0x26, 0x4a, + 0x23, 0x04, 0xdd, 0x92, 0x94, 0xdc, 0x30, 0x0d, 0x22, 0x53, 0x57, 0x98, 0x61, 0xef, 0x18, 0x76, + 0x53, 0xa3, 0x47, 0xb0, 0xbf, 0xc1, 0xc5, 0x9a, 0xb8, 0x7b, 0x33, 0x67, 0xbe, 0x17, 0xd5, 0x0d, + 0x9a, 0x02, 0x98, 0x22, 0x2e, 0x15, 0xd6, 0x2e, 0x98, 0xd1, 0xc0, 0x20, 0x57, 0x0a, 0x6b, 0xf4, + 0x02, 0x26, 0x29, 0x51, 0x89, 0xa4, 0xa2, 0xfa, 0x8f, 0xb5, 0xe4, 0xae, 0x21, 0x3d, 0x6a, 0xe0, + 0x95, 0x6c, 0x74, 0x02, 0x3d, 0xf2, 0x4b, 0x50, 0x79, 0xe3, 0xee, 0x1b, 0x16, 0xdb, 0xa1, 0x67, + 0x70, 0xf8, 0x1d, 0x17, 0xc5, 0x0a, 0x27, 0xd7, 0x31, 0x4e, 0x53, 0xe9, 0xf6, 0x8c, 0xd0, 0xd1, + 0x16, 0xbc, 0x4c, 0x53, 0x89, 0x4e, 0x61, 0x98, 0x14, 0x7a, 0x13, 0x5b, 0x86, 0x83, 0x99, 0x33, + 0xef, 0x46, 0x50, 0x41, 0x1f, 0x6a, 0x96, 0x33, 0x18, 0x4a, 0xbe, 0xd6, 0x24, 0xce, 0x29, 0xd3, + 0xca, 0xed, 0xcf, 0xf6, 0xe6, 0xc3, 0xf3, 0x49, 0x50, 0xb0, 0xca, 0xee, 0xa8, 0x9a, 0x2c, 0x28, + 
0xd3, 0x11, 0xc8, 0x6d, 0xa9, 0x90, 0x0b, 0x07, 0x42, 0xd2, 0x0d, 0xd6, 0xc4, 0x1d, 0xcc, 0x9c, + 0x79, 0x3f, 0xda, 0xb6, 0xfe, 0x5b, 0x40, 0xbb, 0x5e, 0x2a, 0x81, 0x9e, 0xc3, 0xd1, 0x36, 0x1a, + 0x59, 0x7b, 0x6b, 0x3d, 0x1d, 0x5b, 0xd8, 0x3a, 0xee, 0x07, 0x30, 0x59, 0x12, 0xad, 0x0b, 0xd2, + 0xc8, 0xd5, 0x83, 0xbe, 0x90, 0x84, 0x96, 0x38, 0x23, 0x36, 0xd3, 0xdb, 0xbe, 0x0a, 0xb4, 0xb5, + 0x6f, 0x02, 0x7d, 0x03, 0xd3, 0xe5, 0x7a, 0x55, 0x59, 0xb8, 0x22, 0x4b, 0xca, 0xb2, 0xc6, 0xb4, + 0xce, 0xf5, 0x18, 0x7a, 0x32, 0x6e, 0xa4, 0xb8, 0x2f, 0x2b, 0x9b, 0x3f, 0x75, 0xfb, 0xce, 0xa4, + 0x73, 0xfe, 0xa7, 0x03, 0x7d, 0xbb, 0xaf, 0xd0, 0x57, 0x38, 0xb9, 0x9b, 0x0a, 0xbd, 0x0c, 0x1a, + 0x87, 0x1b, 0xdc, 0xfb, 0x7b, 0xde, 0xd8, 0x9a, 0x69, 0xe1, 0x57, 0x0e, 0xfa, 0x0c, 0x87, 0xad, + 0x43, 0x44, 0xd3, 0x16, 0xdd, 0xee, 0x6d, 0x7b, 0x4f, 0xfe, 0x3f, 0x36, 0x06, 0x7f, 0x81, 0x71, + 0xdb, 0x76, 0xe4, 0xb7, 0x5e, 0xdc, 0x79, 0xdf, 0xde, 0xe9, 0xbd, 0x3b, 0x4a, 0x54, 0x32, 0x5b, + 0xf6, 0xee, 0xc8, 0xdc, 0x8d, 0x6a, 0x47, 0xe6, 0x3f, 0xc9, 0xbc, 0xbb, 0xf8, 0x76, 0x96, 0x51, + 0x9d, 0xaf, 0x57, 0x41, 0xc2, 0xcb, 0xb0, 0xa0, 0x59, 0xae, 0x19, 0x65, 0x19, 0x23, 0xfa, 0x27, + 0x97, 0xd7, 0x61, 0xc1, 0xd2, 0xd0, 0x38, 0x15, 0x36, 0x68, 0x56, 0x3d, 0xf3, 0xc9, 0x5f, 0xfc, + 0x0d, 0x00, 0x00, 0xff, 0xff, 0x56, 0x22, 0x58, 0xe6, 0x46, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/lnrpc/invoicesrpc/invoices.proto b/lnrpc/invoicesrpc/invoices.proto index b0353b2b82..3c92802eae 100644 --- a/lnrpc/invoicesrpc/invoices.proto +++ b/lnrpc/invoicesrpc/invoices.proto @@ -15,33 +15,35 @@ service Invoices { to notify the client of state transitions of the specified invoice. Initially the current invoice state is always sent out. 
*/ - rpc SubscribeSingleInvoice (SubscribeSingleInvoiceRequest) returns (stream lnrpc.Invoice); + rpc SubscribeSingleInvoice (SubscribeSingleInvoiceRequest) + returns (stream lnrpc.Invoice); /** - CancelInvoice cancels a currently open invoice. If the invoice is already + CancelInvoice cancels a currently open invoice. If the invoice is already canceled, this call will succeed. If the invoice is already settled, it will fail. */ - rpc CancelInvoice(CancelInvoiceMsg) returns (CancelInvoiceResp); + rpc CancelInvoice (CancelInvoiceMsg) returns (CancelInvoiceResp); /** AddHoldInvoice creates a hold invoice. It ties the invoice to the hash supplied in the request. */ - rpc AddHoldInvoice(AddHoldInvoiceRequest) returns (AddHoldInvoiceResp); - + rpc AddHoldInvoice (AddHoldInvoiceRequest) returns (AddHoldInvoiceResp); + /** SettleInvoice settles an accepted invoice. If the invoice is already settled, this call will succeed. */ - rpc SettleInvoice(SettleInvoiceMsg) returns (SettleInvoiceResp); + rpc SettleInvoice (SettleInvoiceMsg) returns (SettleInvoiceResp); } message CancelInvoiceMsg { /// Hash corresponding to the (hold) invoice to cancel. bytes payment_hash = 1; -} -message CancelInvoiceResp {} +} +message CancelInvoiceResp { +} message AddHoldInvoiceRequest { /** @@ -50,38 +52,49 @@ message AddHoldInvoiceRequest { field of the encoded payment request if the description_hash field is not being used. */ - string memo = 1 [json_name = "memo"]; + string memo = 1; /// The hash of the preimage - bytes hash = 2 [json_name = "hash"]; + bytes hash = 2; + + /** + The value of this invoice in satoshis + + The fields value and value_msat are mutually exclusive. + */ + int64 value = 3; + + /** + The value of this invoice in millisatoshis - /// The value of this invoice in satoshis - int64 value = 3 [json_name = "value"]; + The fields value and value_msat are mutually exclusive. + */ + int64 value_msat = 10; /** Hash (SHA-256) of a description of the payment. 
Used if the description of payment (memo) is too long to naturally fit within the description field of an encoded payment request. */ - bytes description_hash = 4 [json_name = "description_hash"]; + bytes description_hash = 4; /// Payment request expiry time in seconds. Default is 3600 (1 hour). - int64 expiry = 5 [json_name = "expiry"]; + int64 expiry = 5; /// Fallback on-chain address. - string fallback_addr = 6 [json_name = "fallback_addr"]; + string fallback_addr = 6; /// Delta to use for the time-lock of the CLTV extended to the final hop. - uint64 cltv_expiry = 7 [json_name = "cltv_expiry"]; + uint64 cltv_expiry = 7; /** Route hints that can each be individually used to assist in reaching the invoice's destination. */ - repeated lnrpc.RouteHint route_hints = 8 [json_name = "route_hints"]; + repeated lnrpc.RouteHint route_hints = 8; /// Whether this invoice should include routing hints for private channels. - bool private = 9 [json_name = "private"]; + bool private = 9; } message AddHoldInvoiceResp { @@ -90,19 +103,21 @@ message AddHoldInvoiceResp { details of the invoice, the sender has all the data necessary to send a payment to the recipient. */ - string payment_request = 1 [json_name = "payment_request"]; + string payment_request = 1; } message SettleInvoiceMsg { - /// Externally discovered pre-image that should be used to settle the hold invoice. + /// Externally discovered pre-image that should be used to settle the hold + /// invoice. bytes preimage = 1; -} +} -message SettleInvoiceResp {} +message SettleInvoiceResp { +} message SubscribeSingleInvoiceRequest { reserved 1; /// Hash corresponding to the (hold) invoice to subscribe to. 
- bytes r_hash = 2 [json_name = "r_hash"]; + bytes r_hash = 2; } diff --git a/lnrpc/invoicesrpc/invoices_server.go b/lnrpc/invoicesrpc/invoices_server.go index f7293efaa7..e915bc1a08 100644 --- a/lnrpc/invoicesrpc/invoices_server.go +++ b/lnrpc/invoicesrpc/invoices_server.go @@ -11,7 +11,6 @@ import ( "google.golang.org/grpc" "gopkg.in/macaroon-bakery.v2/bakery" - "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntypes" @@ -247,13 +246,13 @@ func (s *Server) AddHoldInvoice(ctx context.Context, invoice *AddHoldInvoiceRequest) (*AddHoldInvoiceResp, error) { addInvoiceCfg := &AddInvoiceConfig{ - AddInvoice: s.cfg.InvoiceRegistry.AddInvoice, - IsChannelActive: s.cfg.IsChannelActive, - ChainParams: s.cfg.ChainParams, - NodeSigner: s.cfg.NodeSigner, - MaxPaymentMSat: s.cfg.MaxPaymentMSat, - DefaultCLTVExpiry: s.cfg.DefaultCLTVExpiry, - ChanDB: s.cfg.ChanDB, + AddInvoice: s.cfg.InvoiceRegistry.AddInvoice, + IsChannelActive: s.cfg.IsChannelActive, + ChainParams: s.cfg.ChainParams, + NodeSigner: s.cfg.NodeSigner, + DefaultCLTVExpiry: s.cfg.DefaultCLTVExpiry, + ChanDB: s.cfg.ChanDB, + GenInvoiceFeatures: s.cfg.GenInvoiceFeatures, } hash, err := lntypes.MakeHash(invoice.Hash) @@ -261,10 +260,15 @@ func (s *Server) AddHoldInvoice(ctx context.Context, return nil, err } + value, err := lnrpc.UnmarshallAmt(invoice.Value, invoice.ValueMsat) + if err != nil { + return nil, err + } + addInvoiceData := &AddInvoiceData{ Memo: invoice.Memo, Hash: &hash, - Value: btcutil.Amount(invoice.Value), + Value: value, DescriptionHash: invoice.DescriptionHash, Expiry: invoice.Expiry, FallbackAddr: invoice.FallbackAddr, diff --git a/lnrpc/invoicesrpc/utils.go b/lnrpc/invoicesrpc/utils.go index ea10d1fd82..1d3bab33c6 100644 --- a/lnrpc/invoicesrpc/utils.go +++ b/lnrpc/invoicesrpc/utils.go @@ -2,23 +2,52 @@ package invoicesrpc import ( "encoding/hex" + "errors" "fmt" "github.com/btcsuite/btcd/chaincfg" 
"github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/zpay32" ) +// decodePayReq decodes the invoice payment request if present. This is needed, +// because not all information is stored in dedicated invoice fields. If there +// is no payment request present, a dummy request will be returned. This can +// happen with just-in-time inserted keysend invoices. +func decodePayReq(invoice *channeldb.Invoice, + activeNetParams *chaincfg.Params) (*zpay32.Invoice, error) { + + paymentRequest := string(invoice.PaymentRequest) + if paymentRequest == "" { + preimage := invoice.Terms.PaymentPreimage + if preimage == channeldb.UnknownPreimage { + return nil, errors.New("cannot reconstruct pay req") + } + hash := [32]byte(preimage.Hash()) + return &zpay32.Invoice{ + PaymentHash: &hash, + }, nil + } + + var err error + decoded, err := zpay32.Decode(paymentRequest, activeNetParams) + if err != nil { + return nil, fmt.Errorf("unable to decode payment "+ + "request: %v", err) + } + return decoded, nil + +} + // CreateRPCInvoice creates an *lnrpc.Invoice from the *channeldb.Invoice. 
func CreateRPCInvoice(invoice *channeldb.Invoice, activeNetParams *chaincfg.Params) (*lnrpc.Invoice, error) { - paymentRequest := string(invoice.PaymentRequest) - decoded, err := zpay32.Decode(paymentRequest, activeNetParams) + decoded, err := decodePayReq(invoice, activeNetParams) if err != nil { - return nil, fmt.Errorf("unable to decode payment request: %v", - err) + return nil, err } var descHash []byte @@ -43,10 +72,10 @@ func CreateRPCInvoice(invoice *channeldb.Invoice, satAmt := invoice.Terms.Value.ToSatoshis() satAmtPaid := invoice.AmtPaid.ToSatoshis() - isSettled := invoice.Terms.State == channeldb.ContractSettled + isSettled := invoice.State == channeldb.ContractSettled var state lnrpc.Invoice_InvoiceState - switch invoice.Terms.State { + switch invoice.State { case channeldb.ContractOpen: state = lnrpc.Invoice_OPEN case channeldb.ContractSettled: @@ -57,7 +86,7 @@ func CreateRPCInvoice(invoice *channeldb.Invoice, state = lnrpc.Invoice_ACCEPTED default: return nil, fmt.Errorf("unknown invoice state %v", - invoice.Terms.State) + invoice.State) } rpcHtlcs := make([]*lnrpc.InvoiceHTLC, 0, len(invoice.Htlcs)) @@ -75,13 +104,15 @@ func CreateRPCInvoice(invoice *channeldb.Invoice, } rpcHtlc := lnrpc.InvoiceHTLC{ - ChanId: key.ChanID.ToUint64(), - HtlcIndex: key.HtlcID, - AcceptHeight: int32(htlc.AcceptHeight), - AcceptTime: htlc.AcceptTime.Unix(), - ExpiryHeight: int32(htlc.Expiry), - AmtMsat: uint64(htlc.Amt), - State: state, + ChanId: key.ChanID.ToUint64(), + HtlcIndex: key.HtlcID, + AcceptHeight: int32(htlc.AcceptHeight), + AcceptTime: htlc.AcceptTime.Unix(), + ExpiryHeight: int32(htlc.Expiry), + AmtMsat: uint64(htlc.Amt), + State: state, + CustomRecords: htlc.CustomRecords, + MppTotalAmtMsat: uint64(htlc.MppTotalAmt), } // Only report resolved times if htlc is resolved. 
@@ -94,16 +125,16 @@ func CreateRPCInvoice(invoice *channeldb.Invoice, rpcInvoice := &lnrpc.Invoice{ Memo: string(invoice.Memo[:]), - Receipt: invoice.Receipt[:], RHash: decoded.PaymentHash[:], Value: int64(satAmt), + ValueMsat: int64(invoice.Terms.Value), CreationDate: invoice.CreationDate.Unix(), SettleDate: settleDate, Settled: isSettled, - PaymentRequest: paymentRequest, + PaymentRequest: string(invoice.PaymentRequest), DescriptionHash: descHash, - Expiry: int64(invoice.Expiry.Seconds()), - CltvExpiry: uint64(invoice.FinalCltvDelta), + Expiry: int64(invoice.Terms.Expiry.Seconds()), + CltvExpiry: uint64(invoice.Terms.FinalCltvDelta), FallbackAddr: fallbackAddr, RouteHints: routeHints, AddIndex: invoice.AddIndex, @@ -114,6 +145,8 @@ func CreateRPCInvoice(invoice *channeldb.Invoice, AmtPaid: int64(invoice.AmtPaid), State: state, Htlcs: rpcHtlcs, + Features: CreateRPCFeatures(invoice.Terms.Features), + IsKeysend: len(invoice.PaymentRequest) == 0, } if preimage != channeldb.UnknownPreimage { @@ -123,6 +156,25 @@ func CreateRPCInvoice(invoice *channeldb.Invoice, return rpcInvoice, nil } +// CreateRPCFeatures maps a feature vector into a list of lnrpc.Features. +func CreateRPCFeatures(fv *lnwire.FeatureVector) map[uint32]*lnrpc.Feature { + if fv == nil { + return nil + } + + features := fv.Features() + rpcFeatures := make(map[uint32]*lnrpc.Feature, len(features)) + for bit := range features { + rpcFeatures[uint32(bit)] = &lnrpc.Feature{ + Name: fv.Name(bit), + IsRequired: bit.IsRequired(), + IsKnown: fv.IsKnown(bit), + } + } + + return rpcFeatures +} + // CreateRPCRouteHints takes in the decoded form of an invoice's route hints // and converts them into the lnrpc type. func CreateRPCRouteHints(routeHints [][]zpay32.HopHint) []*lnrpc.RouteHint { diff --git a/lnrpc/lnclipb/lncli.pb.go b/lnrpc/lnclipb/lncli.pb.go new file mode 100644 index 0000000000..f6174d3d2d --- /dev/null +++ b/lnrpc/lnclipb/lncli.pb.go @@ -0,0 +1,91 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: lnclipb/lncli.proto + +package lnclipb + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + verrpc "github.com/lightningnetwork/lnd/lnrpc/verrpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type VersionResponse struct { + /// The version information for lncli. + Lncli *verrpc.Version `protobuf:"bytes,1,opt,name=lncli,proto3" json:"lncli,omitempty"` + /// The version information for lnd. + Lnd *verrpc.Version `protobuf:"bytes,2,opt,name=lnd,proto3" json:"lnd,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionResponse) Reset() { *m = VersionResponse{} } +func (m *VersionResponse) String() string { return proto.CompactTextString(m) } +func (*VersionResponse) ProtoMessage() {} +func (*VersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_88b54c9c61b986c4, []int{0} +} + +func (m *VersionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionResponse.Unmarshal(m, b) +} +func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic) +} +func (m *VersionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionResponse.Merge(m, src) +} +func (m *VersionResponse) XXX_Size() int { + return xxx_messageInfo_VersionResponse.Size(m) +} +func (m *VersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VersionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionResponse 
proto.InternalMessageInfo + +func (m *VersionResponse) GetLncli() *verrpc.Version { + if m != nil { + return m.Lncli + } + return nil +} + +func (m *VersionResponse) GetLnd() *verrpc.Version { + if m != nil { + return m.Lnd + } + return nil +} + +func init() { + proto.RegisterType((*VersionResponse)(nil), "lnclipb.VersionResponse") +} + +func init() { proto.RegisterFile("lnclipb/lncli.proto", fileDescriptor_88b54c9c61b986c4) } + +var fileDescriptor_88b54c9c61b986c4 = []byte{ + // 159 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xce, 0xc9, 0x4b, 0xce, + 0xc9, 0x2c, 0x48, 0xd2, 0x07, 0xd3, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xec, 0x50, 0x41, + 0x29, 0xe1, 0xb2, 0xd4, 0xa2, 0xa2, 0x82, 0x64, 0x7d, 0x08, 0x05, 0x91, 0x55, 0x8a, 0xe6, 0xe2, + 0x0f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x0b, 0x4a, 0x2d, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x15, + 0x52, 0xe5, 0x62, 0x05, 0x6b, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0xe2, 0xd7, 0x83, 0x6a, + 0x80, 0xa9, 0x83, 0xc8, 0x0a, 0x29, 0x72, 0x31, 0xe7, 0xe4, 0xa5, 0x48, 0x30, 0x61, 0x57, 0x04, + 0x92, 0x73, 0xd2, 0x8f, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, + 0xcf, 0xc9, 0x4c, 0xcf, 0x28, 0xc9, 0xcb, 0xcc, 0x4b, 0xcf, 0x4b, 0x2d, 0x29, 0xcf, 0x2f, 0xca, + 0xd6, 0xcf, 0xc9, 0x4b, 0xd1, 0xcf, 0xc9, 0x03, 0x39, 0x09, 0xea, 0xc4, 0x24, 0x36, 0xb0, 0xa3, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xfc, 0x5e, 0x89, 0xc9, 0x00, 0x00, 0x00, +} diff --git a/lnrpc/lnclipb/lncli.proto b/lnrpc/lnclipb/lncli.proto new file mode 100644 index 0000000000..37286fd98b --- /dev/null +++ b/lnrpc/lnclipb/lncli.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +import "verrpc/verrpc.proto"; + +package lnclipb; + +option go_package = "github.com/lightningnetwork/lnd/lnrpc/lnclipb"; + +message VersionResponse { + /// The version information for lncli. + verrpc.Version lncli = 1; + + /// The version information for lnd. 
+ verrpc.Version lnd = 2; +}; diff --git a/lnrpc/marshall_utils.go b/lnrpc/marshall_utils.go new file mode 100644 index 0000000000..937dee0677 --- /dev/null +++ b/lnrpc/marshall_utils.go @@ -0,0 +1,56 @@ +package lnrpc + +import ( + "errors" + + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/lnwire" +) + +var ( + // ErrSatMsatMutualExclusive is returned when both a sat and an msat + // amount are set. + ErrSatMsatMutualExclusive = errors.New( + "sat and msat arguments are mutually exclusive", + ) +) + +// CalculateFeeLimit returns the fee limit in millisatoshis. If a percentage +// based fee limit has been requested, we'll factor in the ratio provided with +// the amount of the payment. +func CalculateFeeLimit(feeLimit *FeeLimit, + amount lnwire.MilliSatoshi) lnwire.MilliSatoshi { + + switch feeLimit.GetLimit().(type) { + + case *FeeLimit_Fixed: + return lnwire.NewMSatFromSatoshis( + btcutil.Amount(feeLimit.GetFixed()), + ) + + case *FeeLimit_FixedMsat: + return lnwire.MilliSatoshi(feeLimit.GetFixedMsat()) + + case *FeeLimit_Percent: + return amount * lnwire.MilliSatoshi(feeLimit.GetPercent()) / 100 + + default: + // If a fee limit was not specified, we'll use the payment's + // amount as an upper bound in order to avoid payment attempts + // from incurring fees higher than the payment amount itself. + return amount + } +} + +// UnmarshallAmt returns a strong msat type for a sat/msat pair of rpc fields. 
+func UnmarshallAmt(amtSat, amtMsat int64) (lnwire.MilliSatoshi, error) { + if amtSat != 0 && amtMsat != 0 { + return 0, ErrSatMsatMutualExclusive + } + + if amtSat != 0 { + return lnwire.NewMSatFromSatoshis(btcutil.Amount(amtSat)), nil + } + + return lnwire.MilliSatoshi(amtMsat), nil +} diff --git a/lnrpc/routerrpc/config.go b/lnrpc/routerrpc/config.go index ba094c8f01..045561dbfd 100644 --- a/lnrpc/routerrpc/config.go +++ b/lnrpc/routerrpc/config.go @@ -1,31 +1,69 @@ package routerrpc import ( - "time" - - "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/macaroons" + "github.com/lightningnetwork/lnd/routing" ) -// RoutingConfig contains the configurable parameters that control routing. -type RoutingConfig struct { - // MinRouteProbability is the minimum required route success probability - // to attempt the payment. - MinRouteProbability float64 `long:"minrtprob" description:"Minimum required route success probability to attempt the payment"` +// Config is the main configuration file for the router RPC server. It contains +// all the items required for the router RPC server to carry out its duties. +// The fields with struct tags are meant to be parsed as normal configuration +// options, while if able to be populated, the latter fields MUST also be +// specified. +type Config struct { + RoutingConfig + + // RouterMacPath is the path for the router macaroon. If unspecified + // then we assume that the macaroon will be found under the network + // directory, named DefaultRouterMacFilename. + RouterMacPath string `long:"routermacaroonpath" description:"Path to the router macaroon"` - // AprioriHopProbability is the assumed success probability of a hop in - // a route when no other information is available. 
- AprioriHopProbability float64 `long:"apriorihopprob" description:"Assumed success probability of a hop in a route when no other information is available."` + // NetworkDir is the main network directory wherein the router rpc + // server will find the macaroon named DefaultRouterMacFilename. + NetworkDir string - // PenaltyHalfLife defines after how much time a penalized node or - // channel is back at 50% probability. - PenaltyHalfLife time.Duration `long:"penaltyhalflife" description:"Defines the duration after which a penalized node or channel is back at 50% probability"` + // MacService is the main macaroon service that we'll use to handle + // authentication for the Router rpc server. + MacService *macaroons.Service - // AttemptCost is the virtual cost in path finding weight units of - // executing a payment attempt that fails. It is used to trade off - // potentially better routes against their probability of succeeding. - AttemptCost btcutil.Amount `long:"attemptcost" description:"The (virtual) cost in sats of a failed payment attempt"` + // Router is the main channel router instance that backs this RPC + // server. + // + // TODO(roasbeef): make into pkg lvl interface? + // + // TODO(roasbeef): assumes router handles saving payment state + Router *routing.ChannelRouter + + // RouterBackend contains shared logic between this sub server and the + // main rpc server. + RouterBackend *RouterBackend +} + +// DefaultConfig defines the config defaults. +func DefaultConfig() *Config { + defaultRoutingConfig := RoutingConfig{ + AprioriHopProbability: routing.DefaultAprioriHopProbability, + AprioriWeight: routing.DefaultAprioriWeight, + MinRouteProbability: routing.DefaultMinRouteProbability, + PenaltyHalfLife: routing.DefaultPenaltyHalfLife, + AttemptCost: routing.DefaultPaymentAttemptPenalty. 
+ ToSatoshis(), + MaxMcHistory: routing.DefaultMaxMcHistory, + } + + return &Config{ + RoutingConfig: defaultRoutingConfig, + } +} - // MaxMcHistory defines the maximum number of payment results that - // are held on disk by mission control. - MaxMcHistory int `long:"maxmchistory" description:"the maximum number of payment results that are held on disk by mission control"` +// GetRoutingConfig returns the routing config based on this sub server config. +func GetRoutingConfig(cfg *Config) *RoutingConfig { + return &RoutingConfig{ + AprioriHopProbability: cfg.AprioriHopProbability, + AprioriWeight: cfg.AprioriWeight, + MinRouteProbability: cfg.MinRouteProbability, + AttemptCost: cfg.AttemptCost, + PenaltyHalfLife: cfg.PenaltyHalfLife, + MaxMcHistory: cfg.MaxMcHistory, + } } diff --git a/lnrpc/routerrpc/config_active.go b/lnrpc/routerrpc/config_active.go deleted file mode 100644 index 0479211ca3..0000000000 --- a/lnrpc/routerrpc/config_active.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build routerrpc - -package routerrpc - -import ( - "github.com/lightningnetwork/lnd/macaroons" - "github.com/lightningnetwork/lnd/routing" -) - -// Config is the main configuration file for the router RPC server. It contains -// all the items required for the router RPC server to carry out its duties. -// The fields with struct tags are meant to be parsed as normal configuration -// options, while if able to be populated, the latter fields MUST also be -// specified. -type Config struct { - RoutingConfig - - // RouterMacPath is the path for the router macaroon. If unspecified - // then we assume that the macaroon will be found under the network - // directory, named DefaultRouterMacFilename. - RouterMacPath string `long:"routermacaroonpath" description:"Path to the router macaroon"` - - // NetworkDir is the main network directory wherein the router rpc - // server will find the macaroon named DefaultRouterMacFilename. 
- NetworkDir string - - // MacService is the main macaroon service that we'll use to handle - // authentication for the Router rpc server. - MacService *macaroons.Service - - // Router is the main channel router instance that backs this RPC - // server. - // - // TODO(roasbeef): make into pkg lvl interface? - // - // TODO(roasbeef): assumes router handles saving payment state - Router *routing.ChannelRouter - - // RouterBackend contains shared logic between this sub server and the - // main rpc server. - RouterBackend *RouterBackend -} - -// DefaultConfig defines the config defaults. -func DefaultConfig() *Config { - defaultRoutingConfig := RoutingConfig{ - AprioriHopProbability: routing.DefaultAprioriHopProbability, - MinRouteProbability: routing.DefaultMinRouteProbability, - PenaltyHalfLife: routing.DefaultPenaltyHalfLife, - AttemptCost: routing.DefaultPaymentAttemptPenalty. - ToSatoshis(), - MaxMcHistory: routing.DefaultMaxMcHistory, - } - - return &Config{ - RoutingConfig: defaultRoutingConfig, - } -} - -// GetRoutingConfig returns the routing config based on this sub server config. -func GetRoutingConfig(cfg *Config) *RoutingConfig { - return &RoutingConfig{ - AprioriHopProbability: cfg.AprioriHopProbability, - MinRouteProbability: cfg.MinRouteProbability, - AttemptCost: cfg.AttemptCost, - PenaltyHalfLife: cfg.PenaltyHalfLife, - MaxMcHistory: cfg.MaxMcHistory, - } -} diff --git a/lnrpc/routerrpc/config_default.go b/lnrpc/routerrpc/config_default.go deleted file mode 100644 index f8af98f348..0000000000 --- a/lnrpc/routerrpc/config_default.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !routerrpc - -package routerrpc - -import "github.com/lightningnetwork/lnd/routing" - -// Config is the default config struct for the package. When the build tag isn't -// specified, then we output a blank config. -type Config struct{} - -// DefaultConfig defines the config defaults. Without the sub server enabled, -// there are no defaults to set. 
-func DefaultConfig() *Config { - return &Config{} -} - -// GetRoutingConfig returns the routing config based on this sub server config. -func GetRoutingConfig(cfg *Config) *RoutingConfig { - return &RoutingConfig{ - AprioriHopProbability: routing.DefaultAprioriHopProbability, - MinRouteProbability: routing.DefaultMinRouteProbability, - AttemptCost: routing.DefaultPaymentAttemptPenalty. - ToSatoshis(), - PenaltyHalfLife: routing.DefaultPenaltyHalfLife, - MaxMcHistory: routing.DefaultMaxMcHistory, - } -} diff --git a/lnrpc/routerrpc/driver.go b/lnrpc/routerrpc/driver.go index 05cc0c4b00..e174e2222e 100644 --- a/lnrpc/routerrpc/driver.go +++ b/lnrpc/routerrpc/driver.go @@ -1,5 +1,3 @@ -// +build routerrpc - package routerrpc import ( diff --git a/lnrpc/routerrpc/router.pb.go b/lnrpc/routerrpc/router.pb.go index 317c5717cc..ae57eacdf9 100644 --- a/lnrpc/routerrpc/router.pb.go +++ b/lnrpc/routerrpc/router.pb.go @@ -23,6 +23,94 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +type FailureDetail int32 + +const ( + FailureDetail_UNKNOWN FailureDetail = 0 + FailureDetail_NO_DETAIL FailureDetail = 1 + FailureDetail_ONION_DECODE FailureDetail = 2 + FailureDetail_LINK_NOT_ELIGIBLE FailureDetail = 3 + FailureDetail_ON_CHAIN_TIMEOUT FailureDetail = 4 + FailureDetail_HTLC_EXCEEDS_MAX FailureDetail = 5 + FailureDetail_INSUFFICIENT_BALANCE FailureDetail = 6 + FailureDetail_INCOMPLETE_FORWARD FailureDetail = 7 + FailureDetail_HTLC_ADD_FAILED FailureDetail = 8 + FailureDetail_FORWARDS_DISABLED FailureDetail = 9 + FailureDetail_INVOICE_CANCELED FailureDetail = 10 + FailureDetail_INVOICE_UNDERPAID FailureDetail = 11 + FailureDetail_INVOICE_EXPIRY_TOO_SOON FailureDetail = 12 + FailureDetail_INVOICE_NOT_OPEN FailureDetail = 13 + FailureDetail_MPP_INVOICE_TIMEOUT FailureDetail = 14 + FailureDetail_ADDRESS_MISMATCH FailureDetail = 15 + FailureDetail_SET_TOTAL_MISMATCH FailureDetail = 16 + 
FailureDetail_SET_TOTAL_TOO_LOW FailureDetail = 17 + FailureDetail_SET_OVERPAID FailureDetail = 18 + FailureDetail_UNKNOWN_INVOICE FailureDetail = 19 + FailureDetail_INVALID_KEYSEND FailureDetail = 20 + FailureDetail_MPP_IN_PROGRESS FailureDetail = 21 + FailureDetail_CIRCULAR_ROUTE FailureDetail = 22 +) + +var FailureDetail_name = map[int32]string{ + 0: "UNKNOWN", + 1: "NO_DETAIL", + 2: "ONION_DECODE", + 3: "LINK_NOT_ELIGIBLE", + 4: "ON_CHAIN_TIMEOUT", + 5: "HTLC_EXCEEDS_MAX", + 6: "INSUFFICIENT_BALANCE", + 7: "INCOMPLETE_FORWARD", + 8: "HTLC_ADD_FAILED", + 9: "FORWARDS_DISABLED", + 10: "INVOICE_CANCELED", + 11: "INVOICE_UNDERPAID", + 12: "INVOICE_EXPIRY_TOO_SOON", + 13: "INVOICE_NOT_OPEN", + 14: "MPP_INVOICE_TIMEOUT", + 15: "ADDRESS_MISMATCH", + 16: "SET_TOTAL_MISMATCH", + 17: "SET_TOTAL_TOO_LOW", + 18: "SET_OVERPAID", + 19: "UNKNOWN_INVOICE", + 20: "INVALID_KEYSEND", + 21: "MPP_IN_PROGRESS", + 22: "CIRCULAR_ROUTE", +} + +var FailureDetail_value = map[string]int32{ + "UNKNOWN": 0, + "NO_DETAIL": 1, + "ONION_DECODE": 2, + "LINK_NOT_ELIGIBLE": 3, + "ON_CHAIN_TIMEOUT": 4, + "HTLC_EXCEEDS_MAX": 5, + "INSUFFICIENT_BALANCE": 6, + "INCOMPLETE_FORWARD": 7, + "HTLC_ADD_FAILED": 8, + "FORWARDS_DISABLED": 9, + "INVOICE_CANCELED": 10, + "INVOICE_UNDERPAID": 11, + "INVOICE_EXPIRY_TOO_SOON": 12, + "INVOICE_NOT_OPEN": 13, + "MPP_INVOICE_TIMEOUT": 14, + "ADDRESS_MISMATCH": 15, + "SET_TOTAL_MISMATCH": 16, + "SET_TOTAL_TOO_LOW": 17, + "SET_OVERPAID": 18, + "UNKNOWN_INVOICE": 19, + "INVALID_KEYSEND": 20, + "MPP_IN_PROGRESS": 21, + "CIRCULAR_ROUTE": 22, +} + +func (x FailureDetail) String() string { + return proto.EnumName(FailureDetail_name, int32(x)) +} + +func (FailureDetail) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{0} +} + type PaymentState int32 const ( @@ -46,6 +134,9 @@ const ( //Payment details incorrect (unknown hash, invalid amt or //invalid final cltv delta) PaymentState_FAILED_INCORRECT_PAYMENT_DETAILS PaymentState = 5 + //* + 
//Insufficient local balance. + PaymentState_FAILED_INSUFFICIENT_BALANCE PaymentState = 6 ) var PaymentState_name = map[int32]string{ @@ -55,6 +146,7 @@ var PaymentState_name = map[int32]string{ 3: "FAILED_NO_ROUTE", 4: "FAILED_ERROR", 5: "FAILED_INCORRECT_PAYMENT_DETAILS", + 6: "FAILED_INSUFFICIENT_BALANCE", } var PaymentState_value = map[string]int32{ @@ -64,6 +156,7 @@ var PaymentState_value = map[string]int32{ "FAILED_NO_ROUTE": 3, "FAILED_ERROR": 4, "FAILED_INCORRECT_PAYMENT_DETAILS": 5, + "FAILED_INSUFFICIENT_BALANCE": 6, } func (x PaymentState) String() string { @@ -71,117 +164,53 @@ func (x PaymentState) String() string { } func (PaymentState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{0} + return fileDescriptor_7a0613f69d37b0a5, []int{1} } -type Failure_FailureCode int32 +type HtlcEvent_EventType int32 const ( - //* - //The numbers assigned in this enumeration match the failure codes as - //defined in BOLT #4. Because protobuf 3 requires enums to start with 0, - //a RESERVED value is added. 
- Failure_RESERVED Failure_FailureCode = 0 - Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS Failure_FailureCode = 1 - Failure_INCORRECT_PAYMENT_AMOUNT Failure_FailureCode = 2 - Failure_FINAL_INCORRECT_CLTV_EXPIRY Failure_FailureCode = 3 - Failure_FINAL_INCORRECT_HTLC_AMOUNT Failure_FailureCode = 4 - Failure_FINAL_EXPIRY_TOO_SOON Failure_FailureCode = 5 - Failure_INVALID_REALM Failure_FailureCode = 6 - Failure_EXPIRY_TOO_SOON Failure_FailureCode = 7 - Failure_INVALID_ONION_VERSION Failure_FailureCode = 8 - Failure_INVALID_ONION_HMAC Failure_FailureCode = 9 - Failure_INVALID_ONION_KEY Failure_FailureCode = 10 - Failure_AMOUNT_BELOW_MINIMUM Failure_FailureCode = 11 - Failure_FEE_INSUFFICIENT Failure_FailureCode = 12 - Failure_INCORRECT_CLTV_EXPIRY Failure_FailureCode = 13 - Failure_CHANNEL_DISABLED Failure_FailureCode = 14 - Failure_TEMPORARY_CHANNEL_FAILURE Failure_FailureCode = 15 - Failure_REQUIRED_NODE_FEATURE_MISSING Failure_FailureCode = 16 - Failure_REQUIRED_CHANNEL_FEATURE_MISSING Failure_FailureCode = 17 - Failure_UNKNOWN_NEXT_PEER Failure_FailureCode = 18 - Failure_TEMPORARY_NODE_FAILURE Failure_FailureCode = 19 - Failure_PERMANENT_NODE_FAILURE Failure_FailureCode = 20 - Failure_PERMANENT_CHANNEL_FAILURE Failure_FailureCode = 21 - Failure_EXPIRY_TOO_FAR Failure_FailureCode = 22 - //* - //The error source is known, but the failure itself couldn't be decoded. - Failure_UNKNOWN_FAILURE Failure_FailureCode = 998 - //* - //An unreadable failure result is returned if the received failure message - //cannot be decrypted. In that case the error source is unknown. 
- Failure_UNREADABLE_FAILURE Failure_FailureCode = 999 + HtlcEvent_UNKNOWN HtlcEvent_EventType = 0 + HtlcEvent_SEND HtlcEvent_EventType = 1 + HtlcEvent_RECEIVE HtlcEvent_EventType = 2 + HtlcEvent_FORWARD HtlcEvent_EventType = 3 ) -var Failure_FailureCode_name = map[int32]string{ - 0: "RESERVED", - 1: "INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS", - 2: "INCORRECT_PAYMENT_AMOUNT", - 3: "FINAL_INCORRECT_CLTV_EXPIRY", - 4: "FINAL_INCORRECT_HTLC_AMOUNT", - 5: "FINAL_EXPIRY_TOO_SOON", - 6: "INVALID_REALM", - 7: "EXPIRY_TOO_SOON", - 8: "INVALID_ONION_VERSION", - 9: "INVALID_ONION_HMAC", - 10: "INVALID_ONION_KEY", - 11: "AMOUNT_BELOW_MINIMUM", - 12: "FEE_INSUFFICIENT", - 13: "INCORRECT_CLTV_EXPIRY", - 14: "CHANNEL_DISABLED", - 15: "TEMPORARY_CHANNEL_FAILURE", - 16: "REQUIRED_NODE_FEATURE_MISSING", - 17: "REQUIRED_CHANNEL_FEATURE_MISSING", - 18: "UNKNOWN_NEXT_PEER", - 19: "TEMPORARY_NODE_FAILURE", - 20: "PERMANENT_NODE_FAILURE", - 21: "PERMANENT_CHANNEL_FAILURE", - 22: "EXPIRY_TOO_FAR", - 998: "UNKNOWN_FAILURE", - 999: "UNREADABLE_FAILURE", -} - -var Failure_FailureCode_value = map[string]int32{ - "RESERVED": 0, - "INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS": 1, - "INCORRECT_PAYMENT_AMOUNT": 2, - "FINAL_INCORRECT_CLTV_EXPIRY": 3, - "FINAL_INCORRECT_HTLC_AMOUNT": 4, - "FINAL_EXPIRY_TOO_SOON": 5, - "INVALID_REALM": 6, - "EXPIRY_TOO_SOON": 7, - "INVALID_ONION_VERSION": 8, - "INVALID_ONION_HMAC": 9, - "INVALID_ONION_KEY": 10, - "AMOUNT_BELOW_MINIMUM": 11, - "FEE_INSUFFICIENT": 12, - "INCORRECT_CLTV_EXPIRY": 13, - "CHANNEL_DISABLED": 14, - "TEMPORARY_CHANNEL_FAILURE": 15, - "REQUIRED_NODE_FEATURE_MISSING": 16, - "REQUIRED_CHANNEL_FEATURE_MISSING": 17, - "UNKNOWN_NEXT_PEER": 18, - "TEMPORARY_NODE_FAILURE": 19, - "PERMANENT_NODE_FAILURE": 20, - "PERMANENT_CHANNEL_FAILURE": 21, - "EXPIRY_TOO_FAR": 22, - "UNKNOWN_FAILURE": 998, - "UNREADABLE_FAILURE": 999, -} - -func (x Failure_FailureCode) String() string { - return proto.EnumName(Failure_FailureCode_name, int32(x)) -} - -func 
(Failure_FailureCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{7, 0} +var HtlcEvent_EventType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SEND", + 2: "RECEIVE", + 3: "FORWARD", +} + +var HtlcEvent_EventType_value = map[string]int32{ + "UNKNOWN": 0, + "SEND": 1, + "RECEIVE": 2, + "FORWARD": 3, +} + +func (x HtlcEvent_EventType) String() string { + return proto.EnumName(HtlcEvent_EventType_name, int32(x)) +} + +func (HtlcEvent_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{17, 0} } type SendPaymentRequest struct { /// The identity pubkey of the payment recipient Dest []byte `protobuf:"bytes,1,opt,name=dest,proto3" json:"dest,omitempty"` - /// Number of satoshis to send. + //* + //Number of satoshis to send. + // + //The fields amt and amt_msat are mutually exclusive. Amt int64 `protobuf:"varint,2,opt,name=amt,proto3" json:"amt,omitempty"` + //* + //Number of millisatoshis to send. + // + //The fields amt and amt_msat are mutually exclusive. + AmtMsat int64 `protobuf:"varint,12,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` /// The hash to use within the payment's HTLC PaymentHash []byte `protobuf:"bytes,3,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` //* @@ -206,27 +235,60 @@ type SendPaymentRequest struct { //If this field is left to the default value of 0, only zero-fee routes will //be considered. This usually means single hop routes connecting directly to //the destination. To send the payment without a fee limit, use max int here. + // + //The fields fee_limit_sat and fee_limit_msat are mutually exclusive. FeeLimitSat int64 `protobuf:"varint,7,opt,name=fee_limit_sat,json=feeLimitSat,proto3" json:"fee_limit_sat,omitempty"` //* + //The maximum number of millisatoshis that will be paid as a fee of the + //payment. If this field is left to the default value of 0, only zero-fee + //routes will be considered. 
This usually means single hop routes connecting + //directly to the destination. To send the payment without a fee limit, use + //max int here. + // + //The fields fee_limit_sat and fee_limit_msat are mutually exclusive. + FeeLimitMsat int64 `protobuf:"varint,13,opt,name=fee_limit_msat,json=feeLimitMsat,proto3" json:"fee_limit_msat,omitempty"` + //* //The channel id of the channel that must be taken to the first hop. If zero, //any channel may be used. OutgoingChanId uint64 `protobuf:"varint,8,opt,name=outgoing_chan_id,json=outgoingChanId,proto3" json:"outgoing_chan_id,omitempty"` //* + //The pubkey of the last hop of the route. If empty, any hop may be used. + LastHopPubkey []byte `protobuf:"bytes,14,opt,name=last_hop_pubkey,json=lastHopPubkey,proto3" json:"last_hop_pubkey,omitempty"` + //* //An optional maximum total time lock for the route. This should not exceed //lnd's `--max-cltv-expiry` setting. If zero, then the value of //`--max-cltv-expiry` is enforced. CltvLimit int32 `protobuf:"varint,9,opt,name=cltv_limit,json=cltvLimit,proto3" json:"cltv_limit,omitempty"` //* //Optional route hints to reach the destination through private channels. - RouteHints []*lnrpc.RouteHint `protobuf:"bytes,10,rep,name=route_hints,proto3" json:"route_hints,omitempty"` + RouteHints []*lnrpc.RouteHint `protobuf:"bytes,10,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` //* //An optional field that can be used to pass an arbitrary set of TLV records //to a peer which understands the new records. This can be used to pass - //application specific data during the payment attempt. - DestTlv map[uint64][]byte `protobuf:"bytes,11,rep,name=dest_tlv,json=destTlv,proto3" json:"dest_tlv,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + //application specific data during the payment attempt. 
Record types are + //required to be in the custom range >= 65536. When using REST, the values + //must be encoded as base64. + DestCustomRecords map[uint64][]byte `protobuf:"bytes,11,rep,name=dest_custom_records,json=destCustomRecords,proto3" json:"dest_custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + /// If set, circular payments to self are permitted. + AllowSelfPayment bool `protobuf:"varint,15,opt,name=allow_self_payment,json=allowSelfPayment,proto3" json:"allow_self_payment,omitempty"` + //* + //Features assumed to be supported by the final node. All transitive feature + //dependencies must also be set properly. For a given feature bit pair, either + //optional or remote may be set, but not both. If this field is nil or empty, + //the router will try to load destination features from the graph as a + //fallback. + DestFeatures []lnrpc.FeatureBit `protobuf:"varint,16,rep,packed,name=dest_features,json=destFeatures,proto3,enum=lnrpc.FeatureBit" json:"dest_features,omitempty"` + //* + //The maximum number of partial payments that may be use to complete the full + //amount. + MaxParts uint32 `protobuf:"varint,17,opt,name=max_parts,json=maxParts,proto3" json:"max_parts,omitempty"` + //* + //If set, only the final payment update is streamed back. Intermediate updates + //that show which htlcs are still in flight are suppressed. 
+ NoInflightUpdates bool `protobuf:"varint,18,opt,name=no_inflight_updates,json=noInflightUpdates,proto3" json:"no_inflight_updates,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *SendPaymentRequest) Reset() { *m = SendPaymentRequest{} } @@ -268,6 +330,13 @@ func (m *SendPaymentRequest) GetAmt() int64 { return 0 } +func (m *SendPaymentRequest) GetAmtMsat() int64 { + if m != nil { + return m.AmtMsat + } + return 0 +} + func (m *SendPaymentRequest) GetPaymentHash() []byte { if m != nil { return m.PaymentHash @@ -303,6 +372,13 @@ func (m *SendPaymentRequest) GetFeeLimitSat() int64 { return 0 } +func (m *SendPaymentRequest) GetFeeLimitMsat() int64 { + if m != nil { + return m.FeeLimitMsat + } + return 0 +} + func (m *SendPaymentRequest) GetOutgoingChanId() uint64 { if m != nil { return m.OutgoingChanId @@ -310,6 +386,13 @@ func (m *SendPaymentRequest) GetOutgoingChanId() uint64 { return 0 } +func (m *SendPaymentRequest) GetLastHopPubkey() []byte { + if m != nil { + return m.LastHopPubkey + } + return nil +} + func (m *SendPaymentRequest) GetCltvLimit() int32 { if m != nil { return m.CltvLimit @@ -324,16 +407,48 @@ func (m *SendPaymentRequest) GetRouteHints() []*lnrpc.RouteHint { return nil } -func (m *SendPaymentRequest) GetDestTlv() map[uint64][]byte { +func (m *SendPaymentRequest) GetDestCustomRecords() map[uint64][]byte { + if m != nil { + return m.DestCustomRecords + } + return nil +} + +func (m *SendPaymentRequest) GetAllowSelfPayment() bool { + if m != nil { + return m.AllowSelfPayment + } + return false +} + +func (m *SendPaymentRequest) GetDestFeatures() []lnrpc.FeatureBit { if m != nil { - return m.DestTlv + return m.DestFeatures } return nil } +func (m *SendPaymentRequest) GetMaxParts() uint32 { + if m != nil { + return m.MaxParts + } + return 0 +} + +func (m *SendPaymentRequest) GetNoInflightUpdates() bool { + if m != nil { + return m.NoInflightUpdates + } + return false 
+} + type TrackPaymentRequest struct { /// The hash of the payment to look up. - PaymentHash []byte `protobuf:"bytes,1,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` + PaymentHash []byte `protobuf:"bytes,1,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` + //* + //If set, only the final payment update is streamed back. Intermediate updates + //that show which htlcs are still in flight are suppressed. + NoInflightUpdates bool `protobuf:"varint,2,opt,name=no_inflight_updates,json=noInflightUpdates,proto3" json:"no_inflight_updates,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -371,64 +486,11 @@ func (m *TrackPaymentRequest) GetPaymentHash() []byte { return nil } -type PaymentStatus struct { - /// Current state the payment is in. - State PaymentState `protobuf:"varint,1,opt,name=state,proto3,enum=routerrpc.PaymentState" json:"state,omitempty"` - //* - //The pre-image of the payment when state is SUCCEEDED. - Preimage []byte `protobuf:"bytes,2,opt,name=preimage,proto3" json:"preimage,omitempty"` - //* - //The taken route when state is SUCCEEDED. 
- Route *lnrpc.Route `protobuf:"bytes,3,opt,name=route,proto3" json:"route,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PaymentStatus) Reset() { *m = PaymentStatus{} } -func (m *PaymentStatus) String() string { return proto.CompactTextString(m) } -func (*PaymentStatus) ProtoMessage() {} -func (*PaymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{2} -} - -func (m *PaymentStatus) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PaymentStatus.Unmarshal(m, b) -} -func (m *PaymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PaymentStatus.Marshal(b, m, deterministic) -} -func (m *PaymentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PaymentStatus.Merge(m, src) -} -func (m *PaymentStatus) XXX_Size() int { - return xxx_messageInfo_PaymentStatus.Size(m) -} -func (m *PaymentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PaymentStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_PaymentStatus proto.InternalMessageInfo - -func (m *PaymentStatus) GetState() PaymentState { - if m != nil { - return m.State - } - return PaymentState_IN_FLIGHT -} - -func (m *PaymentStatus) GetPreimage() []byte { - if m != nil { - return m.Preimage - } - return nil -} - -func (m *PaymentStatus) GetRoute() *lnrpc.Route { +func (m *TrackPaymentRequest) GetNoInflightUpdates() bool { if m != nil { - return m.Route + return m.NoInflightUpdates } - return nil + return false } type RouteFeeRequest struct { @@ -447,7 +509,7 @@ func (m *RouteFeeRequest) Reset() { *m = RouteFeeRequest{} } func (m *RouteFeeRequest) String() string { return proto.CompactTextString(m) } func (*RouteFeeRequest) ProtoMessage() {} func (*RouteFeeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{3} + return fileDescriptor_7a0613f69d37b0a5, []int{2} } func (m *RouteFeeRequest) XXX_Unmarshal(b 
[]byte) error { @@ -501,7 +563,7 @@ func (m *RouteFeeResponse) Reset() { *m = RouteFeeResponse{} } func (m *RouteFeeResponse) String() string { return proto.CompactTextString(m) } func (*RouteFeeResponse) ProtoMessage() {} func (*RouteFeeResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{4} + return fileDescriptor_7a0613f69d37b0a5, []int{3} } func (m *RouteFeeResponse) XXX_Unmarshal(b []byte) error { @@ -550,7 +612,7 @@ func (m *SendToRouteRequest) Reset() { *m = SendToRouteRequest{} } func (m *SendToRouteRequest) String() string { return proto.CompactTextString(m) } func (*SendToRouteRequest) ProtoMessage() {} func (*SendToRouteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{5} + return fileDescriptor_7a0613f69d37b0a5, []int{4} } func (m *SendToRouteRequest) XXX_Unmarshal(b []byte) error { @@ -589,17 +651,17 @@ type SendToRouteResponse struct { /// The preimage obtained by making the payment. Preimage []byte `protobuf:"bytes,1,opt,name=preimage,proto3" json:"preimage,omitempty"` /// The failure message in case the payment failed. 
- Failure *Failure `protobuf:"bytes,2,opt,name=failure,proto3" json:"failure,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Failure *lnrpc.Failure `protobuf:"bytes,2,opt,name=failure,proto3" json:"failure,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *SendToRouteResponse) Reset() { *m = SendToRouteResponse{} } func (m *SendToRouteResponse) String() string { return proto.CompactTextString(m) } func (*SendToRouteResponse) ProtoMessage() {} func (*SendToRouteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{6} + return fileDescriptor_7a0613f69d37b0a5, []int{5} } func (m *SendToRouteResponse) XXX_Unmarshal(b []byte) error { @@ -627,578 +689,399 @@ func (m *SendToRouteResponse) GetPreimage() []byte { return nil } -func (m *SendToRouteResponse) GetFailure() *Failure { +func (m *SendToRouteResponse) GetFailure() *lnrpc.Failure { if m != nil { return m.Failure } return nil } -type Failure struct { - /// Failure code as defined in the Lightning spec - Code Failure_FailureCode `protobuf:"varint,1,opt,name=code,proto3,enum=routerrpc.Failure_FailureCode" json:"code,omitempty"` - /// An optional channel update message. - ChannelUpdate *ChannelUpdate `protobuf:"bytes,3,opt,name=channel_update,json=channelUpdate,proto3" json:"channel_update,omitempty"` - /// A failure type-dependent htlc value. - HtlcMsat uint64 `protobuf:"varint,4,opt,name=htlc_msat,json=htlcMsat,proto3" json:"htlc_msat,omitempty"` - /// The sha256 sum of the onion payload. - OnionSha_256 []byte `protobuf:"bytes,5,opt,name=onion_sha_256,json=onionSha256,proto3" json:"onion_sha_256,omitempty"` - /// A failure type-dependent cltv expiry value. - CltvExpiry uint32 `protobuf:"varint,6,opt,name=cltv_expiry,json=cltvExpiry,proto3" json:"cltv_expiry,omitempty"` - /// A failure type-dependent flags value. 
- Flags uint32 `protobuf:"varint,7,opt,name=flags,proto3" json:"flags,omitempty"` - //* - //The position in the path of the intermediate or final node that generated - //the failure message. Position zero is the sender node. - FailureSourceIndex uint32 `protobuf:"varint,8,opt,name=failure_source_index,json=failureSourceIndex,proto3" json:"failure_source_index,omitempty"` - /// A failure type-dependent block height. - Height uint32 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` +type ResetMissionControlRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *Failure) Reset() { *m = Failure{} } -func (m *Failure) String() string { return proto.CompactTextString(m) } -func (*Failure) ProtoMessage() {} -func (*Failure) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{7} +func (m *ResetMissionControlRequest) Reset() { *m = ResetMissionControlRequest{} } +func (m *ResetMissionControlRequest) String() string { return proto.CompactTextString(m) } +func (*ResetMissionControlRequest) ProtoMessage() {} +func (*ResetMissionControlRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{6} } -func (m *Failure) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Failure.Unmarshal(m, b) +func (m *ResetMissionControlRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResetMissionControlRequest.Unmarshal(m, b) } -func (m *Failure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Failure.Marshal(b, m, deterministic) +func (m *ResetMissionControlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetMissionControlRequest.Marshal(b, m, deterministic) } -func (m *Failure) XXX_Merge(src proto.Message) { - xxx_messageInfo_Failure.Merge(m, src) +func (m *ResetMissionControlRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ResetMissionControlRequest.Merge(m, src) } -func (m *Failure) XXX_Size() int { - return xxx_messageInfo_Failure.Size(m) +func (m *ResetMissionControlRequest) XXX_Size() int { + return xxx_messageInfo_ResetMissionControlRequest.Size(m) } -func (m *Failure) XXX_DiscardUnknown() { - xxx_messageInfo_Failure.DiscardUnknown(m) +func (m *ResetMissionControlRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ResetMissionControlRequest.DiscardUnknown(m) } -var xxx_messageInfo_Failure proto.InternalMessageInfo +var xxx_messageInfo_ResetMissionControlRequest proto.InternalMessageInfo -func (m *Failure) GetCode() Failure_FailureCode { - if m != nil { - return m.Code - } - return Failure_RESERVED +type ResetMissionControlResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Failure) GetChannelUpdate() *ChannelUpdate { - if m != nil { - return m.ChannelUpdate - } - return nil +func (m *ResetMissionControlResponse) Reset() { *m = ResetMissionControlResponse{} } +func (m *ResetMissionControlResponse) String() string { return proto.CompactTextString(m) } +func (*ResetMissionControlResponse) ProtoMessage() {} +func (*ResetMissionControlResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{7} } -func (m *Failure) GetHtlcMsat() uint64 { - if m != nil { - return m.HtlcMsat - } - return 0 +func (m *ResetMissionControlResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResetMissionControlResponse.Unmarshal(m, b) } - -func (m *Failure) GetOnionSha_256() []byte { - if m != nil { - return m.OnionSha_256 - } - return nil +func (m *ResetMissionControlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResetMissionControlResponse.Marshal(b, m, deterministic) } - -func (m *Failure) GetCltvExpiry() uint32 { - if m != nil { - return m.CltvExpiry - } - return 0 +func (m *ResetMissionControlResponse) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetMissionControlResponse.Merge(m, src) } - -func (m *Failure) GetFlags() uint32 { - if m != nil { - return m.Flags - } - return 0 +func (m *ResetMissionControlResponse) XXX_Size() int { + return xxx_messageInfo_ResetMissionControlResponse.Size(m) } - -func (m *Failure) GetFailureSourceIndex() uint32 { - if m != nil { - return m.FailureSourceIndex - } - return 0 +func (m *ResetMissionControlResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResetMissionControlResponse.DiscardUnknown(m) } -func (m *Failure) GetHeight() uint32 { - if m != nil { - return m.Height - } - return 0 -} +var xxx_messageInfo_ResetMissionControlResponse proto.InternalMessageInfo -type ChannelUpdate struct { - //* - //The signature that validates the announced data and proves the ownership - //of node id. - Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` - //* - //The target chain that this channel was opened within. This value - //should be the genesis hash of the target chain. Along with the short - //channel ID, this uniquely identifies the channel globally in a - //blockchain. - ChainHash []byte `protobuf:"bytes,2,opt,name=chain_hash,json=chainHash,proto3" json:"chain_hash,omitempty"` - //* - //The unique description of the funding transaction. - ChanId uint64 `protobuf:"varint,3,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` - //* - //A timestamp that allows ordering in the case of multiple announcements. - //We should ignore the message if timestamp is not greater than the - //last-received. - Timestamp uint32 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - //* - //The bitfield that describes whether optional fields are present in this - //update. Currently, the least-significant bit must be set to 1 if the - //optional field MaxHtlc is present. 
- MessageFlags uint32 `protobuf:"varint,10,opt,name=message_flags,json=messageFlags,proto3" json:"message_flags,omitempty"` - //* - //The bitfield that describes additional meta-data concerning how the - //update is to be interpreted. Currently, the least-significant bit must be - //set to 0 if the creating node corresponds to the first node in the - //previously sent channel announcement and 1 otherwise. If the second bit - //is set, then the channel is set to be disabled. - ChannelFlags uint32 `protobuf:"varint,5,opt,name=channel_flags,json=channelFlags,proto3" json:"channel_flags,omitempty"` - //* - //The minimum number of blocks this node requires to be added to the expiry - //of HTLCs. This is a security parameter determined by the node operator. - //This value represents the required gap between the time locks of the - //incoming and outgoing HTLC's set to this node. - TimeLockDelta uint32 `protobuf:"varint,6,opt,name=time_lock_delta,json=timeLockDelta,proto3" json:"time_lock_delta,omitempty"` - //* - //The minimum HTLC value which will be accepted. - HtlcMinimumMsat uint64 `protobuf:"varint,7,opt,name=htlc_minimum_msat,json=htlcMinimumMsat,proto3" json:"htlc_minimum_msat,omitempty"` - //* - //The base fee that must be used for incoming HTLC's to this particular - //channel. This value will be tacked onto the required for a payment - //independent of the size of the payment. - BaseFee uint32 `protobuf:"varint,8,opt,name=base_fee,json=baseFee,proto3" json:"base_fee,omitempty"` - //* - //The fee rate that will be charged per millionth of a satoshi. - FeeRate uint32 `protobuf:"varint,9,opt,name=fee_rate,json=feeRate,proto3" json:"fee_rate,omitempty"` - //* - //The maximum HTLC value which will be accepted. 
- HtlcMaximumMsat uint64 `protobuf:"varint,11,opt,name=htlc_maximum_msat,json=htlcMaximumMsat,proto3" json:"htlc_maximum_msat,omitempty"` - //* - //The set of data that was appended to this message, some of which we may - //not actually know how to iterate or parse. By holding onto this data, we - //ensure that we're able to properly validate the set of signatures that - //cover these new fields, and ensure we're able to make upgrades to the - //network in a forwards compatible manner. - ExtraOpaqueData []byte `protobuf:"bytes,12,opt,name=extra_opaque_data,json=extraOpaqueData,proto3" json:"extra_opaque_data,omitempty"` +type QueryMissionControlRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *ChannelUpdate) Reset() { *m = ChannelUpdate{} } -func (m *ChannelUpdate) String() string { return proto.CompactTextString(m) } -func (*ChannelUpdate) ProtoMessage() {} -func (*ChannelUpdate) Descriptor() ([]byte, []int) { +func (m *QueryMissionControlRequest) Reset() { *m = QueryMissionControlRequest{} } +func (m *QueryMissionControlRequest) String() string { return proto.CompactTextString(m) } +func (*QueryMissionControlRequest) ProtoMessage() {} +func (*QueryMissionControlRequest) Descriptor() ([]byte, []int) { return fileDescriptor_7a0613f69d37b0a5, []int{8} } -func (m *ChannelUpdate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelUpdate.Unmarshal(m, b) +func (m *QueryMissionControlRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryMissionControlRequest.Unmarshal(m, b) } -func (m *ChannelUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ChannelUpdate.Marshal(b, m, deterministic) +func (m *QueryMissionControlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryMissionControlRequest.Marshal(b, m, deterministic) } -func (m *ChannelUpdate) XXX_Merge(src proto.Message) 
{ - xxx_messageInfo_ChannelUpdate.Merge(m, src) +func (m *QueryMissionControlRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryMissionControlRequest.Merge(m, src) } -func (m *ChannelUpdate) XXX_Size() int { - return xxx_messageInfo_ChannelUpdate.Size(m) +func (m *QueryMissionControlRequest) XXX_Size() int { + return xxx_messageInfo_QueryMissionControlRequest.Size(m) } -func (m *ChannelUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelUpdate.DiscardUnknown(m) +func (m *QueryMissionControlRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryMissionControlRequest.DiscardUnknown(m) } -var xxx_messageInfo_ChannelUpdate proto.InternalMessageInfo +var xxx_messageInfo_QueryMissionControlRequest proto.InternalMessageInfo -func (m *ChannelUpdate) GetSignature() []byte { - if m != nil { - return m.Signature - } - return nil +/// QueryMissionControlResponse contains mission control state. +type QueryMissionControlResponse struct { + /// Node pair-level mission control state. + Pairs []*PairHistory `protobuf:"bytes,2,rep,name=pairs,proto3" json:"pairs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ChannelUpdate) GetChainHash() []byte { - if m != nil { - return m.ChainHash - } - return nil +func (m *QueryMissionControlResponse) Reset() { *m = QueryMissionControlResponse{} } +func (m *QueryMissionControlResponse) String() string { return proto.CompactTextString(m) } +func (*QueryMissionControlResponse) ProtoMessage() {} +func (*QueryMissionControlResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{9} } -func (m *ChannelUpdate) GetChanId() uint64 { - if m != nil { - return m.ChanId - } - return 0 +func (m *QueryMissionControlResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryMissionControlResponse.Unmarshal(m, b) } - -func (m *ChannelUpdate) GetTimestamp() uint32 { - if m != nil { - return m.Timestamp - } - return 
0 +func (m *QueryMissionControlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryMissionControlResponse.Marshal(b, m, deterministic) } - -func (m *ChannelUpdate) GetMessageFlags() uint32 { - if m != nil { - return m.MessageFlags - } - return 0 +func (m *QueryMissionControlResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryMissionControlResponse.Merge(m, src) } - -func (m *ChannelUpdate) GetChannelFlags() uint32 { - if m != nil { - return m.ChannelFlags - } - return 0 -} - -func (m *ChannelUpdate) GetTimeLockDelta() uint32 { - if m != nil { - return m.TimeLockDelta - } - return 0 -} - -func (m *ChannelUpdate) GetHtlcMinimumMsat() uint64 { - if m != nil { - return m.HtlcMinimumMsat - } - return 0 -} - -func (m *ChannelUpdate) GetBaseFee() uint32 { - if m != nil { - return m.BaseFee - } - return 0 +func (m *QueryMissionControlResponse) XXX_Size() int { + return xxx_messageInfo_QueryMissionControlResponse.Size(m) } - -func (m *ChannelUpdate) GetFeeRate() uint32 { - if m != nil { - return m.FeeRate - } - return 0 +func (m *QueryMissionControlResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryMissionControlResponse.DiscardUnknown(m) } -func (m *ChannelUpdate) GetHtlcMaximumMsat() uint64 { - if m != nil { - return m.HtlcMaximumMsat - } - return 0 -} +var xxx_messageInfo_QueryMissionControlResponse proto.InternalMessageInfo -func (m *ChannelUpdate) GetExtraOpaqueData() []byte { +func (m *QueryMissionControlResponse) GetPairs() []*PairHistory { if m != nil { - return m.ExtraOpaqueData + return m.Pairs } return nil } -type ResetMissionControlRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +/// PairHistory contains the mission control state for a particular node pair. +type PairHistory struct { + /// The source node pubkey of the pair. 
+ NodeFrom []byte `protobuf:"bytes,1,opt,name=node_from,json=nodeFrom,proto3" json:"node_from,omitempty"` + /// The destination node pubkey of the pair. + NodeTo []byte `protobuf:"bytes,2,opt,name=node_to,json=nodeTo,proto3" json:"node_to,omitempty"` + History *PairData `protobuf:"bytes,7,opt,name=history,proto3" json:"history,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ResetMissionControlRequest) Reset() { *m = ResetMissionControlRequest{} } -func (m *ResetMissionControlRequest) String() string { return proto.CompactTextString(m) } -func (*ResetMissionControlRequest) ProtoMessage() {} -func (*ResetMissionControlRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{9} +func (m *PairHistory) Reset() { *m = PairHistory{} } +func (m *PairHistory) String() string { return proto.CompactTextString(m) } +func (*PairHistory) ProtoMessage() {} +func (*PairHistory) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{10} } -func (m *ResetMissionControlRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResetMissionControlRequest.Unmarshal(m, b) +func (m *PairHistory) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PairHistory.Unmarshal(m, b) } -func (m *ResetMissionControlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResetMissionControlRequest.Marshal(b, m, deterministic) +func (m *PairHistory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PairHistory.Marshal(b, m, deterministic) } -func (m *ResetMissionControlRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResetMissionControlRequest.Merge(m, src) +func (m *PairHistory) XXX_Merge(src proto.Message) { + xxx_messageInfo_PairHistory.Merge(m, src) } -func (m *ResetMissionControlRequest) XXX_Size() int { - return 
xxx_messageInfo_ResetMissionControlRequest.Size(m) +func (m *PairHistory) XXX_Size() int { + return xxx_messageInfo_PairHistory.Size(m) } -func (m *ResetMissionControlRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResetMissionControlRequest.DiscardUnknown(m) +func (m *PairHistory) XXX_DiscardUnknown() { + xxx_messageInfo_PairHistory.DiscardUnknown(m) } -var xxx_messageInfo_ResetMissionControlRequest proto.InternalMessageInfo +var xxx_messageInfo_PairHistory proto.InternalMessageInfo -type ResetMissionControlResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *PairHistory) GetNodeFrom() []byte { + if m != nil { + return m.NodeFrom + } + return nil } -func (m *ResetMissionControlResponse) Reset() { *m = ResetMissionControlResponse{} } -func (m *ResetMissionControlResponse) String() string { return proto.CompactTextString(m) } -func (*ResetMissionControlResponse) ProtoMessage() {} -func (*ResetMissionControlResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{10} +func (m *PairHistory) GetNodeTo() []byte { + if m != nil { + return m.NodeTo + } + return nil } -func (m *ResetMissionControlResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResetMissionControlResponse.Unmarshal(m, b) -} -func (m *ResetMissionControlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResetMissionControlResponse.Marshal(b, m, deterministic) -} -func (m *ResetMissionControlResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResetMissionControlResponse.Merge(m, src) -} -func (m *ResetMissionControlResponse) XXX_Size() int { - return xxx_messageInfo_ResetMissionControlResponse.Size(m) -} -func (m *ResetMissionControlResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ResetMissionControlResponse.DiscardUnknown(m) +func (m *PairHistory) GetHistory() *PairData { + if m != nil { + return m.History + } + 
return nil } -var xxx_messageInfo_ResetMissionControlResponse proto.InternalMessageInfo - -type QueryMissionControlRequest struct { +type PairData struct { + /// Time of last failure. + FailTime int64 `protobuf:"varint,1,opt,name=fail_time,json=failTime,proto3" json:"fail_time,omitempty"` + //* + //Lowest amount that failed to forward rounded to whole sats. This may be + //set to zero if the failure is independent of amount. + FailAmtSat int64 `protobuf:"varint,2,opt,name=fail_amt_sat,json=failAmtSat,proto3" json:"fail_amt_sat,omitempty"` + //* + //Lowest amount that failed to forward in millisats. This may be + //set to zero if the failure is independent of amount. + FailAmtMsat int64 `protobuf:"varint,4,opt,name=fail_amt_msat,json=failAmtMsat,proto3" json:"fail_amt_msat,omitempty"` + /// Time of last success. + SuccessTime int64 `protobuf:"varint,5,opt,name=success_time,json=successTime,proto3" json:"success_time,omitempty"` + /// Highest amount that we could successfully forward rounded to whole sats. + SuccessAmtSat int64 `protobuf:"varint,6,opt,name=success_amt_sat,json=successAmtSat,proto3" json:"success_amt_sat,omitempty"` + /// Highest amount that we could successfully forward in millisats. 
+ SuccessAmtMsat int64 `protobuf:"varint,7,opt,name=success_amt_msat,json=successAmtMsat,proto3" json:"success_amt_msat,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *QueryMissionControlRequest) Reset() { *m = QueryMissionControlRequest{} } -func (m *QueryMissionControlRequest) String() string { return proto.CompactTextString(m) } -func (*QueryMissionControlRequest) ProtoMessage() {} -func (*QueryMissionControlRequest) Descriptor() ([]byte, []int) { +func (m *PairData) Reset() { *m = PairData{} } +func (m *PairData) String() string { return proto.CompactTextString(m) } +func (*PairData) ProtoMessage() {} +func (*PairData) Descriptor() ([]byte, []int) { return fileDescriptor_7a0613f69d37b0a5, []int{11} } -func (m *QueryMissionControlRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryMissionControlRequest.Unmarshal(m, b) +func (m *PairData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PairData.Unmarshal(m, b) } -func (m *QueryMissionControlRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryMissionControlRequest.Marshal(b, m, deterministic) +func (m *PairData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PairData.Marshal(b, m, deterministic) } -func (m *QueryMissionControlRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryMissionControlRequest.Merge(m, src) -} -func (m *QueryMissionControlRequest) XXX_Size() int { - return xxx_messageInfo_QueryMissionControlRequest.Size(m) -} -func (m *QueryMissionControlRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryMissionControlRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryMissionControlRequest proto.InternalMessageInfo - -/// QueryMissionControlResponse contains mission control state. -type QueryMissionControlResponse struct { - /// Node-level mission control state. 
- Nodes []*NodeHistory `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` - /// Node pair-level mission control state. - Pairs []*PairHistory `protobuf:"bytes,2,rep,name=pairs,proto3" json:"pairs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryMissionControlResponse) Reset() { *m = QueryMissionControlResponse{} } -func (m *QueryMissionControlResponse) String() string { return proto.CompactTextString(m) } -func (*QueryMissionControlResponse) ProtoMessage() {} -func (*QueryMissionControlResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{12} -} - -func (m *QueryMissionControlResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryMissionControlResponse.Unmarshal(m, b) -} -func (m *QueryMissionControlResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryMissionControlResponse.Marshal(b, m, deterministic) +func (m *PairData) XXX_Merge(src proto.Message) { + xxx_messageInfo_PairData.Merge(m, src) } -func (m *QueryMissionControlResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryMissionControlResponse.Merge(m, src) +func (m *PairData) XXX_Size() int { + return xxx_messageInfo_PairData.Size(m) } -func (m *QueryMissionControlResponse) XXX_Size() int { - return xxx_messageInfo_QueryMissionControlResponse.Size(m) -} -func (m *QueryMissionControlResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryMissionControlResponse.DiscardUnknown(m) +func (m *PairData) XXX_DiscardUnknown() { + xxx_messageInfo_PairData.DiscardUnknown(m) } -var xxx_messageInfo_QueryMissionControlResponse proto.InternalMessageInfo +var xxx_messageInfo_PairData proto.InternalMessageInfo -func (m *QueryMissionControlResponse) GetNodes() []*NodeHistory { +func (m *PairData) GetFailTime() int64 { if m != nil { - return m.Nodes + return m.FailTime } - return nil + return 0 } -func (m 
*QueryMissionControlResponse) GetPairs() []*PairHistory { +func (m *PairData) GetFailAmtSat() int64 { if m != nil { - return m.Pairs + return m.FailAmtSat } - return nil -} - -/// NodeHistory contains the mission control state for a particular node. -type NodeHistory struct { - /// Node pubkey - Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - /// Time stamp of last failure. Set to zero if no failure happened yet. - LastFailTime int64 `protobuf:"varint,2,opt,name=last_fail_time,proto3" json:"last_fail_time,omitempty"` - //* - //Estimation of success probability of forwarding towards peers of this node - //for which no specific history is available. - OtherSuccessProb float32 `protobuf:"fixed32,3,opt,name=other_success_prob,proto3" json:"other_success_prob,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NodeHistory) Reset() { *m = NodeHistory{} } -func (m *NodeHistory) String() string { return proto.CompactTextString(m) } -func (*NodeHistory) ProtoMessage() {} -func (*NodeHistory) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{13} + return 0 } -func (m *NodeHistory) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NodeHistory.Unmarshal(m, b) -} -func (m *NodeHistory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NodeHistory.Marshal(b, m, deterministic) -} -func (m *NodeHistory) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeHistory.Merge(m, src) -} -func (m *NodeHistory) XXX_Size() int { - return xxx_messageInfo_NodeHistory.Size(m) -} -func (m *NodeHistory) XXX_DiscardUnknown() { - xxx_messageInfo_NodeHistory.DiscardUnknown(m) +func (m *PairData) GetFailAmtMsat() int64 { + if m != nil { + return m.FailAmtMsat + } + return 0 } -var xxx_messageInfo_NodeHistory proto.InternalMessageInfo - -func (m *NodeHistory) GetPubkey() []byte { +func (m *PairData) 
GetSuccessTime() int64 { if m != nil { - return m.Pubkey + return m.SuccessTime } - return nil + return 0 } -func (m *NodeHistory) GetLastFailTime() int64 { +func (m *PairData) GetSuccessAmtSat() int64 { if m != nil { - return m.LastFailTime + return m.SuccessAmtSat } return 0 } -func (m *NodeHistory) GetOtherSuccessProb() float32 { +func (m *PairData) GetSuccessAmtMsat() int64 { if m != nil { - return m.OtherSuccessProb + return m.SuccessAmtMsat } return 0 } -/// PairHistory contains the mission control state for a particular node pair. -type PairHistory struct { +type QueryProbabilityRequest struct { /// The source node pubkey of the pair. - NodeFrom []byte `protobuf:"bytes,1,opt,name=node_from,proto3" json:"node_from,omitempty"` + FromNode []byte `protobuf:"bytes,1,opt,name=from_node,json=fromNode,proto3" json:"from_node,omitempty"` /// The destination node pubkey of the pair. - NodeTo []byte `protobuf:"bytes,2,opt,name=node_to,proto3" json:"node_to,omitempty"` - /// Time stamp of last result. - Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - /// Minimum penalization amount (only applies to failed attempts). - MinPenalizeAmtSat int64 `protobuf:"varint,4,opt,name=min_penalize_amt_sat,proto3" json:"min_penalize_amt_sat,omitempty"` - /// Estimation of success probability for this pair. - SuccessProb float32 `protobuf:"fixed32,5,opt,name=success_prob,proto3" json:"success_prob,omitempty"` - /// Whether the last payment attempt through this pair was successful. - LastAttemptSuccessful bool `protobuf:"varint,6,opt,name=last_attempt_successful,proto3" json:"last_attempt_successful,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ToNode []byte `protobuf:"bytes,2,opt,name=to_node,json=toNode,proto3" json:"to_node,omitempty"` + /// The amount for which to calculate a probability. 
+ AmtMsat int64 `protobuf:"varint,3,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *PairHistory) Reset() { *m = PairHistory{} } -func (m *PairHistory) String() string { return proto.CompactTextString(m) } -func (*PairHistory) ProtoMessage() {} -func (*PairHistory) Descriptor() ([]byte, []int) { - return fileDescriptor_7a0613f69d37b0a5, []int{14} +func (m *QueryProbabilityRequest) Reset() { *m = QueryProbabilityRequest{} } +func (m *QueryProbabilityRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProbabilityRequest) ProtoMessage() {} +func (*QueryProbabilityRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{12} } -func (m *PairHistory) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PairHistory.Unmarshal(m, b) +func (m *QueryProbabilityRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryProbabilityRequest.Unmarshal(m, b) } -func (m *PairHistory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PairHistory.Marshal(b, m, deterministic) +func (m *QueryProbabilityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryProbabilityRequest.Marshal(b, m, deterministic) } -func (m *PairHistory) XXX_Merge(src proto.Message) { - xxx_messageInfo_PairHistory.Merge(m, src) +func (m *QueryProbabilityRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProbabilityRequest.Merge(m, src) } -func (m *PairHistory) XXX_Size() int { - return xxx_messageInfo_PairHistory.Size(m) +func (m *QueryProbabilityRequest) XXX_Size() int { + return xxx_messageInfo_QueryProbabilityRequest.Size(m) } -func (m *PairHistory) XXX_DiscardUnknown() { - xxx_messageInfo_PairHistory.DiscardUnknown(m) +func (m *QueryProbabilityRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueryProbabilityRequest.DiscardUnknown(m) } -var xxx_messageInfo_PairHistory proto.InternalMessageInfo +var xxx_messageInfo_QueryProbabilityRequest proto.InternalMessageInfo -func (m *PairHistory) GetNodeFrom() []byte { +func (m *QueryProbabilityRequest) GetFromNode() []byte { if m != nil { - return m.NodeFrom + return m.FromNode } return nil } -func (m *PairHistory) GetNodeTo() []byte { +func (m *QueryProbabilityRequest) GetToNode() []byte { if m != nil { - return m.NodeTo + return m.ToNode } return nil } -func (m *PairHistory) GetTimestamp() int64 { +func (m *QueryProbabilityRequest) GetAmtMsat() int64 { if m != nil { - return m.Timestamp + return m.AmtMsat } return 0 } -func (m *PairHistory) GetMinPenalizeAmtSat() int64 { - if m != nil { - return m.MinPenalizeAmtSat - } - return 0 +type QueryProbabilityResponse struct { + /// The success probability for the requested pair. + Probability float64 `protobuf:"fixed64,1,opt,name=probability,proto3" json:"probability,omitempty"` + /// The historical data for the requested pair. 
+ History *PairData `protobuf:"bytes,2,opt,name=history,proto3" json:"history,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryProbabilityResponse) Reset() { *m = QueryProbabilityResponse{} } +func (m *QueryProbabilityResponse) String() string { return proto.CompactTextString(m) } +func (*QueryProbabilityResponse) ProtoMessage() {} +func (*QueryProbabilityResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{13} } -func (m *PairHistory) GetSuccessProb() float32 { +func (m *QueryProbabilityResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryProbabilityResponse.Unmarshal(m, b) +} +func (m *QueryProbabilityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryProbabilityResponse.Marshal(b, m, deterministic) +} +func (m *QueryProbabilityResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProbabilityResponse.Merge(m, src) +} +func (m *QueryProbabilityResponse) XXX_Size() int { + return xxx_messageInfo_QueryProbabilityResponse.Size(m) +} +func (m *QueryProbabilityResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProbabilityResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProbabilityResponse proto.InternalMessageInfo + +func (m *QueryProbabilityResponse) GetProbability() float64 { if m != nil { - return m.SuccessProb + return m.Probability } return 0 } -func (m *PairHistory) GetLastAttemptSuccessful() bool { +func (m *QueryProbabilityResponse) GetHistory() *PairData { if m != nil { - return m.LastAttemptSuccessful + return m.History } - return false + return nil } type BuildRouteRequest struct { @@ -1227,7 +1110,7 @@ func (m *BuildRouteRequest) Reset() { *m = BuildRouteRequest{} } func (m *BuildRouteRequest) String() string { return proto.CompactTextString(m) } func (*BuildRouteRequest) ProtoMessage() {} func (*BuildRouteRequest) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_7a0613f69d37b0a5, []int{15} + return fileDescriptor_7a0613f69d37b0a5, []int{14} } func (m *BuildRouteRequest) XXX_Unmarshal(b []byte) error { @@ -1289,178 +1172,722 @@ func (m *BuildRouteResponse) Reset() { *m = BuildRouteResponse{} } func (m *BuildRouteResponse) String() string { return proto.CompactTextString(m) } func (*BuildRouteResponse) ProtoMessage() {} func (*BuildRouteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{15} +} + +func (m *BuildRouteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BuildRouteResponse.Unmarshal(m, b) +} +func (m *BuildRouteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BuildRouteResponse.Marshal(b, m, deterministic) +} +func (m *BuildRouteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildRouteResponse.Merge(m, src) +} +func (m *BuildRouteResponse) XXX_Size() int { + return xxx_messageInfo_BuildRouteResponse.Size(m) +} +func (m *BuildRouteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BuildRouteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildRouteResponse proto.InternalMessageInfo + +func (m *BuildRouteResponse) GetRoute() *lnrpc.Route { + if m != nil { + return m.Route + } + return nil +} + +type SubscribeHtlcEventsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SubscribeHtlcEventsRequest) Reset() { *m = SubscribeHtlcEventsRequest{} } +func (m *SubscribeHtlcEventsRequest) String() string { return proto.CompactTextString(m) } +func (*SubscribeHtlcEventsRequest) ProtoMessage() {} +func (*SubscribeHtlcEventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor_7a0613f69d37b0a5, []int{16} } -func (m *BuildRouteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BuildRouteResponse.Unmarshal(m, b) +func (m *SubscribeHtlcEventsRequest) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_SubscribeHtlcEventsRequest.Unmarshal(m, b) +} +func (m *SubscribeHtlcEventsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SubscribeHtlcEventsRequest.Marshal(b, m, deterministic) +} +func (m *SubscribeHtlcEventsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubscribeHtlcEventsRequest.Merge(m, src) +} +func (m *SubscribeHtlcEventsRequest) XXX_Size() int { + return xxx_messageInfo_SubscribeHtlcEventsRequest.Size(m) +} +func (m *SubscribeHtlcEventsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SubscribeHtlcEventsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SubscribeHtlcEventsRequest proto.InternalMessageInfo + +//* +//HtlcEvent contains the htlc event that was processed. These are served on a +//best-effort basis; events are not persisted, delivery is not guaranteed +//(in the event of a crash in the switch, forward events may be lost) and +//some events may be replayed upon restart. Events consumed from this package +//should be de-duplicated by the htlc's unique combination of incoming and +//outgoing channel id and htlc id. [EXPERIMENTAL] +type HtlcEvent struct { + //* + //The short channel id that the incoming htlc arrived at our node on. This + //value is zero for sends. + IncomingChannelId uint64 `protobuf:"varint,1,opt,name=incoming_channel_id,json=incomingChannelId,proto3" json:"incoming_channel_id,omitempty"` + //* + //The short channel id that the outgoing htlc left our node on. This value + //is zero for receives. + OutgoingChannelId uint64 `protobuf:"varint,2,opt,name=outgoing_channel_id,json=outgoingChannelId,proto3" json:"outgoing_channel_id,omitempty"` + //* + //Incoming id is the index of the incoming htlc in the incoming channel. + //This value is zero for sends. 
+ IncomingHtlcId uint64 `protobuf:"varint,3,opt,name=incoming_htlc_id,json=incomingHtlcId,proto3" json:"incoming_htlc_id,omitempty"` + //* + //Outgoing id is the index of the outgoing htlc in the outgoing channel. + //This value is zero for receives. + OutgoingHtlcId uint64 `protobuf:"varint,4,opt,name=outgoing_htlc_id,json=outgoingHtlcId,proto3" json:"outgoing_htlc_id,omitempty"` + //* + //The time in unix nanoseconds that the event occurred. + TimestampNs uint64 `protobuf:"varint,5,opt,name=timestamp_ns,json=timestampNs,proto3" json:"timestamp_ns,omitempty"` + //* + //The event type indicates whether the htlc was part of a send, receive or + //forward. + EventType HtlcEvent_EventType `protobuf:"varint,6,opt,name=event_type,json=eventType,proto3,enum=routerrpc.HtlcEvent_EventType" json:"event_type,omitempty"` + // Types that are valid to be assigned to Event: + // *HtlcEvent_ForwardEvent + // *HtlcEvent_ForwardFailEvent + // *HtlcEvent_SettleEvent + // *HtlcEvent_LinkFailEvent + Event isHtlcEvent_Event `protobuf_oneof:"event"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HtlcEvent) Reset() { *m = HtlcEvent{} } +func (m *HtlcEvent) String() string { return proto.CompactTextString(m) } +func (*HtlcEvent) ProtoMessage() {} +func (*HtlcEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{17} +} + +func (m *HtlcEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HtlcEvent.Unmarshal(m, b) +} +func (m *HtlcEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HtlcEvent.Marshal(b, m, deterministic) +} +func (m *HtlcEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_HtlcEvent.Merge(m, src) +} +func (m *HtlcEvent) XXX_Size() int { + return xxx_messageInfo_HtlcEvent.Size(m) +} +func (m *HtlcEvent) XXX_DiscardUnknown() { + xxx_messageInfo_HtlcEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_HtlcEvent 
proto.InternalMessageInfo + +func (m *HtlcEvent) GetIncomingChannelId() uint64 { + if m != nil { + return m.IncomingChannelId + } + return 0 +} + +func (m *HtlcEvent) GetOutgoingChannelId() uint64 { + if m != nil { + return m.OutgoingChannelId + } + return 0 +} + +func (m *HtlcEvent) GetIncomingHtlcId() uint64 { + if m != nil { + return m.IncomingHtlcId + } + return 0 +} + +func (m *HtlcEvent) GetOutgoingHtlcId() uint64 { + if m != nil { + return m.OutgoingHtlcId + } + return 0 +} + +func (m *HtlcEvent) GetTimestampNs() uint64 { + if m != nil { + return m.TimestampNs + } + return 0 +} + +func (m *HtlcEvent) GetEventType() HtlcEvent_EventType { + if m != nil { + return m.EventType + } + return HtlcEvent_UNKNOWN +} + +type isHtlcEvent_Event interface { + isHtlcEvent_Event() +} + +type HtlcEvent_ForwardEvent struct { + ForwardEvent *ForwardEvent `protobuf:"bytes,7,opt,name=forward_event,json=forwardEvent,proto3,oneof"` +} + +type HtlcEvent_ForwardFailEvent struct { + ForwardFailEvent *ForwardFailEvent `protobuf:"bytes,8,opt,name=forward_fail_event,json=forwardFailEvent,proto3,oneof"` +} + +type HtlcEvent_SettleEvent struct { + SettleEvent *SettleEvent `protobuf:"bytes,9,opt,name=settle_event,json=settleEvent,proto3,oneof"` +} + +type HtlcEvent_LinkFailEvent struct { + LinkFailEvent *LinkFailEvent `protobuf:"bytes,10,opt,name=link_fail_event,json=linkFailEvent,proto3,oneof"` +} + +func (*HtlcEvent_ForwardEvent) isHtlcEvent_Event() {} + +func (*HtlcEvent_ForwardFailEvent) isHtlcEvent_Event() {} + +func (*HtlcEvent_SettleEvent) isHtlcEvent_Event() {} + +func (*HtlcEvent_LinkFailEvent) isHtlcEvent_Event() {} + +func (m *HtlcEvent) GetEvent() isHtlcEvent_Event { + if m != nil { + return m.Event + } + return nil +} + +func (m *HtlcEvent) GetForwardEvent() *ForwardEvent { + if x, ok := m.GetEvent().(*HtlcEvent_ForwardEvent); ok { + return x.ForwardEvent + } + return nil +} + +func (m *HtlcEvent) GetForwardFailEvent() *ForwardFailEvent { + if x, ok := 
m.GetEvent().(*HtlcEvent_ForwardFailEvent); ok { + return x.ForwardFailEvent + } + return nil +} + +func (m *HtlcEvent) GetSettleEvent() *SettleEvent { + if x, ok := m.GetEvent().(*HtlcEvent_SettleEvent); ok { + return x.SettleEvent + } + return nil +} + +func (m *HtlcEvent) GetLinkFailEvent() *LinkFailEvent { + if x, ok := m.GetEvent().(*HtlcEvent_LinkFailEvent); ok { + return x.LinkFailEvent + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*HtlcEvent) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*HtlcEvent_ForwardEvent)(nil), + (*HtlcEvent_ForwardFailEvent)(nil), + (*HtlcEvent_SettleEvent)(nil), + (*HtlcEvent_LinkFailEvent)(nil), + } +} + +type HtlcInfo struct { + // The timelock on the incoming htlc. + IncomingTimelock uint32 `protobuf:"varint,1,opt,name=incoming_timelock,json=incomingTimelock,proto3" json:"incoming_timelock,omitempty"` + // The timelock on the outgoing htlc. + OutgoingTimelock uint32 `protobuf:"varint,2,opt,name=outgoing_timelock,json=outgoingTimelock,proto3" json:"outgoing_timelock,omitempty"` + // The amount of the incoming htlc. + IncomingAmtMsat uint64 `protobuf:"varint,3,opt,name=incoming_amt_msat,json=incomingAmtMsat,proto3" json:"incoming_amt_msat,omitempty"` + // The amount of the outgoing htlc. 
+ OutgoingAmtMsat uint64 `protobuf:"varint,4,opt,name=outgoing_amt_msat,json=outgoingAmtMsat,proto3" json:"outgoing_amt_msat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HtlcInfo) Reset() { *m = HtlcInfo{} } +func (m *HtlcInfo) String() string { return proto.CompactTextString(m) } +func (*HtlcInfo) ProtoMessage() {} +func (*HtlcInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{18} +} + +func (m *HtlcInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HtlcInfo.Unmarshal(m, b) +} +func (m *HtlcInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HtlcInfo.Marshal(b, m, deterministic) +} +func (m *HtlcInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_HtlcInfo.Merge(m, src) +} +func (m *HtlcInfo) XXX_Size() int { + return xxx_messageInfo_HtlcInfo.Size(m) +} +func (m *HtlcInfo) XXX_DiscardUnknown() { + xxx_messageInfo_HtlcInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_HtlcInfo proto.InternalMessageInfo + +func (m *HtlcInfo) GetIncomingTimelock() uint32 { + if m != nil { + return m.IncomingTimelock + } + return 0 +} + +func (m *HtlcInfo) GetOutgoingTimelock() uint32 { + if m != nil { + return m.OutgoingTimelock + } + return 0 +} + +func (m *HtlcInfo) GetIncomingAmtMsat() uint64 { + if m != nil { + return m.IncomingAmtMsat + } + return 0 +} + +func (m *HtlcInfo) GetOutgoingAmtMsat() uint64 { + if m != nil { + return m.OutgoingAmtMsat + } + return 0 +} + +type ForwardEvent struct { + // Info contains details about the htlc that was forwarded. 
+ Info *HtlcInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ForwardEvent) Reset() { *m = ForwardEvent{} } +func (m *ForwardEvent) String() string { return proto.CompactTextString(m) } +func (*ForwardEvent) ProtoMessage() {} +func (*ForwardEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{19} +} + +func (m *ForwardEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ForwardEvent.Unmarshal(m, b) +} +func (m *ForwardEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ForwardEvent.Marshal(b, m, deterministic) +} +func (m *ForwardEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_ForwardEvent.Merge(m, src) +} +func (m *ForwardEvent) XXX_Size() int { + return xxx_messageInfo_ForwardEvent.Size(m) +} +func (m *ForwardEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ForwardEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ForwardEvent proto.InternalMessageInfo + +func (m *ForwardEvent) GetInfo() *HtlcInfo { + if m != nil { + return m.Info + } + return nil +} + +type ForwardFailEvent struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ForwardFailEvent) Reset() { *m = ForwardFailEvent{} } +func (m *ForwardFailEvent) String() string { return proto.CompactTextString(m) } +func (*ForwardFailEvent) ProtoMessage() {} +func (*ForwardFailEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{20} +} + +func (m *ForwardFailEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ForwardFailEvent.Unmarshal(m, b) +} +func (m *ForwardFailEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ForwardFailEvent.Marshal(b, m, deterministic) +} +func (m *ForwardFailEvent) XXX_Merge(src proto.Message) 
{ + xxx_messageInfo_ForwardFailEvent.Merge(m, src) +} +func (m *ForwardFailEvent) XXX_Size() int { + return xxx_messageInfo_ForwardFailEvent.Size(m) +} +func (m *ForwardFailEvent) XXX_DiscardUnknown() { + xxx_messageInfo_ForwardFailEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_ForwardFailEvent proto.InternalMessageInfo + +type SettleEvent struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SettleEvent) Reset() { *m = SettleEvent{} } +func (m *SettleEvent) String() string { return proto.CompactTextString(m) } +func (*SettleEvent) ProtoMessage() {} +func (*SettleEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{21} +} + +func (m *SettleEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SettleEvent.Unmarshal(m, b) +} +func (m *SettleEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SettleEvent.Marshal(b, m, deterministic) +} +func (m *SettleEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_SettleEvent.Merge(m, src) +} +func (m *SettleEvent) XXX_Size() int { + return xxx_messageInfo_SettleEvent.Size(m) +} +func (m *SettleEvent) XXX_DiscardUnknown() { + xxx_messageInfo_SettleEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_SettleEvent proto.InternalMessageInfo + +type LinkFailEvent struct { + // Info contains details about the htlc that we failed. + Info *HtlcInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` + // FailureCode is the BOLT error code for the failure. + WireFailure lnrpc.Failure_FailureCode `protobuf:"varint,2,opt,name=wire_failure,json=wireFailure,proto3,enum=lnrpc.Failure_FailureCode" json:"wire_failure,omitempty"` + //* + //FailureDetail provides additional information about the reason for the + //failure. This detail enriches the information provided by the wire message + //and may be 'no detail' if the wire message requires no additional metadata. 
+ FailureDetail FailureDetail `protobuf:"varint,3,opt,name=failure_detail,json=failureDetail,proto3,enum=routerrpc.FailureDetail" json:"failure_detail,omitempty"` + // A string representation of the link failure. + FailureString string `protobuf:"bytes,4,opt,name=failure_string,json=failureString,proto3" json:"failure_string,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LinkFailEvent) Reset() { *m = LinkFailEvent{} } +func (m *LinkFailEvent) String() string { return proto.CompactTextString(m) } +func (*LinkFailEvent) ProtoMessage() {} +func (*LinkFailEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{22} +} + +func (m *LinkFailEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LinkFailEvent.Unmarshal(m, b) +} +func (m *LinkFailEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LinkFailEvent.Marshal(b, m, deterministic) +} +func (m *LinkFailEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinkFailEvent.Merge(m, src) +} +func (m *LinkFailEvent) XXX_Size() int { + return xxx_messageInfo_LinkFailEvent.Size(m) +} +func (m *LinkFailEvent) XXX_DiscardUnknown() { + xxx_messageInfo_LinkFailEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_LinkFailEvent proto.InternalMessageInfo + +func (m *LinkFailEvent) GetInfo() *HtlcInfo { + if m != nil { + return m.Info + } + return nil +} + +func (m *LinkFailEvent) GetWireFailure() lnrpc.Failure_FailureCode { + if m != nil { + return m.WireFailure + } + return lnrpc.Failure_RESERVED +} + +func (m *LinkFailEvent) GetFailureDetail() FailureDetail { + if m != nil { + return m.FailureDetail + } + return FailureDetail_UNKNOWN +} + +func (m *LinkFailEvent) GetFailureString() string { + if m != nil { + return m.FailureString + } + return "" +} + +type PaymentStatus struct { + /// Current state the payment is in. 
+ State PaymentState `protobuf:"varint,1,opt,name=state,proto3,enum=routerrpc.PaymentState" json:"state,omitempty"` + //* + //The pre-image of the payment when state is SUCCEEDED. + Preimage []byte `protobuf:"bytes,2,opt,name=preimage,proto3" json:"preimage,omitempty"` + //* + //The HTLCs made in attempt to settle the payment [EXPERIMENTAL]. + Htlcs []*lnrpc.HTLCAttempt `protobuf:"bytes,4,rep,name=htlcs,proto3" json:"htlcs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PaymentStatus) Reset() { *m = PaymentStatus{} } +func (m *PaymentStatus) String() string { return proto.CompactTextString(m) } +func (*PaymentStatus) ProtoMessage() {} +func (*PaymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_7a0613f69d37b0a5, []int{23} +} + +func (m *PaymentStatus) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PaymentStatus.Unmarshal(m, b) } -func (m *BuildRouteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BuildRouteResponse.Marshal(b, m, deterministic) +func (m *PaymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PaymentStatus.Marshal(b, m, deterministic) } -func (m *BuildRouteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_BuildRouteResponse.Merge(m, src) +func (m *PaymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PaymentStatus.Merge(m, src) } -func (m *BuildRouteResponse) XXX_Size() int { - return xxx_messageInfo_BuildRouteResponse.Size(m) +func (m *PaymentStatus) XXX_Size() int { + return xxx_messageInfo_PaymentStatus.Size(m) } -func (m *BuildRouteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_BuildRouteResponse.DiscardUnknown(m) +func (m *PaymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PaymentStatus.DiscardUnknown(m) } -var xxx_messageInfo_BuildRouteResponse proto.InternalMessageInfo +var xxx_messageInfo_PaymentStatus 
proto.InternalMessageInfo -func (m *BuildRouteResponse) GetRoute() *lnrpc.Route { +func (m *PaymentStatus) GetState() PaymentState { if m != nil { - return m.Route + return m.State + } + return PaymentState_IN_FLIGHT +} + +func (m *PaymentStatus) GetPreimage() []byte { + if m != nil { + return m.Preimage + } + return nil +} + +func (m *PaymentStatus) GetHtlcs() []*lnrpc.HTLCAttempt { + if m != nil { + return m.Htlcs } return nil } func init() { + proto.RegisterEnum("routerrpc.FailureDetail", FailureDetail_name, FailureDetail_value) proto.RegisterEnum("routerrpc.PaymentState", PaymentState_name, PaymentState_value) - proto.RegisterEnum("routerrpc.Failure_FailureCode", Failure_FailureCode_name, Failure_FailureCode_value) + proto.RegisterEnum("routerrpc.HtlcEvent_EventType", HtlcEvent_EventType_name, HtlcEvent_EventType_value) proto.RegisterType((*SendPaymentRequest)(nil), "routerrpc.SendPaymentRequest") - proto.RegisterMapType((map[uint64][]byte)(nil), "routerrpc.SendPaymentRequest.DestTlvEntry") + proto.RegisterMapType((map[uint64][]byte)(nil), "routerrpc.SendPaymentRequest.DestCustomRecordsEntry") proto.RegisterType((*TrackPaymentRequest)(nil), "routerrpc.TrackPaymentRequest") - proto.RegisterType((*PaymentStatus)(nil), "routerrpc.PaymentStatus") proto.RegisterType((*RouteFeeRequest)(nil), "routerrpc.RouteFeeRequest") proto.RegisterType((*RouteFeeResponse)(nil), "routerrpc.RouteFeeResponse") proto.RegisterType((*SendToRouteRequest)(nil), "routerrpc.SendToRouteRequest") proto.RegisterType((*SendToRouteResponse)(nil), "routerrpc.SendToRouteResponse") - proto.RegisterType((*Failure)(nil), "routerrpc.Failure") - proto.RegisterType((*ChannelUpdate)(nil), "routerrpc.ChannelUpdate") proto.RegisterType((*ResetMissionControlRequest)(nil), "routerrpc.ResetMissionControlRequest") proto.RegisterType((*ResetMissionControlResponse)(nil), "routerrpc.ResetMissionControlResponse") proto.RegisterType((*QueryMissionControlRequest)(nil), "routerrpc.QueryMissionControlRequest") 
proto.RegisterType((*QueryMissionControlResponse)(nil), "routerrpc.QueryMissionControlResponse") - proto.RegisterType((*NodeHistory)(nil), "routerrpc.NodeHistory") proto.RegisterType((*PairHistory)(nil), "routerrpc.PairHistory") + proto.RegisterType((*PairData)(nil), "routerrpc.PairData") + proto.RegisterType((*QueryProbabilityRequest)(nil), "routerrpc.QueryProbabilityRequest") + proto.RegisterType((*QueryProbabilityResponse)(nil), "routerrpc.QueryProbabilityResponse") proto.RegisterType((*BuildRouteRequest)(nil), "routerrpc.BuildRouteRequest") proto.RegisterType((*BuildRouteResponse)(nil), "routerrpc.BuildRouteResponse") + proto.RegisterType((*SubscribeHtlcEventsRequest)(nil), "routerrpc.SubscribeHtlcEventsRequest") + proto.RegisterType((*HtlcEvent)(nil), "routerrpc.HtlcEvent") + proto.RegisterType((*HtlcInfo)(nil), "routerrpc.HtlcInfo") + proto.RegisterType((*ForwardEvent)(nil), "routerrpc.ForwardEvent") + proto.RegisterType((*ForwardFailEvent)(nil), "routerrpc.ForwardFailEvent") + proto.RegisterType((*SettleEvent)(nil), "routerrpc.SettleEvent") + proto.RegisterType((*LinkFailEvent)(nil), "routerrpc.LinkFailEvent") + proto.RegisterType((*PaymentStatus)(nil), "routerrpc.PaymentStatus") } func init() { proto.RegisterFile("routerrpc/router.proto", fileDescriptor_7a0613f69d37b0a5) } var fileDescriptor_7a0613f69d37b0a5 = []byte{ - // 1867 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x58, 0x4f, 0x73, 0x22, 0xc7, - 0x15, 0x37, 0x02, 0x04, 0x3c, 0x40, 0x1a, 0xb5, 0xb4, 0xd2, 0x2c, 0x92, 0xbc, 0x32, 0x76, 0xd6, - 0xaa, 0x2d, 0x47, 0x72, 0x94, 0xb2, 0x6b, 0xcb, 0x87, 0xa4, 0x58, 0x68, 0xac, 0xd9, 0x85, 0x19, - 0xb9, 0x81, 0xb5, 0x37, 0x39, 0x74, 0xb5, 0xa0, 0x25, 0xa6, 0x34, 0xcc, 0xe0, 0x99, 0x46, 0x59, - 0xe5, 0x90, 0x4b, 0xce, 0xf9, 0x14, 0xb9, 0xa6, 0x2a, 0x5f, 0x25, 0x9f, 0x22, 0xf9, 0x04, 0x7b, - 0x4c, 0x55, 0xaa, 0xbb, 0x67, 0x60, 0x90, 0xd0, 0xc6, 0x27, 0x4d, 0xff, 0xde, 0xeb, 0xd7, 0xaf, - 0xdf, 0x9f, 
0x5f, 0x3f, 0x01, 0xbb, 0x61, 0x30, 0x13, 0x3c, 0x0c, 0xa7, 0xc3, 0x53, 0xfd, 0x75, - 0x32, 0x0d, 0x03, 0x11, 0xa0, 0xd2, 0x1c, 0xaf, 0x95, 0xc2, 0xe9, 0x50, 0xa3, 0xf5, 0xff, 0x66, - 0x01, 0xf5, 0xb8, 0x3f, 0xba, 0x60, 0x77, 0x13, 0xee, 0x0b, 0xc2, 0x7f, 0x9e, 0xf1, 0x48, 0x20, - 0x04, 0xb9, 0x11, 0x8f, 0x84, 0x99, 0x39, 0xca, 0x1c, 0x57, 0x88, 0xfa, 0x46, 0x06, 0x64, 0xd9, - 0x44, 0x98, 0x6b, 0x47, 0x99, 0xe3, 0x2c, 0x91, 0x9f, 0xe8, 0x33, 0xa8, 0x4c, 0xf5, 0x3e, 0x3a, - 0x66, 0xd1, 0xd8, 0xcc, 0x2a, 0xed, 0x72, 0x8c, 0x9d, 0xb3, 0x68, 0x8c, 0x8e, 0xc1, 0xb8, 0x72, - 0x7d, 0xe6, 0xd1, 0xa1, 0x27, 0x6e, 0xe9, 0x88, 0x7b, 0x82, 0x99, 0xb9, 0xa3, 0xcc, 0x71, 0x9e, - 0x6c, 0x28, 0xbc, 0xe9, 0x89, 0xdb, 0x96, 0x44, 0xd1, 0x97, 0xb0, 0x99, 0x18, 0x0b, 0xb5, 0x17, - 0x66, 0xfe, 0x28, 0x73, 0x5c, 0x22, 0x1b, 0xd3, 0x65, 0xdf, 0xbe, 0x84, 0x4d, 0xe1, 0x4e, 0x78, - 0x30, 0x13, 0x34, 0xe2, 0xc3, 0xc0, 0x1f, 0x45, 0xe6, 0xba, 0xb6, 0x18, 0xc3, 0x3d, 0x8d, 0xa2, - 0x3a, 0x54, 0xaf, 0x38, 0xa7, 0x9e, 0x3b, 0x71, 0x05, 0x8d, 0x98, 0x30, 0x0b, 0xca, 0xf5, 0xf2, - 0x15, 0xe7, 0x1d, 0x89, 0xf5, 0x98, 0x90, 0xfe, 0x05, 0x33, 0x71, 0x1d, 0xb8, 0xfe, 0x35, 0x1d, - 0x8e, 0x99, 0x4f, 0xdd, 0x91, 0x59, 0x3c, 0xca, 0x1c, 0xe7, 0xc8, 0x46, 0x82, 0x37, 0xc7, 0xcc, - 0xb7, 0x46, 0xe8, 0x10, 0x40, 0xdd, 0x41, 0x99, 0x33, 0x4b, 0xea, 0xc4, 0x92, 0x44, 0x94, 0x2d, - 0x74, 0x06, 0x65, 0x15, 0x60, 0x3a, 0x76, 0x7d, 0x11, 0x99, 0x70, 0x94, 0x3d, 0x2e, 0x9f, 0x19, - 0x27, 0x9e, 0x2f, 0x63, 0x4d, 0xa4, 0xe4, 0xdc, 0xf5, 0x05, 0x49, 0x2b, 0x21, 0x0c, 0x45, 0x19, - 0x59, 0x2a, 0xbc, 0x5b, 0xb3, 0xac, 0x36, 0xbc, 0x38, 0x99, 0x67, 0xe9, 0xe4, 0x61, 0x5a, 0x4e, - 0x5a, 0x3c, 0x12, 0x7d, 0xef, 0x16, 0xfb, 0x22, 0xbc, 0x23, 0x85, 0x91, 0x5e, 0xd5, 0xbe, 0x83, - 0x4a, 0x5a, 0x20, 0x13, 0x75, 0xc3, 0xef, 0x54, 0xee, 0x72, 0x44, 0x7e, 0xa2, 0x1d, 0xc8, 0xdf, - 0x32, 0x6f, 0xc6, 0x55, 0xf2, 0x2a, 0x44, 0x2f, 0xbe, 0x5b, 0x7b, 0x99, 0xa9, 0xbf, 0x84, 0xed, - 0x7e, 0xc8, 0x86, 0x37, 0xf7, 0xf2, 0x7f, 0x3f, 
0xb3, 0x99, 0x07, 0x99, 0xad, 0xff, 0x05, 0xaa, - 0xf1, 0xa6, 0x9e, 0x60, 0x62, 0x16, 0xa1, 0x5f, 0x43, 0x3e, 0x12, 0x4c, 0x70, 0xa5, 0xbc, 0x71, - 0xb6, 0x97, 0xba, 0x4a, 0x4a, 0x91, 0x13, 0xad, 0x85, 0x6a, 0x50, 0x9c, 0x86, 0xdc, 0x9d, 0xb0, - 0xeb, 0xc4, 0xad, 0xf9, 0x1a, 0xd5, 0x21, 0xaf, 0x36, 0xab, 0x8a, 0x2a, 0x9f, 0x55, 0xd2, 0x61, - 0x24, 0x5a, 0x54, 0xff, 0x1d, 0x6c, 0xaa, 0x75, 0x9b, 0xf3, 0x8f, 0x55, 0xed, 0x1e, 0x14, 0xd8, - 0x44, 0xa7, 0x5f, 0x57, 0xee, 0x3a, 0x9b, 0xc8, 0xcc, 0xd7, 0x47, 0x60, 0x2c, 0xf6, 0x47, 0xd3, - 0xc0, 0x8f, 0xb8, 0xac, 0x06, 0x69, 0x5c, 0x16, 0x83, 0xac, 0x9c, 0x89, 0xdc, 0x95, 0x51, 0xbb, - 0x36, 0x62, 0xbc, 0xcd, 0x79, 0x37, 0x62, 0x02, 0x3d, 0xd7, 0x45, 0x48, 0xbd, 0x60, 0x78, 0x23, - 0xcb, 0x9a, 0xdd, 0xc5, 0xe6, 0xab, 0x12, 0xee, 0x04, 0xc3, 0x9b, 0x96, 0x04, 0xeb, 0x7f, 0xd4, - 0xed, 0xd5, 0x0f, 0xb4, 0xef, 0xbf, 0x38, 0xbc, 0x8b, 0x10, 0xac, 0x3d, 0x1e, 0x02, 0x0a, 0xdb, - 0x4b, 0xc6, 0xe3, 0x5b, 0xa4, 0x23, 0x9b, 0xb9, 0x17, 0xd9, 0xaf, 0xa0, 0x70, 0xc5, 0x5c, 0x6f, - 0x16, 0x26, 0x86, 0x51, 0x2a, 0x4d, 0x6d, 0x2d, 0x21, 0x89, 0x4a, 0xfd, 0x43, 0x01, 0x0a, 0x31, - 0x88, 0xce, 0x20, 0x37, 0x0c, 0x46, 0x49, 0x76, 0x3f, 0x7d, 0xb8, 0x2d, 0xf9, 0xdb, 0x0c, 0x46, - 0x9c, 0x28, 0x5d, 0xf4, 0x7b, 0xd8, 0x90, 0x4d, 0xe5, 0x73, 0x8f, 0xce, 0xa6, 0x23, 0x36, 0x4f, - 0xa8, 0x99, 0xda, 0xdd, 0xd4, 0x0a, 0x03, 0x25, 0x27, 0xd5, 0x61, 0x7a, 0x89, 0xf6, 0xa1, 0x34, - 0x16, 0xde, 0x50, 0x67, 0x22, 0xa7, 0x0a, 0xba, 0x28, 0x01, 0x95, 0x83, 0x3a, 0x54, 0x03, 0xdf, - 0x0d, 0x7c, 0x1a, 0x8d, 0x19, 0x3d, 0xfb, 0xe6, 0x5b, 0xc5, 0x17, 0x15, 0x52, 0x56, 0x60, 0x6f, - 0xcc, 0xce, 0xbe, 0xf9, 0x16, 0x3d, 0x83, 0xb2, 0xea, 0x5a, 0xfe, 0x7e, 0xea, 0x86, 0x77, 0x8a, - 0x28, 0xaa, 0x44, 0x35, 0x32, 0x56, 0x88, 0x6c, 0x8d, 0x2b, 0x8f, 0x5d, 0x47, 0x8a, 0x1c, 0xaa, - 0x44, 0x2f, 0xd0, 0xd7, 0xb0, 0x13, 0xc7, 0x80, 0x46, 0xc1, 0x2c, 0x1c, 0x72, 0xea, 0xfa, 0x23, - 0xfe, 0x5e, 0x51, 0x43, 0x95, 0xa0, 0x58, 0xd6, 0x53, 0x22, 0x4b, 0x4a, 0xd0, 0x2e, 
0xac, 0x8f, - 0xb9, 0x7b, 0x3d, 0xd6, 0xd4, 0x50, 0x25, 0xf1, 0xaa, 0xfe, 0x8f, 0x3c, 0x94, 0x53, 0x81, 0x41, - 0x15, 0x28, 0x12, 0xdc, 0xc3, 0xe4, 0x2d, 0x6e, 0x19, 0x9f, 0xa0, 0x63, 0xf8, 0xc2, 0xb2, 0x9b, - 0x0e, 0x21, 0xb8, 0xd9, 0xa7, 0x0e, 0xa1, 0x03, 0xfb, 0x8d, 0xed, 0xfc, 0x68, 0xd3, 0x8b, 0xc6, - 0xbb, 0x2e, 0xb6, 0xfb, 0xb4, 0x85, 0xfb, 0x0d, 0xab, 0xd3, 0x33, 0x32, 0xe8, 0x00, 0xcc, 0x85, - 0x66, 0x22, 0x6e, 0x74, 0x9d, 0x81, 0xdd, 0x37, 0xd6, 0xd0, 0x33, 0xd8, 0x6f, 0x5b, 0x76, 0xa3, - 0x43, 0x17, 0x3a, 0xcd, 0x4e, 0xff, 0x2d, 0xc5, 0x3f, 0x5d, 0x58, 0xe4, 0x9d, 0x91, 0x5d, 0xa5, - 0x70, 0xde, 0xef, 0x34, 0x13, 0x0b, 0x39, 0xf4, 0x14, 0x9e, 0x68, 0x05, 0xbd, 0x85, 0xf6, 0x1d, - 0x87, 0xf6, 0x1c, 0xc7, 0x36, 0xf2, 0x68, 0x0b, 0xaa, 0x96, 0xfd, 0xb6, 0xd1, 0xb1, 0x5a, 0x94, - 0xe0, 0x46, 0xa7, 0x6b, 0xac, 0xa3, 0x6d, 0xd8, 0xbc, 0xaf, 0x57, 0x90, 0x26, 0x12, 0x3d, 0xc7, - 0xb6, 0x1c, 0x9b, 0xbe, 0xc5, 0xa4, 0x67, 0x39, 0xb6, 0x51, 0x44, 0xbb, 0x80, 0x96, 0x45, 0xe7, - 0xdd, 0x46, 0xd3, 0x28, 0xa1, 0x27, 0xb0, 0xb5, 0x8c, 0xbf, 0xc1, 0xef, 0x0c, 0x40, 0x26, 0xec, - 0x68, 0xc7, 0xe8, 0x2b, 0xdc, 0x71, 0x7e, 0xa4, 0x5d, 0xcb, 0xb6, 0xba, 0x83, 0xae, 0x51, 0x46, - 0x3b, 0x60, 0xb4, 0x31, 0xa6, 0x96, 0xdd, 0x1b, 0xb4, 0xdb, 0x56, 0xd3, 0xc2, 0x76, 0xdf, 0xa8, - 0xe8, 0x93, 0x57, 0x5d, 0xbc, 0x2a, 0x37, 0x34, 0xcf, 0x1b, 0xb6, 0x8d, 0x3b, 0xb4, 0x65, 0xf5, - 0x1a, 0xaf, 0x3a, 0xb8, 0x65, 0x6c, 0xa0, 0x43, 0x78, 0xda, 0xc7, 0xdd, 0x0b, 0x87, 0x34, 0xc8, - 0x3b, 0x9a, 0xc8, 0xdb, 0x0d, 0xab, 0x33, 0x20, 0xd8, 0xd8, 0x44, 0x9f, 0xc1, 0x21, 0xc1, 0x3f, - 0x0c, 0x2c, 0x82, 0x5b, 0xd4, 0x76, 0x5a, 0x98, 0xb6, 0x71, 0xa3, 0x3f, 0x20, 0x98, 0x76, 0xad, - 0x5e, 0xcf, 0xb2, 0xbf, 0x37, 0x0c, 0xf4, 0x05, 0x1c, 0xcd, 0x55, 0xe6, 0x06, 0xee, 0x69, 0x6d, - 0xc9, 0xfb, 0x25, 0x29, 0xb5, 0xf1, 0x4f, 0x7d, 0x7a, 0x81, 0x31, 0x31, 0x10, 0xaa, 0xc1, 0xee, - 0xe2, 0x78, 0x7d, 0x40, 0x7c, 0xf6, 0xb6, 0x94, 0x5d, 0x60, 0xd2, 0x6d, 0xd8, 0x32, 0xc1, 0x4b, - 0xb2, 0x1d, 0xe9, 0xf6, 
0x42, 0x76, 0xdf, 0xed, 0x27, 0x08, 0xc1, 0x46, 0x2a, 0x2b, 0xed, 0x06, - 0x31, 0x76, 0xd1, 0x0e, 0x6c, 0x26, 0x1e, 0x24, 0x8a, 0xff, 0x2e, 0xa0, 0x3d, 0x40, 0x03, 0x9b, - 0xe0, 0x46, 0x4b, 0x06, 0x64, 0x2e, 0xf8, 0x4f, 0xe1, 0x75, 0xae, 0xb8, 0x66, 0x64, 0xeb, 0xff, - 0xcc, 0x42, 0x75, 0xa9, 0x2f, 0xd1, 0x01, 0x94, 0x22, 0xf7, 0xda, 0x67, 0x42, 0x32, 0x87, 0x26, - 0x95, 0x05, 0xa0, 0xde, 0xc6, 0x31, 0x73, 0x7d, 0xcd, 0x66, 0x9a, 0xcd, 0x4b, 0x0a, 0x51, 0x5c, - 0xb6, 0x07, 0x85, 0xe4, 0x6d, 0xcd, 0xaa, 0x1e, 0x5e, 0x1f, 0xea, 0x37, 0xf5, 0x00, 0x4a, 0x92, - 0x2e, 0x23, 0xc1, 0x26, 0x53, 0xd5, 0xde, 0x55, 0xb2, 0x00, 0xd0, 0xe7, 0x50, 0x9d, 0xf0, 0x28, - 0x62, 0xd7, 0x9c, 0xea, 0x16, 0x05, 0xa5, 0x51, 0x89, 0xc1, 0xb6, 0xea, 0xd4, 0xcf, 0x21, 0xa1, - 0x8c, 0x58, 0x29, 0xaf, 0x95, 0x62, 0x50, 0x2b, 0xdd, 0x67, 0x6b, 0xc1, 0x62, 0x26, 0x48, 0xb3, - 0xb5, 0x60, 0xe8, 0x05, 0x6c, 0x69, 0xba, 0x71, 0x7d, 0x77, 0x32, 0x9b, 0x68, 0xda, 0x29, 0x28, - 0x97, 0x37, 0x15, 0xed, 0x68, 0x5c, 0xb1, 0xcf, 0x53, 0x28, 0x5e, 0xb2, 0x88, 0xcb, 0x87, 0x22, - 0xa6, 0x85, 0x82, 0x5c, 0xb7, 0x39, 0x97, 0x22, 0xf9, 0x7c, 0x84, 0x92, 0xf0, 0x34, 0x1b, 0x14, - 0xae, 0x38, 0x27, 0x32, 0x8e, 0xf3, 0x13, 0xd8, 0xfb, 0xc5, 0x09, 0xe5, 0xd4, 0x09, 0x1a, 0x57, - 0x27, 0xbc, 0x80, 0x2d, 0xfe, 0x5e, 0x84, 0x8c, 0x06, 0x53, 0xf6, 0xf3, 0x8c, 0xd3, 0x11, 0x13, - 0xcc, 0xac, 0xa8, 0xe0, 0x6e, 0x2a, 0x81, 0xa3, 0xf0, 0x16, 0x13, 0xac, 0x7e, 0x00, 0x35, 0xc2, - 0x23, 0x2e, 0xba, 0x6e, 0x14, 0xb9, 0x81, 0xdf, 0x0c, 0x7c, 0x11, 0x06, 0x5e, 0xfc, 0xde, 0xd4, - 0x0f, 0x61, 0x7f, 0xa5, 0x54, 0x3f, 0x18, 0x72, 0xf3, 0x0f, 0x33, 0x1e, 0xde, 0xad, 0xde, 0x7c, - 0x07, 0xfb, 0x2b, 0xa5, 0xf1, 0x6b, 0xf3, 0x15, 0xe4, 0xfd, 0x60, 0xc4, 0x23, 0x33, 0xa3, 0x26, - 0x98, 0xdd, 0x14, 0xb5, 0xdb, 0xc1, 0x88, 0x9f, 0xbb, 0x91, 0x08, 0xc2, 0x3b, 0xa2, 0x95, 0xa4, - 0xf6, 0x94, 0xb9, 0x61, 0x64, 0xae, 0x3d, 0xd0, 0xbe, 0x60, 0x6e, 0x38, 0xd7, 0x56, 0x4a, 0xf5, - 0xbf, 0x66, 0xa0, 0x9c, 0x32, 0x22, 0x49, 0x76, 0x3a, 0xbb, 
0x4c, 0x86, 0x9b, 0x0a, 0x89, 0x57, - 0xe8, 0x39, 0x6c, 0x78, 0x2c, 0x12, 0x54, 0xf2, 0x32, 0x95, 0x29, 0x8d, 0x1f, 0xe3, 0x7b, 0x28, - 0x3a, 0x01, 0x14, 0x88, 0x31, 0x0f, 0x69, 0x34, 0x1b, 0x0e, 0x79, 0x14, 0xd1, 0x69, 0x18, 0x5c, - 0xaa, 0x9a, 0x5c, 0x23, 0x2b, 0x24, 0xaf, 0x73, 0xc5, 0x9c, 0x91, 0xaf, 0x7f, 0xc8, 0x40, 0x39, - 0xe5, 0x9c, 0xac, 0x5a, 0x79, 0x19, 0x7a, 0x15, 0x06, 0x93, 0xa4, 0x17, 0xe6, 0x00, 0x32, 0xa1, - 0xa0, 0x16, 0x22, 0x88, 0x1b, 0x21, 0x59, 0x2e, 0x57, 0x7b, 0x56, 0x39, 0x98, 0xaa, 0xf6, 0x33, - 0xd8, 0x99, 0xb8, 0x3e, 0x9d, 0x72, 0x9f, 0x79, 0xee, 0x9f, 0x39, 0x4d, 0xa6, 0x96, 0x9c, 0x52, - 0x5c, 0x29, 0x43, 0x75, 0xa8, 0x2c, 0xdd, 0x24, 0xaf, 0x6e, 0xb2, 0x84, 0xa1, 0x97, 0xb0, 0xa7, - 0xa2, 0xc0, 0x84, 0xe0, 0x93, 0xa9, 0x48, 0x2e, 0x78, 0x35, 0xf3, 0x54, 0x0f, 0x14, 0xc9, 0x63, - 0xe2, 0xfa, 0xdf, 0x33, 0xb0, 0xf5, 0x6a, 0xe6, 0x7a, 0xa3, 0xa5, 0xd9, 0xe5, 0x29, 0x14, 0xe5, - 0xf1, 0xa9, 0xd9, 0x48, 0x0e, 0x58, 0xaa, 0x60, 0x57, 0x0d, 0xfb, 0x6b, 0x2b, 0x87, 0xfd, 0x55, - 0x63, 0x77, 0x76, 0xe5, 0xd8, 0xfd, 0x0c, 0xca, 0xe3, 0x60, 0x4a, 0x75, 0xa2, 0x23, 0x33, 0x77, - 0x94, 0x3d, 0xae, 0x10, 0x18, 0x07, 0xd3, 0x0b, 0x8d, 0xd4, 0x5f, 0x02, 0x4a, 0x3b, 0x19, 0x57, - 0xe5, 0x7c, 0x7c, 0xca, 0x3c, 0x3a, 0x3e, 0xbd, 0xf8, 0x5b, 0x06, 0x2a, 0xe9, 0xc9, 0x14, 0x55, - 0xa1, 0x64, 0xd9, 0xb4, 0xdd, 0xb1, 0xbe, 0x3f, 0xef, 0x1b, 0x9f, 0xc8, 0x65, 0x6f, 0xd0, 0x6c, - 0x62, 0xdc, 0xc2, 0x2d, 0x23, 0x23, 0xd9, 0x55, 0x12, 0x25, 0x6e, 0xd1, 0xbe, 0xd5, 0xc5, 0xce, - 0x40, 0xbe, 0xbb, 0xdb, 0xb0, 0x19, 0x63, 0xb6, 0x43, 0x89, 0x33, 0xe8, 0x63, 0x23, 0x8b, 0x0c, - 0xa8, 0xc4, 0x20, 0x26, 0xc4, 0x21, 0x46, 0x4e, 0x3e, 0x16, 0x31, 0xf2, 0xf0, 0x0d, 0x4f, 0x9e, - 0xf8, 0xfc, 0xd9, 0xbf, 0x72, 0xb0, 0xae, 0x1c, 0x0c, 0xd1, 0x39, 0x94, 0x53, 0xe3, 0x3f, 0x3a, - 0xfc, 0xe8, 0xbf, 0x05, 0x35, 0x73, 0xf5, 0xa8, 0x3d, 0x8b, 0xbe, 0xce, 0xa0, 0xd7, 0x50, 0x49, - 0x0f, 0xf8, 0x28, 0x3d, 0xb8, 0xad, 0x98, 0xfc, 0x3f, 0x6a, 0xeb, 0x0d, 0x18, 0x38, 0x12, 0xee, - 
0x44, 0x0e, 0x6a, 0xf1, 0xe8, 0x8c, 0x6a, 0x29, 0xfd, 0x7b, 0xf3, 0x78, 0x6d, 0x7f, 0xa5, 0x2c, - 0xce, 0x50, 0x47, 0x5f, 0x31, 0x1e, 0x5e, 0x1f, 0x5c, 0x71, 0x79, 0x62, 0xae, 0x7d, 0xfa, 0x98, - 0x38, 0xb6, 0x36, 0x82, 0xed, 0x15, 0x0c, 0x87, 0x7e, 0x95, 0xf6, 0xe0, 0x51, 0x7e, 0xac, 0x3d, - 0xff, 0x7f, 0x6a, 0x8b, 0x53, 0x56, 0x50, 0xe1, 0xd2, 0x29, 0x8f, 0x13, 0xe9, 0xd2, 0x29, 0x1f, - 0x63, 0x54, 0x0b, 0x60, 0x51, 0xd1, 0xe8, 0x20, 0xb5, 0xeb, 0x41, 0x37, 0xd6, 0x0e, 0x1f, 0x91, - 0x6a, 0x53, 0xaf, 0x7e, 0xf3, 0x87, 0xd3, 0x6b, 0x57, 0x8c, 0x67, 0x97, 0x27, 0xc3, 0x60, 0x72, - 0xea, 0xc9, 0x89, 0xd4, 0x77, 0xfd, 0x6b, 0x9f, 0x8b, 0x3f, 0x05, 0xe1, 0xcd, 0xa9, 0xe7, 0x8f, - 0x4e, 0x55, 0x63, 0x9c, 0xce, 0xad, 0x5c, 0xae, 0xab, 0x1f, 0x06, 0x7e, 0xfb, 0xbf, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xbc, 0x7f, 0xf9, 0xc1, 0x48, 0x10, 0x00, 0x00, + // 2248 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x76, 0xdb, 0xb8, + 0xf5, 0x1f, 0x4a, 0x94, 0x2d, 0x5d, 0x7d, 0x98, 0x86, 0x33, 0x8e, 0xfe, 0x72, 0x32, 0xa3, 0xe1, + 0x7f, 0x26, 0xd1, 0x49, 0x33, 0x76, 0xc6, 0xed, 0x69, 0xe7, 0xb4, 0x9d, 0x69, 0x65, 0x89, 0x8e, + 0x99, 0xc8, 0xa4, 0x06, 0x92, 0x9d, 0xa4, 0x59, 0xe0, 0xd0, 0x12, 0x64, 0xb1, 0xe6, 0x87, 0x4a, + 0x42, 0xc9, 0x78, 0xd9, 0x5d, 0x4f, 0xb7, 0x7d, 0x88, 0xee, 0xfa, 0x04, 0xdd, 0xf6, 0x39, 0xba, + 0xed, 0x13, 0x74, 0xdd, 0x03, 0x10, 0x94, 0x28, 0x5b, 0x4e, 0xda, 0x8d, 0x4d, 0xfc, 0xee, 0xc5, + 0xc5, 0xbd, 0xb8, 0xbf, 0x8b, 0x0b, 0x08, 0x76, 0xa3, 0x70, 0xce, 0x68, 0x14, 0xcd, 0x46, 0x07, + 0xc9, 0xd7, 0xfe, 0x2c, 0x0a, 0x59, 0x88, 0x4a, 0x0b, 0xbc, 0x51, 0x8a, 0x66, 0xa3, 0x04, 0xd5, + 0xff, 0xb1, 0x01, 0x68, 0x40, 0x83, 0x71, 0xdf, 0xb9, 0xf6, 0x69, 0xc0, 0x30, 0xfd, 0xc3, 0x9c, + 0xc6, 0x0c, 0x21, 0x50, 0xc7, 0x34, 0x66, 0x75, 0xa5, 0xa9, 0xb4, 0x2a, 0x58, 0x7c, 0x23, 0x0d, + 0xf2, 0x8e, 0xcf, 0xea, 0xb9, 0xa6, 0xd2, 0xca, 0x63, 0xfe, 0x89, 0xfe, 0x0f, 0x8a, 0x8e, 0xcf, + 0x88, 0x1f, 0x3b, 
0xac, 0x5e, 0x11, 0xf0, 0xa6, 0xe3, 0xb3, 0xd3, 0xd8, 0x61, 0xe8, 0x0b, 0xa8, + 0xcc, 0x12, 0x93, 0x64, 0xea, 0xc4, 0xd3, 0x7a, 0x5e, 0x18, 0x2a, 0x4b, 0xec, 0xc4, 0x89, 0xa7, + 0xa8, 0x05, 0xda, 0xc4, 0x0d, 0x1c, 0x8f, 0x8c, 0x3c, 0xf6, 0x8e, 0x8c, 0xa9, 0xc7, 0x9c, 0xba, + 0xda, 0x54, 0x5a, 0x05, 0x5c, 0x13, 0x78, 0xc7, 0x63, 0xef, 0xba, 0x1c, 0x45, 0x8f, 0x61, 0x2b, + 0x35, 0x16, 0x25, 0x0e, 0xd6, 0x0b, 0x4d, 0xa5, 0x55, 0xc2, 0xb5, 0xd9, 0xaa, 0xdb, 0x8f, 0x61, + 0x8b, 0xb9, 0x3e, 0x0d, 0xe7, 0x8c, 0xc4, 0x74, 0x14, 0x06, 0xe3, 0xb8, 0xbe, 0x91, 0x58, 0x94, + 0xf0, 0x20, 0x41, 0x91, 0x0e, 0xd5, 0x09, 0xa5, 0xc4, 0x73, 0x7d, 0x97, 0x11, 0xee, 0xfe, 0xa6, + 0x70, 0xbf, 0x3c, 0xa1, 0xb4, 0xc7, 0xb1, 0x81, 0xc3, 0xd0, 0x97, 0x50, 0x5b, 0xea, 0x88, 0x18, + 0xab, 0x42, 0xa9, 0x92, 0x2a, 0x89, 0x40, 0x9f, 0x82, 0x16, 0xce, 0xd9, 0x65, 0xe8, 0x06, 0x97, + 0x64, 0x34, 0x75, 0x02, 0xe2, 0x8e, 0xeb, 0xc5, 0xa6, 0xd2, 0x52, 0x8f, 0x72, 0xcf, 0x14, 0x5c, + 0x4b, 0x65, 0x9d, 0xa9, 0x13, 0x98, 0x63, 0xf4, 0x08, 0xb6, 0x3c, 0x27, 0x66, 0x64, 0x1a, 0xce, + 0xc8, 0x6c, 0x7e, 0x71, 0x45, 0xaf, 0xeb, 0x35, 0xb1, 0x33, 0x55, 0x0e, 0x9f, 0x84, 0xb3, 0xbe, + 0x00, 0xd1, 0x43, 0x00, 0xb1, 0x2b, 0x62, 0xf1, 0x7a, 0x49, 0xc4, 0x50, 0xe2, 0x88, 0x58, 0x18, + 0x7d, 0x03, 0x65, 0x91, 0x4d, 0x32, 0x75, 0x03, 0x16, 0xd7, 0xa1, 0x99, 0x6f, 0x95, 0x0f, 0xb5, + 0x7d, 0x2f, 0xe0, 0x89, 0xc5, 0x5c, 0x72, 0xe2, 0x06, 0x0c, 0x43, 0x94, 0x7e, 0xc6, 0x68, 0x0c, + 0x3b, 0x3c, 0x8b, 0x64, 0x34, 0x8f, 0x59, 0xe8, 0x93, 0x88, 0x8e, 0xc2, 0x68, 0x1c, 0xd7, 0xcb, + 0x62, 0xea, 0xcf, 0xf6, 0x17, 0xe4, 0xd8, 0xbf, 0xcd, 0x86, 0xfd, 0x2e, 0x8d, 0x59, 0x47, 0xcc, + 0xc3, 0xc9, 0x34, 0x23, 0x60, 0xd1, 0x35, 0xde, 0x1e, 0xdf, 0xc4, 0xd1, 0x53, 0x40, 0x8e, 0xe7, + 0x85, 0xef, 0x49, 0x4c, 0xbd, 0x09, 0x91, 0xd9, 0xa9, 0x6f, 0x35, 0x95, 0x56, 0x11, 0x6b, 0x42, + 0x32, 0xa0, 0xde, 0x44, 0x9a, 0x47, 0x3f, 0x87, 0xaa, 0xf0, 0x69, 0x42, 0x1d, 0x36, 0x8f, 0x68, + 0x5c, 0xd7, 0x9a, 0xf9, 0x56, 0xed, 0x70, 0x5b, 0x06, 
0x72, 0x9c, 0xc0, 0x47, 0x2e, 0xc3, 0x15, + 0xae, 0x27, 0xc7, 0x31, 0xda, 0x83, 0x92, 0xef, 0xfc, 0x48, 0x66, 0x4e, 0xc4, 0xe2, 0xfa, 0x76, + 0x53, 0x69, 0x55, 0x71, 0xd1, 0x77, 0x7e, 0xec, 0xf3, 0x31, 0xda, 0x87, 0x9d, 0x20, 0x24, 0x6e, + 0x30, 0xf1, 0xdc, 0xcb, 0x29, 0x23, 0xf3, 0xd9, 0xd8, 0x61, 0x34, 0xae, 0x23, 0xe1, 0xc3, 0x76, + 0x10, 0x9a, 0x52, 0x72, 0x96, 0x08, 0x1a, 0x5d, 0xd8, 0x5d, 0x1f, 0x1f, 0x27, 0x3c, 0x4f, 0x10, + 0xaf, 0x01, 0x15, 0xf3, 0x4f, 0x74, 0x0f, 0x0a, 0xef, 0x1c, 0x6f, 0x4e, 0x45, 0x11, 0x54, 0x70, + 0x32, 0xf8, 0x65, 0xee, 0x5b, 0x45, 0x9f, 0xc2, 0xce, 0x30, 0x72, 0x46, 0x57, 0x37, 0xea, 0xe8, + 0x66, 0x19, 0x28, 0xb7, 0xcb, 0xe0, 0x0e, 0x7f, 0x73, 0x77, 0xf8, 0xab, 0x7f, 0x0f, 0x5b, 0x22, + 0xc3, 0xc7, 0x94, 0x7e, 0xa8, 0x5a, 0xef, 0x03, 0xaf, 0x45, 0xc1, 0xed, 0xa4, 0x62, 0x37, 0x1c, + 0x9f, 0xd3, 0x5a, 0x1f, 0x83, 0xb6, 0x9c, 0x1f, 0xcf, 0xc2, 0x20, 0xa6, 0xbc, 0x14, 0x39, 0x01, + 0x38, 0x87, 0x39, 0xe5, 0x05, 0xd9, 0x15, 0x31, 0xab, 0x26, 0xf1, 0x63, 0x4a, 0x05, 0xdd, 0x1f, + 0x25, 0x15, 0x46, 0xbc, 0x70, 0x74, 0xc5, 0x6b, 0xd6, 0xb9, 0x96, 0xe6, 0xab, 0x1c, 0xee, 0x85, + 0xa3, 0xab, 0x2e, 0x07, 0xf5, 0xb7, 0xc9, 0xb1, 0x32, 0x0c, 0xc5, 0x5a, 0xff, 0xc3, 0x76, 0xe8, + 0x50, 0x10, 0x5c, 0x14, 0x66, 0xcb, 0x87, 0x95, 0x2c, 0xa9, 0x71, 0x22, 0xd2, 0xdf, 0xc2, 0xce, + 0x8a, 0x71, 0x19, 0x45, 0x03, 0x8a, 0xb3, 0x88, 0xba, 0xbe, 0x73, 0x49, 0xa5, 0xe5, 0xc5, 0x18, + 0xb5, 0x60, 0x73, 0xe2, 0xb8, 0xde, 0x3c, 0x4a, 0x0d, 0xd7, 0x52, 0x92, 0x25, 0x28, 0x4e, 0xc5, + 0xfa, 0x03, 0x68, 0x60, 0x1a, 0x53, 0x76, 0xea, 0xc6, 0xb1, 0x1b, 0x06, 0x9d, 0x30, 0x60, 0x51, + 0xe8, 0xc9, 0x08, 0xf4, 0x87, 0xb0, 0xb7, 0x56, 0x9a, 0xb8, 0xc0, 0x27, 0xff, 0x30, 0xa7, 0xd1, + 0xf5, 0xfa, 0xc9, 0x3f, 0xc0, 0xde, 0x5a, 0xa9, 0xf4, 0xff, 0x29, 0x14, 0x66, 0x8e, 0x1b, 0xf1, + 0xdc, 0xf3, 0xa2, 0xdc, 0xcd, 0x14, 0x65, 0xdf, 0x71, 0xa3, 0x13, 0x37, 0x66, 0x61, 0x74, 0x8d, + 0x13, 0xa5, 0x17, 0x6a, 0x51, 0xd1, 0x72, 0xfa, 0x9f, 0x15, 0x28, 0x67, 0x84, 0xbc, 0x34, 
0x82, + 0x70, 0x4c, 0xc9, 0x24, 0x0a, 0xfd, 0x74, 0x13, 0x38, 0x70, 0x1c, 0x85, 0x3e, 0xe7, 0x84, 0x10, + 0xb2, 0x50, 0x12, 0x78, 0x83, 0x0f, 0x87, 0x21, 0xfa, 0x1a, 0x36, 0xa7, 0x89, 0x01, 0x71, 0x10, + 0x96, 0x0f, 0x77, 0x6e, 0xac, 0xdd, 0x75, 0x98, 0x83, 0x53, 0x9d, 0x17, 0x6a, 0x31, 0xaf, 0xa9, + 0x2f, 0xd4, 0xa2, 0xaa, 0x15, 0x5e, 0xa8, 0xc5, 0x82, 0xb6, 0xf1, 0x42, 0x2d, 0x6e, 0x68, 0x9b, + 0xfa, 0xbf, 0x14, 0x28, 0xa6, 0xda, 0xdc, 0x13, 0xbe, 0xa5, 0x84, 0xf3, 0x42, 0x92, 0xa9, 0xc8, + 0x81, 0xa1, 0xeb, 0x53, 0xd4, 0x84, 0x8a, 0x10, 0xae, 0x52, 0x14, 0x38, 0xd6, 0x16, 0x34, 0x15, + 0x27, 0x74, 0xaa, 0x21, 0xf8, 0xa8, 0xca, 0x13, 0x3a, 0x51, 0x49, 0x9b, 0x4c, 0x3c, 0x1f, 0x8d, + 0x68, 0x1c, 0x27, 0xab, 0x14, 0x12, 0x15, 0x89, 0x89, 0x85, 0x1e, 0xc1, 0x56, 0xaa, 0x92, 0xae, + 0xb5, 0x91, 0xf0, 0x55, 0xc2, 0x72, 0xb9, 0x16, 0x68, 0x59, 0x3d, 0x7f, 0xd9, 0x13, 0x6a, 0x4b, + 0x45, 0xbe, 0x68, 0x12, 0xbc, 0xfe, 0x7b, 0xb8, 0x2f, 0x52, 0xd9, 0x8f, 0xc2, 0x0b, 0xe7, 0xc2, + 0xf5, 0x5c, 0x76, 0x9d, 0x92, 0x9c, 0x07, 0x1e, 0x85, 0x3e, 0xe1, 0x7b, 0x9b, 0xa6, 0x80, 0x03, + 0x56, 0x38, 0xa6, 0x3c, 0x05, 0x2c, 0x4c, 0x44, 0x32, 0x05, 0x2c, 0x14, 0x82, 0x6c, 0x2f, 0xcd, + 0xaf, 0xf4, 0x52, 0xfd, 0x0a, 0xea, 0xb7, 0xd7, 0x92, 0x9c, 0x69, 0x42, 0x79, 0xb6, 0x84, 0xc5, + 0x72, 0x0a, 0xce, 0x42, 0xd9, 0xdc, 0xe6, 0x3e, 0x9e, 0x5b, 0xfd, 0xaf, 0x0a, 0x6c, 0x1f, 0xcd, + 0x5d, 0x6f, 0xbc, 0x52, 0xb8, 0x59, 0xef, 0x94, 0xd5, 0x4e, 0xbf, 0xae, 0x8d, 0xe7, 0xd6, 0xb6, + 0xf1, 0x75, 0xad, 0x32, 0x7f, 0x67, 0xab, 0xfc, 0x1c, 0xca, 0xcb, 0x2e, 0x19, 0xd7, 0xd5, 0x66, + 0xbe, 0x55, 0xc1, 0x30, 0x4d, 0x5b, 0x64, 0xac, 0x7f, 0x0b, 0x28, 0xeb, 0xa8, 0xdc, 0x90, 0xc5, + 0xf9, 0xa1, 0xdc, 0x7d, 0x7e, 0x3c, 0x80, 0xc6, 0x60, 0x7e, 0x11, 0x8f, 0x22, 0xf7, 0x82, 0x9e, + 0x30, 0x6f, 0x64, 0xbc, 0xa3, 0x01, 0x8b, 0xd3, 0x2a, 0xfd, 0xb7, 0x0a, 0xa5, 0x05, 0xca, 0x8f, + 0x67, 0x37, 0x18, 0x85, 0x7e, 0xea, 0x74, 0x40, 0x3d, 0xee, 0x77, 0xd2, 0x14, 0xb6, 0x53, 0x51, + 0x27, 0x91, 0x98, 0x63, 0xae, 
0xbf, 0x12, 0xa4, 0xd4, 0xcf, 0x25, 0xfa, 0xd9, 0x18, 0x13, 0xfd, + 0x16, 0x68, 0x0b, 0xfb, 0x53, 0xe6, 0x8d, 0x16, 0x9b, 0x82, 0x6b, 0x29, 0xce, 0x9d, 0x49, 0x34, + 0x17, 0x96, 0x53, 0x4d, 0x35, 0xd1, 0x4c, 0x71, 0xa9, 0xf9, 0x05, 0x54, 0x78, 0x3d, 0xc4, 0xcc, + 0xf1, 0x67, 0x24, 0x88, 0x45, 0x5d, 0xa8, 0xb8, 0xbc, 0xc0, 0xac, 0x18, 0x7d, 0x07, 0x40, 0x79, + 0x7c, 0x84, 0x5d, 0xcf, 0xa8, 0x28, 0x89, 0xda, 0xe1, 0x67, 0x19, 0x62, 0x2c, 0x36, 0x60, 0x5f, + 0xfc, 0x1d, 0x5e, 0xcf, 0x28, 0x2e, 0xd1, 0xf4, 0x13, 0x7d, 0x0f, 0xd5, 0x49, 0x18, 0xbd, 0x77, + 0xa2, 0x31, 0x11, 0xa0, 0x3c, 0x36, 0xee, 0x67, 0x2c, 0x1c, 0x27, 0x72, 0x31, 0xfd, 0xe4, 0x13, + 0x5c, 0x99, 0x64, 0xc6, 0xe8, 0x25, 0xa0, 0x74, 0xbe, 0xa8, 0xf2, 0xc4, 0x48, 0x51, 0x18, 0xd9, + 0xbb, 0x6d, 0x84, 0x1f, 0xd2, 0xa9, 0x21, 0x6d, 0x72, 0x03, 0x43, 0xbf, 0x82, 0x4a, 0x4c, 0x19, + 0xf3, 0xa8, 0x34, 0x53, 0x12, 0x66, 0x76, 0x57, 0xee, 0x34, 0x5c, 0x9c, 0x5a, 0x28, 0xc7, 0xcb, + 0x21, 0x3a, 0x82, 0x2d, 0xcf, 0x0d, 0xae, 0xb2, 0x6e, 0x80, 0x98, 0x5f, 0xcf, 0xcc, 0xef, 0xb9, + 0xc1, 0x55, 0xd6, 0x87, 0xaa, 0x97, 0x05, 0xf4, 0x5f, 0x43, 0x69, 0xb1, 0x4b, 0xa8, 0x0c, 0x9b, + 0x67, 0xd6, 0x4b, 0xcb, 0x7e, 0x65, 0x69, 0x9f, 0xa0, 0x22, 0xa8, 0x03, 0xc3, 0xea, 0x6a, 0x0a, + 0x87, 0xb1, 0xd1, 0x31, 0xcc, 0x73, 0x43, 0xcb, 0xf1, 0xc1, 0xb1, 0x8d, 0x5f, 0xb5, 0x71, 0x57, + 0xcb, 0x1f, 0x6d, 0x42, 0x41, 0xac, 0xab, 0xff, 0x5d, 0x81, 0xa2, 0xc8, 0x60, 0x30, 0x09, 0xd1, + 0x4f, 0x60, 0x41, 0x2e, 0x71, 0xb8, 0xf1, 0x86, 0x2b, 0x58, 0x57, 0xc5, 0x0b, 0xc2, 0x0c, 0x25, + 0xce, 0x95, 0x17, 0xd4, 0x58, 0x28, 0xe7, 0x12, 0xe5, 0x54, 0xb0, 0x50, 0x7e, 0x92, 0xb1, 0xbc, + 0x72, 0xe4, 0xa8, 0x78, 0x2b, 0x15, 0xa4, 0x27, 0xec, 0x93, 0x8c, 0xe1, 0x95, 0x93, 0x58, 0xc5, + 0x5b, 0xa9, 0x40, 0xea, 0xea, 0xbf, 0x80, 0x4a, 0x36, 0xe7, 0xe8, 0x31, 0xa8, 0x6e, 0x30, 0x09, + 0x65, 0x21, 0xee, 0xdc, 0x20, 0x17, 0x0f, 0x12, 0x0b, 0x05, 0x1d, 0x81, 0x76, 0x33, 0xcf, 0x7a, + 0x15, 0xca, 0x99, 0xa4, 0xe9, 0xff, 0x54, 0xa0, 0xba, 0x92, 0x84, 
0xff, 0xda, 0x3a, 0xfa, 0x0e, + 0x2a, 0xef, 0xdd, 0x88, 0x92, 0x6c, 0xfb, 0xaf, 0x1d, 0x36, 0x56, 0xdb, 0x7f, 0xfa, 0xbf, 0x13, + 0x8e, 0x29, 0x2e, 0x73, 0x7d, 0x09, 0xa0, 0xdf, 0x40, 0x4d, 0xce, 0x24, 0x63, 0xca, 0x1c, 0xd7, + 0x13, 0x5b, 0x55, 0x5b, 0xa1, 0x87, 0xd4, 0xed, 0x0a, 0x39, 0xae, 0x4e, 0xb2, 0x43, 0xf4, 0xd5, + 0xd2, 0x40, 0xcc, 0x22, 0x37, 0xb8, 0x14, 0xfb, 0x57, 0x5a, 0xa8, 0x0d, 0x04, 0xc8, 0x1b, 0x79, + 0x55, 0x5e, 0x1e, 0x07, 0xcc, 0x61, 0xf3, 0x18, 0x7d, 0x0d, 0x85, 0x98, 0x39, 0xf2, 0x24, 0xab, + 0xad, 0xd4, 0x56, 0x46, 0x91, 0xe2, 0x44, 0x6b, 0xe5, 0xf6, 0x93, 0xbb, 0x75, 0xfb, 0x29, 0xf0, + 0x13, 0x23, 0x39, 0x45, 0xcb, 0x87, 0x48, 0x06, 0x7f, 0x32, 0xec, 0x75, 0xda, 0x8c, 0x51, 0x7f, + 0xc6, 0x70, 0xa2, 0x90, 0x74, 0xb7, 0x27, 0x7f, 0x54, 0xa1, 0xba, 0x12, 0xd4, 0x2a, 0xab, 0xab, + 0x50, 0xb2, 0x6c, 0xd2, 0x35, 0x86, 0x6d, 0xb3, 0xa7, 0x29, 0x48, 0x83, 0x8a, 0x6d, 0x99, 0xb6, + 0x45, 0xba, 0x46, 0xc7, 0xee, 0x72, 0x7e, 0x7f, 0x0a, 0xdb, 0x3d, 0xd3, 0x7a, 0x49, 0x2c, 0x7b, + 0x48, 0x8c, 0x9e, 0xf9, 0xdc, 0x3c, 0xea, 0x19, 0x5a, 0x1e, 0xdd, 0x03, 0xcd, 0xb6, 0x48, 0xe7, + 0xa4, 0x6d, 0x5a, 0x64, 0x68, 0x9e, 0x1a, 0xf6, 0xd9, 0x50, 0x53, 0x39, 0xca, 0x1d, 0x21, 0xc6, + 0xeb, 0x8e, 0x61, 0x74, 0x07, 0xe4, 0xb4, 0xfd, 0x5a, 0x2b, 0xa0, 0x3a, 0xdc, 0x33, 0xad, 0xc1, + 0xd9, 0xf1, 0xb1, 0xd9, 0x31, 0x0d, 0x6b, 0x48, 0x8e, 0xda, 0xbd, 0xb6, 0xd5, 0x31, 0xb4, 0x0d, + 0xb4, 0x0b, 0xc8, 0xb4, 0x3a, 0xf6, 0x69, 0xbf, 0x67, 0x0c, 0x0d, 0x92, 0xd6, 0xd1, 0x26, 0xda, + 0x81, 0x2d, 0x61, 0xa7, 0xdd, 0xed, 0x92, 0xe3, 0xb6, 0xd9, 0x33, 0xba, 0x5a, 0x91, 0x7b, 0x22, + 0x35, 0x06, 0xa4, 0x6b, 0x0e, 0xda, 0x47, 0x1c, 0x2e, 0xf1, 0x35, 0x4d, 0xeb, 0xdc, 0x36, 0x3b, + 0x06, 0xe9, 0x70, 0xb3, 0x1c, 0x05, 0xae, 0x9c, 0xa2, 0x67, 0x56, 0xd7, 0xc0, 0xfd, 0xb6, 0xd9, + 0xd5, 0xca, 0x68, 0x0f, 0xee, 0xa7, 0xb0, 0xf1, 0xba, 0x6f, 0xe2, 0x37, 0x64, 0x68, 0xdb, 0x64, + 0x60, 0xdb, 0x96, 0x56, 0xc9, 0x5a, 0xe2, 0xd1, 0xda, 0x7d, 0xc3, 0xd2, 0xaa, 0xe8, 0x3e, 0xec, + 0x9c, 
0xf6, 0xfb, 0x24, 0x95, 0xa4, 0xc1, 0xd6, 0xb8, 0x7a, 0xbb, 0xdb, 0xc5, 0xc6, 0x60, 0x40, + 0x4e, 0xcd, 0xc1, 0x69, 0x7b, 0xd8, 0x39, 0xd1, 0xb6, 0x78, 0x48, 0x03, 0x63, 0x48, 0x86, 0xf6, + 0xb0, 0xdd, 0x5b, 0xe2, 0x1a, 0x77, 0x68, 0x89, 0xf3, 0x45, 0x7b, 0xf6, 0x2b, 0x6d, 0x9b, 0x6f, + 0x38, 0x87, 0xed, 0x73, 0xe9, 0x22, 0xe2, 0xb1, 0xcb, 0xf4, 0xa4, 0x6b, 0x6a, 0x3b, 0x1c, 0x34, + 0xad, 0xf3, 0x76, 0xcf, 0xec, 0x92, 0x97, 0xc6, 0x1b, 0x71, 0x0e, 0xdd, 0xe3, 0x60, 0xe2, 0x19, + 0xe9, 0x63, 0xfb, 0x39, 0x77, 0x44, 0xfb, 0x14, 0x21, 0xa8, 0x75, 0x4c, 0xdc, 0x39, 0xeb, 0xb5, + 0x31, 0xc1, 0xf6, 0xd9, 0xd0, 0xd0, 0x76, 0x9f, 0xfc, 0x4d, 0x81, 0x4a, 0x96, 0x67, 0x3c, 0xeb, + 0xa6, 0x45, 0x8e, 0x7b, 0xe6, 0xf3, 0x93, 0x61, 0x42, 0x82, 0xc1, 0x59, 0x87, 0xa7, 0xcc, 0xe0, + 0xe7, 0x1b, 0x82, 0x5a, 0xb2, 0xe9, 0x8b, 0x60, 0x73, 0x7c, 0x2d, 0x89, 0x59, 0xb6, 0xb4, 0x9b, + 0xe7, 0xce, 0x4b, 0xd0, 0xc0, 0xd8, 0xc6, 0x9a, 0x8a, 0xbe, 0x84, 0xa6, 0x44, 0x78, 0x5e, 0x31, + 0x36, 0x3a, 0x43, 0xd2, 0x6f, 0xbf, 0x39, 0xe5, 0x69, 0x4f, 0x48, 0x36, 0xd0, 0x0a, 0xe8, 0x73, + 0xd8, 0x5b, 0x68, 0xad, 0xe3, 0xc5, 0xe1, 0x5f, 0x36, 0x61, 0x43, 0xb4, 0xf9, 0x08, 0xfd, 0x16, + 0xaa, 0x99, 0x67, 0xec, 0xf9, 0x21, 0x7a, 0xf8, 0xc1, 0x07, 0x6e, 0x23, 0x7d, 0x0c, 0x48, 0xf8, + 0x99, 0x82, 0x8e, 0xa0, 0x96, 0x7d, 0xcf, 0x9d, 0x1f, 0xa2, 0x6c, 0x77, 0x5c, 0xf3, 0xd4, 0x5b, + 0x63, 0xe3, 0x25, 0x68, 0x46, 0xcc, 0x5c, 0x9f, 0x17, 0xa9, 0x7c, 0x71, 0xa1, 0x46, 0xc6, 0xca, + 0x8d, 0x67, 0x5c, 0x63, 0x6f, 0xad, 0x4c, 0xde, 0x6b, 0x7a, 0xfc, 0x40, 0x5c, 0xbc, 0x79, 0x6e, + 0x05, 0xb4, 0xfa, 0xd0, 0x6a, 0x7c, 0x76, 0x97, 0x58, 0x5a, 0x1b, 0xc3, 0xce, 0x9a, 0x67, 0x0c, + 0xfa, 0x2a, 0xeb, 0xc1, 0x9d, 0x8f, 0xa0, 0xc6, 0xa3, 0x8f, 0xa9, 0x2d, 0x57, 0x59, 0xf3, 0xde, + 0x59, 0x59, 0xe5, 0xee, 0xd7, 0xd2, 0xca, 0x2a, 0x1f, 0x7a, 0x36, 0xbd, 0x05, 0xed, 0xe6, 0xf5, + 0x18, 0xe9, 0x37, 0xe7, 0xde, 0xbe, 0xa7, 0x37, 0xfe, 0xff, 0x83, 0x3a, 0xd2, 0xb8, 0x09, 0xb0, + 0xbc, 0x64, 0xa2, 0x07, 0x99, 0x29, 0xb7, 
0x2e, 0xc9, 0x8d, 0x87, 0x77, 0x48, 0xa5, 0xa9, 0x21, + 0xec, 0xac, 0xb9, 0x75, 0xae, 0xec, 0xc6, 0xdd, 0xb7, 0xd2, 0xc6, 0xbd, 0x75, 0x97, 0xb3, 0x67, + 0x0a, 0x3a, 0x4d, 0x78, 0x91, 0xfe, 0xa4, 0xf2, 0x11, 0xa2, 0xd7, 0xd7, 0x37, 0x91, 0x79, 0xac, + 0xe7, 0xff, 0x94, 0x53, 0x9e, 0x29, 0xc8, 0x86, 0x4a, 0x96, 0xdc, 0x1f, 0x65, 0xfd, 0xc7, 0x0c, + 0x1e, 0x7d, 0xf3, 0xbb, 0x83, 0x4b, 0x97, 0x4d, 0xe7, 0x17, 0xfb, 0xa3, 0xd0, 0x3f, 0x10, 0xbf, + 0x64, 0x04, 0x6e, 0x70, 0x19, 0x50, 0xf6, 0x3e, 0x8c, 0xae, 0x0e, 0xbc, 0x60, 0x7c, 0x20, 0xea, + 0xe6, 0x60, 0x61, 0xe7, 0x62, 0x43, 0xfc, 0x34, 0xf9, 0xd3, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, + 0x16, 0x00, 0xbc, 0x19, 0xca, 0x14, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -1476,14 +1903,14 @@ const _ = grpc.SupportPackageIsVersion4 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type RouterClient interface { //* - //SendPayment attempts to route a payment described by the passed + //SendPaymentV2 attempts to route a payment described by the passed //PaymentRequest to the final destination. The call returns a stream of - //payment status updates. - SendPayment(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentClient, error) + //payment updates. + SendPaymentV2(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentV2Client, error) //* - //TrackPayment returns an update stream for the payment identified by the + //TrackPaymentV2 returns an update stream for the payment identified by the //payment hash. 
- TrackPayment(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentClient, error) + TrackPaymentV2(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentV2Client, error) //* //EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it //may cost to send an HTLC to the target end destination. @@ -1502,10 +1929,27 @@ type RouterClient interface { //It is a development feature. QueryMissionControl(ctx context.Context, in *QueryMissionControlRequest, opts ...grpc.CallOption) (*QueryMissionControlResponse, error) //* + //QueryProbability returns the current success probability estimate for a + //given node pair and amount. + QueryProbability(ctx context.Context, in *QueryProbabilityRequest, opts ...grpc.CallOption) (*QueryProbabilityResponse, error) + //* //BuildRoute builds a fully specified route based on a list of hop public //keys. It retrieves the relevant channel policies from the graph in order to //calculate the correct fees and time locks. BuildRoute(ctx context.Context, in *BuildRouteRequest, opts ...grpc.CallOption) (*BuildRouteResponse, error) + //* + //SubscribeHtlcEvents creates a uni-directional stream from the server to + //the client which delivers a stream of htlc events. + SubscribeHtlcEvents(ctx context.Context, in *SubscribeHtlcEventsRequest, opts ...grpc.CallOption) (Router_SubscribeHtlcEventsClient, error) + //* + //Deprecated, use SendPaymentV2. SendPayment attempts to route a payment + //described by the passed PaymentRequest to the final destination. The call + //returns a stream of payment status updates. + SendPayment(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentClient, error) + //* + //Deprecated, use TrackPaymentV2. TrackPayment returns an update stream for + //the payment identified by the payment hash. 
+ TrackPayment(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentClient, error) } type routerClient struct { @@ -1516,12 +1960,12 @@ func NewRouterClient(cc *grpc.ClientConn) RouterClient { return &routerClient{cc} } -func (c *routerClient) SendPayment(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[0], "/routerrpc.Router/SendPayment", opts...) +func (c *routerClient) SendPaymentV2(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentV2Client, error) { + stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[0], "/routerrpc.Router/SendPaymentV2", opts...) if err != nil { return nil, err } - x := &routerSendPaymentClient{stream} + x := &routerSendPaymentV2Client{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -1531,29 +1975,29 @@ func (c *routerClient) SendPayment(ctx context.Context, in *SendPaymentRequest, return x, nil } -type Router_SendPaymentClient interface { - Recv() (*PaymentStatus, error) +type Router_SendPaymentV2Client interface { + Recv() (*lnrpc.Payment, error) grpc.ClientStream } -type routerSendPaymentClient struct { +type routerSendPaymentV2Client struct { grpc.ClientStream } -func (x *routerSendPaymentClient) Recv() (*PaymentStatus, error) { - m := new(PaymentStatus) +func (x *routerSendPaymentV2Client) Recv() (*lnrpc.Payment, error) { + m := new(lnrpc.Payment) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } -func (c *routerClient) TrackPayment(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[1], "/routerrpc.Router/TrackPayment", opts...) 
+func (c *routerClient) TrackPaymentV2(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentV2Client, error) { + stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[1], "/routerrpc.Router/TrackPaymentV2", opts...) if err != nil { return nil, err } - x := &routerTrackPaymentClient{stream} + x := &routerTrackPaymentV2Client{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -1563,17 +2007,17 @@ func (c *routerClient) TrackPayment(ctx context.Context, in *TrackPaymentRequest return x, nil } -type Router_TrackPaymentClient interface { - Recv() (*PaymentStatus, error) +type Router_TrackPaymentV2Client interface { + Recv() (*lnrpc.Payment, error) grpc.ClientStream } -type routerTrackPaymentClient struct { +type routerTrackPaymentV2Client struct { grpc.ClientStream } -func (x *routerTrackPaymentClient) Recv() (*PaymentStatus, error) { - m := new(PaymentStatus) +func (x *routerTrackPaymentV2Client) Recv() (*lnrpc.Payment, error) { + m := new(lnrpc.Payment) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } @@ -1616,6 +2060,15 @@ func (c *routerClient) QueryMissionControl(ctx context.Context, in *QueryMission return out, nil } +func (c *routerClient) QueryProbability(ctx context.Context, in *QueryProbabilityRequest, opts ...grpc.CallOption) (*QueryProbabilityResponse, error) { + out := new(QueryProbabilityResponse) + err := c.cc.Invoke(ctx, "/routerrpc.Router/QueryProbability", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *routerClient) BuildRoute(ctx context.Context, in *BuildRouteRequest, opts ...grpc.CallOption) (*BuildRouteResponse, error) { out := new(BuildRouteResponse) err := c.cc.Invoke(ctx, "/routerrpc.Router/BuildRoute", in, out, opts...) 
@@ -1625,17 +2078,115 @@ func (c *routerClient) BuildRoute(ctx context.Context, in *BuildRouteRequest, op return out, nil } +func (c *routerClient) SubscribeHtlcEvents(ctx context.Context, in *SubscribeHtlcEventsRequest, opts ...grpc.CallOption) (Router_SubscribeHtlcEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[2], "/routerrpc.Router/SubscribeHtlcEvents", opts...) + if err != nil { + return nil, err + } + x := &routerSubscribeHtlcEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Router_SubscribeHtlcEventsClient interface { + Recv() (*HtlcEvent, error) + grpc.ClientStream +} + +type routerSubscribeHtlcEventsClient struct { + grpc.ClientStream +} + +func (x *routerSubscribeHtlcEventsClient) Recv() (*HtlcEvent, error) { + m := new(HtlcEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Deprecated: Do not use. +func (c *routerClient) SendPayment(ctx context.Context, in *SendPaymentRequest, opts ...grpc.CallOption) (Router_SendPaymentClient, error) { + stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[3], "/routerrpc.Router/SendPayment", opts...) + if err != nil { + return nil, err + } + x := &routerSendPaymentClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Router_SendPaymentClient interface { + Recv() (*PaymentStatus, error) + grpc.ClientStream +} + +type routerSendPaymentClient struct { + grpc.ClientStream +} + +func (x *routerSendPaymentClient) Recv() (*PaymentStatus, error) { + m := new(PaymentStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Deprecated: Do not use. 
+func (c *routerClient) TrackPayment(ctx context.Context, in *TrackPaymentRequest, opts ...grpc.CallOption) (Router_TrackPaymentClient, error) { + stream, err := c.cc.NewStream(ctx, &_Router_serviceDesc.Streams[4], "/routerrpc.Router/TrackPayment", opts...) + if err != nil { + return nil, err + } + x := &routerTrackPaymentClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Router_TrackPaymentClient interface { + Recv() (*PaymentStatus, error) + grpc.ClientStream +} + +type routerTrackPaymentClient struct { + grpc.ClientStream +} + +func (x *routerTrackPaymentClient) Recv() (*PaymentStatus, error) { + m := new(PaymentStatus) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + // RouterServer is the server API for Router service. type RouterServer interface { //* - //SendPayment attempts to route a payment described by the passed + //SendPaymentV2 attempts to route a payment described by the passed //PaymentRequest to the final destination. The call returns a stream of - //payment status updates. - SendPayment(*SendPaymentRequest, Router_SendPaymentServer) error + //payment updates. + SendPaymentV2(*SendPaymentRequest, Router_SendPaymentV2Server) error //* - //TrackPayment returns an update stream for the payment identified by the + //TrackPaymentV2 returns an update stream for the payment identified by the //payment hash. - TrackPayment(*TrackPaymentRequest, Router_TrackPaymentServer) error + TrackPaymentV2(*TrackPaymentRequest, Router_TrackPaymentV2Server) error //* //EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it //may cost to send an HTLC to the target end destination. @@ -1654,55 +2205,72 @@ type RouterServer interface { //It is a development feature. 
QueryMissionControl(context.Context, *QueryMissionControlRequest) (*QueryMissionControlResponse, error) //* + //QueryProbability returns the current success probability estimate for a + //given node pair and amount. + QueryProbability(context.Context, *QueryProbabilityRequest) (*QueryProbabilityResponse, error) + //* //BuildRoute builds a fully specified route based on a list of hop public //keys. It retrieves the relevant channel policies from the graph in order to //calculate the correct fees and time locks. BuildRoute(context.Context, *BuildRouteRequest) (*BuildRouteResponse, error) + //* + //SubscribeHtlcEvents creates a uni-directional stream from the server to + //the client which delivers a stream of htlc events. + SubscribeHtlcEvents(*SubscribeHtlcEventsRequest, Router_SubscribeHtlcEventsServer) error + //* + //Deprecated, use SendPaymentV2. SendPayment attempts to route a payment + //described by the passed PaymentRequest to the final destination. The call + //returns a stream of payment status updates. + SendPayment(*SendPaymentRequest, Router_SendPaymentServer) error + //* + //Deprecated, use TrackPaymentV2. TrackPayment returns an update stream for + //the payment identified by the payment hash. 
+ TrackPayment(*TrackPaymentRequest, Router_TrackPaymentServer) error } func RegisterRouterServer(s *grpc.Server, srv RouterServer) { s.RegisterService(&_Router_serviceDesc, srv) } -func _Router_SendPayment_Handler(srv interface{}, stream grpc.ServerStream) error { +func _Router_SendPaymentV2_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(SendPaymentRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(RouterServer).SendPayment(m, &routerSendPaymentServer{stream}) + return srv.(RouterServer).SendPaymentV2(m, &routerSendPaymentV2Server{stream}) } -type Router_SendPaymentServer interface { - Send(*PaymentStatus) error +type Router_SendPaymentV2Server interface { + Send(*lnrpc.Payment) error grpc.ServerStream } -type routerSendPaymentServer struct { +type routerSendPaymentV2Server struct { grpc.ServerStream } -func (x *routerSendPaymentServer) Send(m *PaymentStatus) error { +func (x *routerSendPaymentV2Server) Send(m *lnrpc.Payment) error { return x.ServerStream.SendMsg(m) } -func _Router_TrackPayment_Handler(srv interface{}, stream grpc.ServerStream) error { +func _Router_TrackPaymentV2_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(TrackPaymentRequest) if err := stream.RecvMsg(m); err != nil { return err } - return srv.(RouterServer).TrackPayment(m, &routerTrackPaymentServer{stream}) + return srv.(RouterServer).TrackPaymentV2(m, &routerTrackPaymentV2Server{stream}) } -type Router_TrackPaymentServer interface { - Send(*PaymentStatus) error +type Router_TrackPaymentV2Server interface { + Send(*lnrpc.Payment) error grpc.ServerStream } -type routerTrackPaymentServer struct { +type routerTrackPaymentV2Server struct { grpc.ServerStream } -func (x *routerTrackPaymentServer) Send(m *PaymentStatus) error { +func (x *routerTrackPaymentV2Server) Send(m *lnrpc.Payment) error { return x.ServerStream.SendMsg(m) } @@ -1778,6 +2346,24 @@ func _Router_QueryMissionControl_Handler(srv interface{}, ctx context.Context, d 
return interceptor(ctx, in, info, handler) } +func _Router_QueryProbability_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryProbabilityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RouterServer).QueryProbability(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/routerrpc.Router/QueryProbability", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RouterServer).QueryProbability(ctx, req.(*QueryProbabilityRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Router_BuildRoute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(BuildRouteRequest) if err := dec(in); err != nil { @@ -1796,6 +2382,69 @@ func _Router_BuildRoute_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } +func _Router_SubscribeHtlcEvents_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeHtlcEventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RouterServer).SubscribeHtlcEvents(m, &routerSubscribeHtlcEventsServer{stream}) +} + +type Router_SubscribeHtlcEventsServer interface { + Send(*HtlcEvent) error + grpc.ServerStream +} + +type routerSubscribeHtlcEventsServer struct { + grpc.ServerStream +} + +func (x *routerSubscribeHtlcEventsServer) Send(m *HtlcEvent) error { + return x.ServerStream.SendMsg(m) +} + +func _Router_SendPayment_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SendPaymentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RouterServer).SendPayment(m, &routerSendPaymentServer{stream}) +} + +type Router_SendPaymentServer interface { + Send(*PaymentStatus) error + 
grpc.ServerStream +} + +type routerSendPaymentServer struct { + grpc.ServerStream +} + +func (x *routerSendPaymentServer) Send(m *PaymentStatus) error { + return x.ServerStream.SendMsg(m) +} + +func _Router_TrackPayment_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(TrackPaymentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RouterServer).TrackPayment(m, &routerTrackPaymentServer{stream}) +} + +type Router_TrackPaymentServer interface { + Send(*PaymentStatus) error + grpc.ServerStream +} + +type routerTrackPaymentServer struct { + grpc.ServerStream +} + +func (x *routerTrackPaymentServer) Send(m *PaymentStatus) error { + return x.ServerStream.SendMsg(m) +} + var _Router_serviceDesc = grpc.ServiceDesc{ ServiceName: "routerrpc.Router", HandlerType: (*RouterServer)(nil), @@ -1816,12 +2465,31 @@ var _Router_serviceDesc = grpc.ServiceDesc{ MethodName: "QueryMissionControl", Handler: _Router_QueryMissionControl_Handler, }, + { + MethodName: "QueryProbability", + Handler: _Router_QueryProbability_Handler, + }, { MethodName: "BuildRoute", Handler: _Router_BuildRoute_Handler, }, }, Streams: []grpc.StreamDesc{ + { + StreamName: "SendPaymentV2", + Handler: _Router_SendPaymentV2_Handler, + ServerStreams: true, + }, + { + StreamName: "TrackPaymentV2", + Handler: _Router_TrackPaymentV2_Handler, + ServerStreams: true, + }, + { + StreamName: "SubscribeHtlcEvents", + Handler: _Router_SubscribeHtlcEvents_Handler, + ServerStreams: true, + }, { StreamName: "SendPayment", Handler: _Router_SendPayment_Handler, diff --git a/lnrpc/routerrpc/router.proto b/lnrpc/routerrpc/router.proto index 7a5012bb6d..673ffb986b 100644 --- a/lnrpc/routerrpc/router.proto +++ b/lnrpc/routerrpc/router.proto @@ -10,12 +10,23 @@ message SendPaymentRequest { /// The identity pubkey of the payment recipient bytes dest = 1; - /// Number of satoshis to send. + /** + Number of satoshis to send. + + The fields amt and amt_msat are mutually exclusive. 
+ */ int64 amt = 2; + /** + Number of millisatoshis to send. + + The fields amt and amt_msat are mutually exclusive. + */ + int64 amt_msat = 12; + /// The hash to use within the payment's HTLC bytes payment_hash = 3; - + /** The CLTV delta from the current height that should be used to set the timelock for the final hop. @@ -44,16 +55,34 @@ message SendPaymentRequest { If this field is left to the default value of 0, only zero-fee routes will be considered. This usually means single hop routes connecting directly to the destination. To send the payment without a fee limit, use max int here. + + The fields fee_limit_sat and fee_limit_msat are mutually exclusive. */ int64 fee_limit_sat = 7; + /** + The maximum number of millisatoshis that will be paid as a fee of the + payment. If this field is left to the default value of 0, only zero-fee + routes will be considered. This usually means single hop routes connecting + directly to the destination. To send the payment without a fee limit, use + max int here. + + The fields fee_limit_sat and fee_limit_msat are mutually exclusive. + */ + int64 fee_limit_msat = 13; + /** The channel id of the channel that must be taken to the first hop. If zero, any channel may be used. */ - uint64 outgoing_chan_id = 8; + uint64 outgoing_chan_id = 8 [jstype = JS_STRING]; - /** + /** + The pubkey of the last hop of the route. If empty, any hop may be used. + */ + bytes last_hop_pubkey = 14; + + /** An optional maximum total time lock for the route. This should not exceed lnd's `--max-cltv-expiry` setting. If zero, then the value of `--max-cltv-expiry` is enforced. @@ -63,72 +92,53 @@ message SendPaymentRequest { /** Optional route hints to reach the destination through private channels. */ - repeated lnrpc.RouteHint route_hints = 10 [json_name = "route_hints"]; + repeated lnrpc.RouteHint route_hints = 10; - /** + /** An optional field that can be used to pass an arbitrary set of TLV records to a peer which understands the new records. 
This can be used to pass - application specific data during the payment attempt. + application specific data during the payment attempt. Record types are + required to be in the custom range >= 65536. When using REST, the values + must be encoded as base64. */ - map dest_tlv = 11; -} + map dest_custom_records = 11; -message TrackPaymentRequest { - /// The hash of the payment to look up. - bytes payment_hash = 1; -} + /// If set, circular payments to self are permitted. + bool allow_self_payment = 15; -enum PaymentState { /** - Payment is still in flight. + Features assumed to be supported by the final node. All transitive feature + dependencies must also be set properly. For a given feature bit pair, either + optional or remote may be set, but not both. If this field is nil or empty, + the router will try to load destination features from the graph as a + fallback. */ - IN_FLIGHT = 0; + repeated lnrpc.FeatureBit dest_features = 16; /** - Payment completed successfully. + The maximum number of partial payments that may be use to complete the full + amount. */ - SUCCEEDED = 1; + uint32 max_parts = 17; /** - There are more routes to try, but the payment timeout was exceeded. + If set, only the final payment update is streamed back. Intermediate updates + that show which htlcs are still in flight are suppressed. */ - FAILED_TIMEOUT = 2; - - /** - All possible routes were tried and failed permanently. Or were no - routes to the destination at all. - */ - FAILED_NO_ROUTE = 3; - - /** - A non-recoverable error has occured. - */ - FAILED_ERROR = 4; - - /** - Payment details incorrect (unknown hash, invalid amt or - invalid final cltv delta) - */ - FAILED_INCORRECT_PAYMENT_DETAILS = 5; + bool no_inflight_updates = 18; } - -message PaymentStatus { - /// Current state the payment is in. - PaymentState state = 1; - - /** - The pre-image of the payment when state is SUCCEEDED. - */ - bytes preimage = 2; +message TrackPaymentRequest { + /// The hash of the payment to look up. 
+ bytes payment_hash = 1; /** - The taken route when state is SUCCEEDED. + If set, only the final payment update is streamed back. Intermediate updates + that show which htlcs are still in flight are suppressed. */ - lnrpc.Route route = 3; + bool no_inflight_updates = 2; } - message RouteFeeRequest { /** The destination once wishes to obtain a routing fee quote to. @@ -169,296 +179,382 @@ message SendToRouteResponse { bytes preimage = 1; /// The failure message in case the payment failed. - Failure failure = 2; + lnrpc.Failure failure = 2; } -message Failure { - enum FailureCode { - /** - The numbers assigned in this enumeration match the failure codes as - defined in BOLT #4. Because protobuf 3 requires enums to start with 0, - a RESERVED value is added. - */ - RESERVED = 0; - - INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS = 1; - INCORRECT_PAYMENT_AMOUNT = 2; - FINAL_INCORRECT_CLTV_EXPIRY = 3; - FINAL_INCORRECT_HTLC_AMOUNT = 4; - FINAL_EXPIRY_TOO_SOON = 5; - INVALID_REALM = 6; - EXPIRY_TOO_SOON = 7; - INVALID_ONION_VERSION = 8; - INVALID_ONION_HMAC = 9; - INVALID_ONION_KEY = 10; - AMOUNT_BELOW_MINIMUM = 11; - FEE_INSUFFICIENT = 12; - INCORRECT_CLTV_EXPIRY = 13; - CHANNEL_DISABLED = 14; - TEMPORARY_CHANNEL_FAILURE = 15; - REQUIRED_NODE_FEATURE_MISSING = 16; - REQUIRED_CHANNEL_FEATURE_MISSING = 17; - UNKNOWN_NEXT_PEER = 18; - TEMPORARY_NODE_FAILURE = 19; - PERMANENT_NODE_FAILURE = 20; - PERMANENT_CHANNEL_FAILURE = 21; - EXPIRY_TOO_FAR = 22; - - /** - The error source is known, but the failure itself couldn't be decoded. - */ - UNKNOWN_FAILURE = 998; - - /** - An unreadable failure result is returned if the received failure message - cannot be decrypted. In that case the error source is unknown. - */ - UNREADABLE_FAILURE = 999; - } - - /// Failure code as defined in the Lightning spec - FailureCode code = 1; +message ResetMissionControlRequest { +} - reserved 2; +message ResetMissionControlResponse { +} - /// An optional channel update message. 
- ChannelUpdate channel_update = 3; +message QueryMissionControlRequest { +} - /// A failure type-dependent htlc value. - uint64 htlc_msat = 4; +/// QueryMissionControlResponse contains mission control state. +message QueryMissionControlResponse { + reserved 1; - /// The sha256 sum of the onion payload. - bytes onion_sha_256 = 5; + /// Node pair-level mission control state. + repeated PairHistory pairs = 2; +} - /// A failure type-dependent cltv expiry value. - uint32 cltv_expiry = 6; +/// PairHistory contains the mission control state for a particular node pair. +message PairHistory { + /// The source node pubkey of the pair. + bytes node_from = 1; - /// A failure type-dependent flags value. - uint32 flags = 7; + /// The destination node pubkey of the pair. + bytes node_to = 2; - /** - The position in the path of the intermediate or final node that generated - the failure message. Position zero is the sender node. - **/ - uint32 failure_source_index = 8; + reserved 3, 4, 5, 6; - /// A failure type-dependent block height. - uint32 height = 9; + PairData history = 7; } +message PairData { + /// Time of last failure. + int64 fail_time = 1; -message ChannelUpdate { /** - The signature that validates the announced data and proves the ownership - of node id. + Lowest amount that failed to forward rounded to whole sats. This may be + set to zero if the failure is independent of amount. */ - bytes signature = 1; + int64 fail_amt_sat = 2; /** - The target chain that this channel was opened within. This value - should be the genesis hash of the target chain. Along with the short - channel ID, this uniquely identifies the channel globally in a - blockchain. + Lowest amount that failed to forward in millisats. This may be + set to zero if the failure is independent of amount. */ - bytes chain_hash = 2; + int64 fail_amt_msat = 4; + + reserved 3; + + /// Time of last success. 
+ int64 success_time = 5; + /// Highest amount that we could successfully forward rounded to whole sats. + int64 success_amt_sat = 6; + + /// Highest amount that we could successfully forward in millisats. + int64 success_amt_msat = 7; +} + +message QueryProbabilityRequest { + /// The source node pubkey of the pair. + bytes from_node = 1; + + /// The destination node pubkey of the pair. + bytes to_node = 2; + + /// The amount for which to calculate a probability. + int64 amt_msat = 3; +} + +message QueryProbabilityResponse { + /// The success probability for the requested pair. + double probability = 1; + + /// The historical data for the requested pair. + PairData history = 2; +} + +message BuildRouteRequest { /** - The unique description of the funding transaction. + The amount to send expressed in msat. If set to zero, the minimum routable + amount is used. */ - uint64 chan_id = 3; + int64 amt_msat = 1; /** - A timestamp that allows ordering in the case of multiple announcements. - We should ignore the message if timestamp is not greater than the - last-received. + CLTV delta from the current height that should be used for the timelock + of the final hop */ - uint32 timestamp = 4; + int32 final_cltv_delta = 2; /** - The bitfield that describes whether optional fields are present in this - update. Currently, the least-significant bit must be set to 1 if the - optional field MaxHtlc is present. + The channel id of the channel that must be taken to the first hop. If zero, + any channel may be used. */ - uint32 message_flags = 10; + uint64 outgoing_chan_id = 3 [jstype = JS_STRING]; /** - The bitfield that describes additional meta-data concerning how the - update is to be interpreted. Currently, the least-significant bit must be - set to 0 if the creating node corresponds to the first node in the - previously sent channel announcement and 1 otherwise. If the second bit - is set, then the channel is set to be disabled. + A list of hops that defines the route. 
This does not include the source hop + pubkey. */ - uint32 channel_flags = 5; + repeated bytes hop_pubkeys = 4; +} +message BuildRouteResponse { /** - The minimum number of blocks this node requires to be added to the expiry - of HTLCs. This is a security parameter determined by the node operator. - This value represents the required gap between the time locks of the - incoming and outgoing HTLC's set to this node. + Fully specified route that can be used to execute the payment. */ - uint32 time_lock_delta = 6; + lnrpc.Route route = 1; +} +message SubscribeHtlcEventsRequest { +} + +/** +HtlcEvent contains the htlc event that was processed. These are served on a +best-effort basis; events are not persisted, delivery is not guaranteed +(in the event of a crash in the switch, forward events may be lost) and +some events may be replayed upon restart. Events consumed from this package +should be de-duplicated by the htlc's unique combination of incoming and +outgoing channel id and htlc id. [EXPERIMENTAL] +*/ +message HtlcEvent { /** - The minimum HTLC value which will be accepted. + The short channel id that the incoming htlc arrived at our node on. This + value is zero for sends. */ - uint64 htlc_minimum_msat = 7; + uint64 incoming_channel_id = 1; /** - The base fee that must be used for incoming HTLC's to this particular - channel. This value will be tacked onto the required for a payment - independent of the size of the payment. + The short channel id that the outgoing htlc left our node on. This value + is zero for receives. */ - uint32 base_fee = 8; + uint64 outgoing_channel_id = 2; /** - The fee rate that will be charged per millionth of a satoshi. + Incoming id is the index of the incoming htlc in the incoming channel. + This value is zero for sends. */ - uint32 fee_rate = 9; - + uint64 incoming_htlc_id = 3; + /** - The maximum HTLC value which will be accepted. + Outgoing id is the index of the outgoing htlc in the outgoing channel. 
+ This value is zero for receives. */ - uint64 htlc_maximum_msat = 11; - + uint64 outgoing_htlc_id = 4; + /** - The set of data that was appended to this message, some of which we may - not actually know how to iterate or parse. By holding onto this data, we - ensure that we're able to properly validate the set of signatures that - cover these new fields, and ensure we're able to make upgrades to the - network in a forwards compatible manner. + The time in unix nanoseconds that the event occurred. */ - bytes extra_opaque_data = 12; + uint64 timestamp_ns = 5; + + enum EventType { + UNKNOWN = 0; + SEND = 1; + RECEIVE = 2; + FORWARD = 3; + } + + /** + The event type indicates whether the htlc was part of a send, receive or + forward. + */ + EventType event_type = 6; + + oneof event { + ForwardEvent forward_event = 7; + ForwardFailEvent forward_fail_event = 8; + SettleEvent settle_event = 9; + LinkFailEvent link_fail_event = 10; + } } -message ResetMissionControlRequest{} -message ResetMissionControlResponse{} +message HtlcInfo { + // The timelock on the incoming htlc. + uint32 incoming_timelock = 1; -message QueryMissionControlRequest {} + // The timelock on the outgoing htlc. + uint32 outgoing_timelock = 2; -/// QueryMissionControlResponse contains mission control state. -message QueryMissionControlResponse { - /// Node-level mission control state. - repeated NodeHistory nodes = 1 [json_name = "nodes"]; + // The amount of the incoming htlc. + uint64 incoming_amt_msat = 3; - /// Node pair-level mission control state. - repeated PairHistory pairs = 2 [json_name = "pairs"]; + // The amount of the outgoing htlc. + uint64 outgoing_amt_msat = 4; } -/// NodeHistory contains the mission control state for a particular node. -message NodeHistory { - /// Node pubkey - bytes pubkey = 1 [json_name = "pubkey"]; - - /// Time stamp of last failure. Set to zero if no failure happened yet. 
- int64 last_fail_time = 2 [json_name = "last_fail_time"]; +message ForwardEvent { + // Info contains details about the htlc that was forwarded. + HtlcInfo info = 1; +} - /** - Estimation of success probability of forwarding towards peers of this node - for which no specific history is available. - **/ - float other_success_prob = 3 [json_name = "other_success_prob"]; +message ForwardFailEvent { +} - reserved 4; +message SettleEvent { } -/// PairHistory contains the mission control state for a particular node pair. -message PairHistory { - /// The source node pubkey of the pair. - bytes node_from = 1 [json_name ="node_from"]; - - /// The destination node pubkey of the pair. - bytes node_to = 2 [json_name="node_to"]; - - /// Time stamp of last result. - int64 timestamp = 3 [json_name = "timestamp"]; +message LinkFailEvent { + // Info contains details about the htlc that we failed. + HtlcInfo info = 1; - /// Minimum penalization amount (only applies to failed attempts). - int64 min_penalize_amt_sat = 4 [json_name = "min_penalize_amt_sat"]; + // FailureCode is the BOLT error code for the failure. + lnrpc.Failure.FailureCode wire_failure = 2; - /// Estimation of success probability for this pair. - float success_prob = 5 [json_name = "success_prob"]; + /** + FailureDetail provides additional information about the reason for the + failure. This detail enriches the information provided by the wire message + and may be 'no detail' if the wire message requires no additional metadata. + */ + FailureDetail failure_detail = 3; - /// Whether the last payment attempt through this pair was successful. - bool last_attempt_successful = 6 [json_name = "last_attempt_successful"]; + // A string representation of the link failure. 
+ string failure_string = 4; } -message BuildRouteRequest { +enum FailureDetail { + UNKNOWN = 0; + NO_DETAIL = 1; + ONION_DECODE = 2; + LINK_NOT_ELIGIBLE = 3; + ON_CHAIN_TIMEOUT = 4; + HTLC_EXCEEDS_MAX = 5; + INSUFFICIENT_BALANCE = 6; + INCOMPLETE_FORWARD = 7; + HTLC_ADD_FAILED = 8; + FORWARDS_DISABLED = 9; + INVOICE_CANCELED = 10; + INVOICE_UNDERPAID = 11; + INVOICE_EXPIRY_TOO_SOON = 12; + INVOICE_NOT_OPEN = 13; + MPP_INVOICE_TIMEOUT = 14; + ADDRESS_MISMATCH = 15; + SET_TOTAL_MISMATCH = 16; + SET_TOTAL_TOO_LOW = 17; + SET_OVERPAID = 18; + UNKNOWN_INVOICE = 19; + INVALID_KEYSEND = 20; + MPP_IN_PROGRESS = 21; + CIRCULAR_ROUTE = 22; +} + +enum PaymentState { /** - The amount to send expressed in msat. If set to zero, the minimum routable - amount is used. + Payment is still in flight. */ - int64 amt_msat = 1; + IN_FLIGHT = 0; /** - CLTV delta from the current height that should be used for the timelock - of the final hop + Payment completed successfully. */ - int32 final_cltv_delta = 2; + SUCCEEDED = 1; /** - The channel id of the channel that must be taken to the first hop. If zero, - any channel may be used. + There are more routes to try, but the payment timeout was exceeded. + */ + FAILED_TIMEOUT = 2; + + /** + All possible routes were tried and failed permanently. Or were no + routes to the destination at all. */ - uint64 outgoing_chan_id = 3; + FAILED_NO_ROUTE = 3; /** - A list of hops that defines the route. This does not include the source hop - pubkey. + A non-recoverable error has occured. */ - repeated bytes hop_pubkeys = 4; + FAILED_ERROR = 4; + + /** + Payment details incorrect (unknown hash, invalid amt or + invalid final cltv delta) + */ + FAILED_INCORRECT_PAYMENT_DETAILS = 5; + + /** + Insufficient local balance. + */ + FAILED_INSUFFICIENT_BALANCE = 6; } -message BuildRouteResponse { +message PaymentStatus { + /// Current state the payment is in. + PaymentState state = 1; + /** - Fully specified route that can be used to execute the payment. 
+ The pre-image of the payment when state is SUCCEEDED. */ - lnrpc.Route route = 1; + bytes preimage = 2; + + reserved 3; + + /** + The HTLCs made in attempt to settle the payment [EXPERIMENTAL]. + */ + repeated lnrpc.HTLCAttempt htlcs = 4; } service Router { /** - SendPayment attempts to route a payment described by the passed + SendPaymentV2 attempts to route a payment described by the passed PaymentRequest to the final destination. The call returns a stream of - payment status updates. + payment updates. */ - rpc SendPayment(SendPaymentRequest) returns (stream PaymentStatus); + rpc SendPaymentV2 (SendPaymentRequest) returns (stream lnrpc.Payment); /** - TrackPayment returns an update stream for the payment identified by the + TrackPaymentV2 returns an update stream for the payment identified by the payment hash. */ - rpc TrackPayment(TrackPaymentRequest) returns (stream PaymentStatus); - + rpc TrackPaymentV2 (TrackPaymentRequest) returns (stream lnrpc.Payment); /** EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it may cost to send an HTLC to the target end destination. */ - rpc EstimateRouteFee(RouteFeeRequest) returns (RouteFeeResponse); + rpc EstimateRouteFee (RouteFeeRequest) returns (RouteFeeResponse); /** SendToRoute attempts to make a payment via the specified route. This method differs from SendPayment in that it allows users to specify a full route manually. This can be used for things like rebalancing, and atomic swaps. */ - rpc SendToRoute(SendToRouteRequest) returns (SendToRouteResponse); + rpc SendToRoute (SendToRouteRequest) returns (SendToRouteResponse); /** ResetMissionControl clears all mission control state and starts with a clean slate. */ - rpc ResetMissionControl(ResetMissionControlRequest) returns (ResetMissionControlResponse); - + rpc ResetMissionControl (ResetMissionControlRequest) + returns (ResetMissionControlResponse); + /** QueryMissionControl exposes the internal mission control state to callers. 
It is a development feature. */ - rpc QueryMissionControl(QueryMissionControlRequest) returns (QueryMissionControlResponse); + rpc QueryMissionControl (QueryMissionControlRequest) + returns (QueryMissionControlResponse); + + /** + QueryProbability returns the current success probability estimate for a + given node pair and amount. + */ + rpc QueryProbability (QueryProbabilityRequest) + returns (QueryProbabilityResponse); /** BuildRoute builds a fully specified route based on a list of hop public keys. It retrieves the relevant channel policies from the graph in order to calculate the correct fees and time locks. */ - rpc BuildRoute(BuildRouteRequest) returns (BuildRouteResponse); + rpc BuildRoute (BuildRouteRequest) returns (BuildRouteResponse); + + /** + SubscribeHtlcEvents creates a uni-directional stream from the server to + the client which delivers a stream of htlc events. + */ + rpc SubscribeHtlcEvents (SubscribeHtlcEventsRequest) + returns (stream HtlcEvent); + + /** + Deprecated, use SendPaymentV2. SendPayment attempts to route a payment + described by the passed PaymentRequest to the final destination. The call + returns a stream of payment status updates. + */ + rpc SendPayment(SendPaymentRequest) returns (stream PaymentStatus) { + option deprecated = true; + } + + /** + Deprecated, use TrackPaymentV2. TrackPayment returns an update stream for + the payment identified by the payment hash. 
+ */ + rpc TrackPayment(TrackPaymentRequest) returns (stream PaymentStatus) { + option deprecated = true; + } } diff --git a/lnrpc/routerrpc/router_backend.go b/lnrpc/routerrpc/router_backend.go index 5a036383a3..354e5e29d3 100644 --- a/lnrpc/routerrpc/router_backend.go +++ b/lnrpc/routerrpc/router_backend.go @@ -12,11 +12,15 @@ import ( "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/routing" "github.com/lightningnetwork/lnd/routing/route" - "github.com/lightningnetwork/lnd/tlv" + "github.com/lightningnetwork/lnd/subscribe" "github.com/lightningnetwork/lnd/zpay32" ) @@ -42,8 +46,9 @@ type RouterBackend struct { // routes. FindRoute func(source, target route.Vertex, amt lnwire.MilliSatoshi, restrictions *routing.RestrictParams, - destTlvRecords []tlv.Record, - finalExpiry ...uint16) (*route.Route, error) + destCustomRecords record.CustomSet, + routeHints map[route.Vertex][]*channeldb.ChannelEdgePolicy, + finalExpiry uint16) (*route.Route, error) MissionControl MissionControl @@ -60,6 +65,14 @@ type RouterBackend struct { // MaxTotalTimelock is the maximum total time lock a route is allowed to // have. MaxTotalTimelock uint32 + + // DefaultFinalCltvDelta is the default value used as final cltv delta + // when an RPC caller doesn't specify a value. + DefaultFinalCltvDelta uint16 + + // SubscribeHtlcEvents returns a subscription client for the node's + // htlc events. + SubscribeHtlcEvents func() (*subscribe.Client, error) } // MissionControl defines the mission control dependencies of routerrpc. @@ -76,6 +89,11 @@ type MissionControl interface { // GetHistorySnapshot takes a snapshot from the current mission control // state and actual probability estimates. 
GetHistorySnapshot() *routing.MissionControlSnapshot + + // GetPairHistorySnapshot returns the stored history for a given node + // pair. + GetPairHistorySnapshot(fromNode, + toNode route.Vertex) routing.TimedPairResult } // QueryRoutes attempts to query the daemons' Channel Router for a possible @@ -121,15 +139,17 @@ func (r *RouterBackend) QueryRoutes(ctx context.Context, // Currently, within the bootstrap phase of the network, we limit the // largest payment size allotted to (2^32) - 1 mSAT or 4.29 million // satoshis. - amt := btcutil.Amount(in.Amt) - amtMSat := lnwire.NewMSatFromSatoshis(amt) - if amtMSat > r.MaxPaymentMSat { + amt, err := lnrpc.UnmarshallAmt(in.Amt, in.AmtMsat) + if err != nil { + return nil, err + } + if amt > r.MaxPaymentMSat { return nil, fmt.Errorf("payment of %v is too large, max payment "+ "allowed is %v", amt, r.MaxPaymentMSat.ToSatoshis()) } // Unmarshall restrictions from request. - feeLimit := calculateFeeLimit(in.FeeLimit, amtMSat) + feeLimit := lnrpc.CalculateFeeLimit(in.FeeLimit, amt) ignoredNodes := make(map[route.Vertex]struct{}) for _, ignorePubKey := range in.IgnoredNodes { @@ -184,13 +204,18 @@ func (r *RouterBackend) QueryRoutes(ctx context.Context, // We need to subtract the final delta before passing it into path // finding. The optimal path is independent of the final cltv delta and // the path finding algorithm is unaware of this value. - finalCLTVDelta := uint16(zpay32.DefaultFinalCLTVDelta) + finalCLTVDelta := r.DefaultFinalCltvDelta if in.FinalCltvDelta != 0 { finalCLTVDelta = uint16(in.FinalCltvDelta) } cltvLimit -= uint32(finalCLTVDelta) - var destTLV map[uint64][]byte + // Parse destination feature bits. 
+ features, err := UnmarshalFeatures(in.DestFeatures) + if err != nil { + return nil, err + } + restrictions := &routing.RestrictParams{ FeeLimit: feeLimit, ProbabilitySource: func(fromNode, toNode route.Vertex, @@ -216,14 +241,43 @@ func (r *RouterBackend) QueryRoutes(ctx context.Context, fromNode, toNode, amt, ) }, - DestPayloadTLV: len(destTLV) != 0, - CltvLimit: cltvLimit, + DestCustomRecords: record.CustomSet(in.DestCustomRecords), + CltvLimit: cltvLimit, + DestFeatures: features, + } + + // Pass along an outgoing channel restriction if specified. + if in.OutgoingChanId != 0 { + restrictions.OutgoingChannelID = &in.OutgoingChanId + } + + // Pass along a last hop restriction if specified. + if len(in.LastHopPubkey) > 0 { + lastHop, err := route.NewVertexFromBytes( + in.LastHopPubkey, + ) + if err != nil { + return nil, err + } + restrictions.LastHop = &lastHop } // If we have any TLV records destined for the final hop, then we'll // attempt to decode them now into a form that the router can more // easily manipulate. - destTlvRecords, err := tlv.MapToRecords(destTLV) + customRecords := record.CustomSet(in.DestCustomRecords) + if err := customRecords.Validate(); err != nil { + return nil, err + } + + // Convert route hints to an edge map. + routeHints, err := unmarshallRouteHints(in.RouteHints) + if err != nil { + return nil, err + } + routeHintEdges, err := routing.RouteHintsToEdges( + routeHints, targetPubKey, + ) if err != nil { return nil, err } @@ -232,8 +286,8 @@ func (r *RouterBackend) QueryRoutes(ctx context.Context, // can carry `in.Amt` satoshis _including_ the total fee required on // the route. 
route, err := r.FindRoute( - sourcePubKey, targetPubKey, amtMSat, restrictions, - destTlvRecords, finalCLTVDelta, + sourcePubKey, targetPubKey, amt, restrictions, + customRecords, routeHintEdges, finalCLTVDelta, ) if err != nil { return nil, err @@ -301,27 +355,6 @@ func (r *RouterBackend) rpcEdgeToPair(e *lnrpc.EdgeLocator) ( return pair, nil } -// calculateFeeLimit returns the fee limit in millisatoshis. If a percentage -// based fee limit has been requested, we'll factor in the ratio provided with -// the amount of the payment. -func calculateFeeLimit(feeLimit *lnrpc.FeeLimit, - amount lnwire.MilliSatoshi) lnwire.MilliSatoshi { - - switch feeLimit.GetLimit().(type) { - case *lnrpc.FeeLimit_Fixed: - return lnwire.NewMSatFromSatoshis( - btcutil.Amount(feeLimit.GetFixed()), - ) - case *lnrpc.FeeLimit_Percent: - return amount * lnwire.MilliSatoshi(feeLimit.GetPercent()) / 100 - default: - // If a fee limit was not specified, we'll use the payment's - // amount as an upper bound in order to avoid payment attempts - // from incurring fees higher than the payment amount itself. - return amount - } -} - // MarshallRoute marshalls an internal route to an rpc route struct. func (r *RouterBackend) MarshallRoute(route *route.Route) (*lnrpc.Route, error) { resp := &lnrpc.Route{ @@ -347,6 +380,17 @@ func (r *RouterBackend) MarshallRoute(route *route.Route) (*lnrpc.Route, error) chanCapacity = incomingAmt.ToSatoshis() } + // Extract the MPP fields if present on this hop. 
+ var mpp *lnrpc.MPPRecord + if hop.MPP != nil { + addr := hop.MPP.PaymentAddr() + + mpp = &lnrpc.MPPRecord{ + PaymentAddr: addr[:], + TotalAmtMsat: int64(hop.MPP.TotalMsat()), + } + } + resp.Hops[i] = &lnrpc.Hop{ ChanId: hop.ChannelID, ChanCapacity: int64(chanCapacity), @@ -358,7 +402,9 @@ func (r *RouterBackend) MarshallRoute(route *route.Route) (*lnrpc.Route, error) PubKey: hex.EncodeToString( hop.PubKeyBytes[:], ), - TlvPayload: !hop.LegacyPayload, + CustomRecords: hop.CustomRecords, + TlvPayload: !hop.LegacyPayload, + MppRecord: mpp, } incomingAmt = hop.AmtToForward } @@ -366,78 +412,67 @@ func (r *RouterBackend) MarshallRoute(route *route.Route) (*lnrpc.Route, error) return resp, nil } -// UnmarshallHopByChannelLookup unmarshalls an rpc hop for which the pub key is -// not known. This function will query the channel graph with channel id to -// retrieve both endpoints and determine the hop pubkey using the previous hop -// pubkey. If the channel is unknown, an error is returned. -func (r *RouterBackend) UnmarshallHopByChannelLookup(hop *lnrpc.Hop, - prevPubKeyBytes [33]byte) (*route.Hop, error) { +// UnmarshallHopWithPubkey unmarshalls an rpc hop for which the pubkey has +// already been extracted. +func UnmarshallHopWithPubkey(rpcHop *lnrpc.Hop, pubkey route.Vertex) (*route.Hop, + error) { - // Discard edge policies, because they may be nil. 
- node1, node2, err := r.FetchChannelEndpoints(hop.ChanId) - if err != nil { + customRecords := record.CustomSet(rpcHop.CustomRecords) + if err := customRecords.Validate(); err != nil { return nil, err } - var pubKeyBytes [33]byte - switch { - case prevPubKeyBytes == node1: - pubKeyBytes = node2 - case prevPubKeyBytes == node2: - pubKeyBytes = node1 - default: - return nil, fmt.Errorf("channel edge does not match expected node") - } - - var tlvRecords []tlv.Record - - return &route.Hop{ - OutgoingTimeLock: hop.Expiry, - AmtToForward: lnwire.MilliSatoshi(hop.AmtToForwardMsat), - PubKeyBytes: pubKeyBytes, - ChannelID: hop.ChanId, - TLVRecords: tlvRecords, - LegacyPayload: !hop.TlvPayload, - }, nil -} - -// UnmarshallKnownPubkeyHop unmarshalls an rpc hop that contains the hop pubkey. -// The channel graph doesn't need to be queried because all information required -// for sending the payment is present. -func UnmarshallKnownPubkeyHop(hop *lnrpc.Hop) (*route.Hop, error) { - pubKey, err := hex.DecodeString(hop.PubKey) + mpp, err := UnmarshalMPP(rpcHop.MppRecord) if err != nil { - return nil, fmt.Errorf("cannot decode pubkey %s", hop.PubKey) + return nil, err } - var pubKeyBytes [33]byte - copy(pubKeyBytes[:], pubKey) - - var tlvRecords []tlv.Record - return &route.Hop{ - OutgoingTimeLock: hop.Expiry, - AmtToForward: lnwire.MilliSatoshi(hop.AmtToForwardMsat), - PubKeyBytes: pubKeyBytes, - ChannelID: hop.ChanId, - TLVRecords: tlvRecords, - LegacyPayload: !hop.TlvPayload, + OutgoingTimeLock: rpcHop.Expiry, + AmtToForward: lnwire.MilliSatoshi(rpcHop.AmtToForwardMsat), + PubKeyBytes: pubkey, + ChannelID: rpcHop.ChanId, + CustomRecords: customRecords, + LegacyPayload: !rpcHop.TlvPayload, + MPP: mpp, }, nil } // UnmarshallHop unmarshalls an rpc hop that may or may not contain a node // pubkey. 
-func (r *RouterBackend) UnmarshallHop(hop *lnrpc.Hop, +func (r *RouterBackend) UnmarshallHop(rpcHop *lnrpc.Hop, prevNodePubKey [33]byte) (*route.Hop, error) { - if hop.PubKey == "" { - // If no pub key is given of the hop, the local channel - // graph needs to be queried to complete the information - // necessary for routing. - return r.UnmarshallHopByChannelLookup(hop, prevNodePubKey) + var pubKeyBytes [33]byte + if rpcHop.PubKey != "" { + // Unmarshall the provided hop pubkey. + pubKey, err := hex.DecodeString(rpcHop.PubKey) + if err != nil { + return nil, fmt.Errorf("cannot decode pubkey %s", + rpcHop.PubKey) + } + copy(pubKeyBytes[:], pubKey) + } else { + // If no pub key is given of the hop, the local channel graph + // needs to be queried to complete the information necessary for + // routing. Discard edge policies, because they may be nil. + node1, node2, err := r.FetchChannelEndpoints(rpcHop.ChanId) + if err != nil { + return nil, err + } + + switch { + case prevNodePubKey == node1: + pubKeyBytes = node2 + case prevNodePubKey == node2: + pubKeyBytes = node1 + default: + return nil, fmt.Errorf("channel edge does not match " + + "expected node") + } } - return UnmarshallKnownPubkeyHop(hop) + return UnmarshallHopWithPubkey(rpcHop, pubKeyBytes) } // UnmarshallRoute unmarshalls an rpc route. For hops that don't specify a @@ -454,6 +489,13 @@ func (r *RouterBackend) UnmarshallRoute(rpcroute *lnrpc.Route) ( return nil, err } + if routeHop.AmtToForward > r.MaxPaymentMSat { + return nil, fmt.Errorf("payment of %v is too large, "+ + "max payment allowed is %v", + routeHop.AmtToForward, + r.MaxPaymentMSat.ToSatoshis()) + } + hops[i] = routeHop prevNodePubKey = routeHop.PubKeyBytes @@ -485,6 +527,17 @@ func (r *RouterBackend) extractIntentFromSendRequest( payIntent.OutgoingChannelID = &rpcPayReq.OutgoingChanId } + // Pass along a last hop restriction if specified. 
+ if len(rpcPayReq.LastHopPubkey) > 0 { + lastHop, err := route.NewVertexFromBytes( + rpcPayReq.LastHopPubkey, + ) + if err != nil { + return nil, err + } + payIntent.LastHop = &lastHop + } + // Take the CLTV limit from the request if set, otherwise use the max. cltvLimit, err := ValidateCLTVLimit( uint32(rpcPayReq.CltvLimit), r.MaxTotalTimelock, @@ -494,24 +547,32 @@ func (r *RouterBackend) extractIntentFromSendRequest( } payIntent.CltvLimit = cltvLimit + // Take max htlcs from the request. Map zero to one for backwards + // compatibility. + maxParts := rpcPayReq.MaxParts + if maxParts == 0 { + maxParts = 1 + } + payIntent.MaxParts = maxParts + // Take fee limit from request. - payIntent.FeeLimit = lnwire.NewMSatFromSatoshis( - btcutil.Amount(rpcPayReq.FeeLimitSat), + payIntent.FeeLimit, err = lnrpc.UnmarshallAmt( + rpcPayReq.FeeLimitSat, rpcPayReq.FeeLimitMsat, ) + if err != nil { + return nil, err + } // Set payment attempt timeout. if rpcPayReq.TimeoutSeconds == 0 { return nil, errors.New("timeout_seconds must be specified") } - var destTLV map[uint64][]byte - if len(destTLV) != 0 { - var err error - payIntent.FinalDestRecords, err = tlv.MapToRecords(destTLV) - if err != nil { - return nil, err - } + customRecords := record.CustomSet(rpcPayReq.DestCustomRecords) + if err := customRecords.Validate(); err != nil { + return nil, err } + payIntent.DestCustomRecords = customRecords payIntent.PayAttemptTimeout = time.Second * time.Duration(rpcPayReq.TimeoutSeconds) @@ -525,6 +586,14 @@ func (r *RouterBackend) extractIntentFromSendRequest( } payIntent.RouteHints = routeHints + // Unmarshall either sat or msat amount from request. + reqAmt, err := lnrpc.UnmarshallAmt( + rpcPayReq.Amt, rpcPayReq.AmtMsat, + ) + if err != nil { + return nil, err + } + // If the payment request field isn't blank, then the details of the // invoice are encoded entirely within the encoded payReq. So we'll // attempt to decode it, populating the payment accordingly. 
@@ -562,17 +631,15 @@ func (r *RouterBackend) extractIntentFromSendRequest( // We override the amount to pay with the amount provided from // the payment request. if payReq.MilliSat == nil { - if rpcPayReq.Amt == 0 { + if reqAmt == 0 { return nil, errors.New("amount must be " + "specified when paying a zero amount " + "invoice") } - payIntent.Amount = lnwire.NewMSatFromSatoshis( - btcutil.Amount(rpcPayReq.Amt), - ) + payIntent.Amount = reqAmt } else { - if rpcPayReq.Amt != 0 { + if reqAmt != 0 { return nil, errors.New("amount must not be " + "specified when paying a non-zero " + " amount invoice") @@ -589,6 +656,9 @@ func (r *RouterBackend) extractIntentFromSendRequest( payIntent.RouteHints = append( payIntent.RouteHints, payReq.RouteHints..., ) + payIntent.DestFeatures = payReq.Features + payIntent.PaymentAddr = payReq.PaymentAddr + payIntent.PaymentRequest = []byte(rpcPayReq.PaymentRequest) } else { // Otherwise, If the payment request field was not specified // (and a custom route wasn't specified), construct the payment @@ -606,32 +676,31 @@ func (r *RouterBackend) extractIntentFromSendRequest( payIntent.FinalCLTVDelta = uint16(rpcPayReq.FinalCltvDelta) } else { - payIntent.FinalCLTVDelta = zpay32.DefaultFinalCLTVDelta + payIntent.FinalCLTVDelta = r.DefaultFinalCltvDelta } // Amount. - if rpcPayReq.Amt == 0 { + if reqAmt == 0 { return nil, errors.New("amount must be specified") } - payIntent.Amount = lnwire.NewMSatFromSatoshis( - btcutil.Amount(rpcPayReq.Amt), - ) + payIntent.Amount = reqAmt // Payment hash. copy(payIntent.PaymentHash[:], rpcPayReq.PaymentHash) - } - // Currently, within the bootstrap phase of the network, we limit the - // largest payment size allotted to (2^32) - 1 mSAT or 4.29 million - // satoshis. - if payIntent.Amount > r.MaxPaymentMSat { - // In this case, we'll send an error to the caller, but - // continue our loop for the next payment. 
- return payIntent, fmt.Errorf("payment of %v is too large, "+ - "max payment allowed is %v", payIntent.Amount, - r.MaxPaymentMSat) + // Parse destination feature bits. + features, err := UnmarshalFeatures(rpcPayReq.DestFeatures) + if err != nil { + return nil, err + } + payIntent.DestFeatures = features + } + + // Check for disallowed payments to self. + if !rpcPayReq.AllowSelfPayment && payIntent.Target == r.SelfNode { + return nil, errors.New("self-payments not allowed") } return payIntent, nil @@ -681,6 +750,29 @@ func unmarshallHopHint(rpcHint *lnrpc.HopHint) (zpay32.HopHint, error) { }, nil } +// UnmarshalFeatures converts a list of uint32's into a valid feature vector. +// This method checks that feature bit pairs aren't assigned toegether, and +// validates transitive dependencies. +func UnmarshalFeatures( + rpcFeatures []lnrpc.FeatureBit) (*lnwire.FeatureVector, error) { + + // If no destination features are specified we'll return nil to signal + // that the router should try to use the graph as a fallback. + if rpcFeatures == nil { + return nil, nil + } + + raw := lnwire.NewRawFeatureVector() + for _, bit := range rpcFeatures { + err := raw.SafeSet(lnwire.FeatureBit(bit)) + if err != nil { + return nil, err + } + } + + return lnwire.NewFeatureVector(raw, lnwire.Features), nil +} + // ValidatePayReqExpiry checks if the passed payment request has expired. In // the case it has expired, an error will be returned. func ValidatePayReqExpiry(payReq *zpay32.Invoice) error { @@ -707,3 +799,412 @@ func ValidateCLTVLimit(val, max uint32) (uint32, error) { return val, nil } } + +// UnmarshalMPP accepts the mpp_total_amt_msat and mpp_payment_addr fields from +// an RPC request and converts into an record.MPP object. An error is returned +// if the payment address is not 0 or 32 bytes. If the total amount and payment +// address are zero-value, the return value will be nil signaling there is no +// MPP record to attach to this hop. 
Otherwise, a non-nil reocrd will be +// contained combining the provided values. +func UnmarshalMPP(reqMPP *lnrpc.MPPRecord) (*record.MPP, error) { + // If no MPP record was submitted, assume the user wants to send a + // regular payment. + if reqMPP == nil { + return nil, nil + } + + reqTotal := reqMPP.TotalAmtMsat + reqAddr := reqMPP.PaymentAddr + + switch { + + // No MPP fields were provided. + case reqTotal == 0 && len(reqAddr) == 0: + return nil, fmt.Errorf("missing total_msat and payment_addr") + + // Total is present, but payment address is missing. + case reqTotal > 0 && len(reqAddr) == 0: + return nil, fmt.Errorf("missing payment_addr") + + // Payment address is present, but total is missing. + case reqTotal == 0 && len(reqAddr) > 0: + return nil, fmt.Errorf("missing total_msat") + } + + addr, err := lntypes.MakeHash(reqAddr) + if err != nil { + return nil, fmt.Errorf("unable to parse "+ + "payment_addr: %v", err) + } + + total := lnwire.MilliSatoshi(reqTotal) + + return record.NewMPP(total, addr), nil +} + +// MarshalHTLCAttempt constructs an RPC HTLCAttempt from the db representation. 
+func (r *RouterBackend) MarshalHTLCAttempt( + htlc channeldb.HTLCAttempt) (*lnrpc.HTLCAttempt, error) { + + route, err := r.MarshallRoute(&htlc.Route) + if err != nil { + return nil, err + } + + rpcAttempt := &lnrpc.HTLCAttempt{ + AttemptTimeNs: MarshalTimeNano(htlc.AttemptTime), + Route: route, + } + + switch { + case htlc.Settle != nil: + rpcAttempt.Status = lnrpc.HTLCAttempt_SUCCEEDED + rpcAttempt.ResolveTimeNs = MarshalTimeNano( + htlc.Settle.SettleTime, + ) + + case htlc.Failure != nil: + rpcAttempt.Status = lnrpc.HTLCAttempt_FAILED + rpcAttempt.ResolveTimeNs = MarshalTimeNano( + htlc.Failure.FailTime, + ) + + var err error + rpcAttempt.Failure, err = marshallHtlcFailure(htlc.Failure) + if err != nil { + return nil, err + } + default: + rpcAttempt.Status = lnrpc.HTLCAttempt_IN_FLIGHT + } + + return rpcAttempt, nil +} + +// marshallHtlcFailure marshalls htlc fail info from the database to its rpc +// representation. +func marshallHtlcFailure(failure *channeldb.HTLCFailInfo) (*lnrpc.Failure, + error) { + + rpcFailure := &lnrpc.Failure{ + FailureSourceIndex: failure.FailureSourceIndex, + } + + switch failure.Reason { + + case channeldb.HTLCFailUnknown: + rpcFailure.Code = lnrpc.Failure_UNKNOWN_FAILURE + + case channeldb.HTLCFailUnreadable: + rpcFailure.Code = lnrpc.Failure_UNREADABLE_FAILURE + + case channeldb.HTLCFailInternal: + rpcFailure.Code = lnrpc.Failure_INTERNAL_FAILURE + + case channeldb.HTLCFailMessage: + err := marshallWireError(failure.Message, rpcFailure) + if err != nil { + return nil, err + } + + default: + return nil, errors.New("unknown htlc failure reason") + } + + return rpcFailure, nil +} + +// MarshalTimeNano converts a time.Time into its nanosecond representation. If +// the time is zero, this method simply returns 0, since calling UnixNano() on a +// zero-valued time is undefined. 
+func MarshalTimeNano(t time.Time) int64 { + if t.IsZero() { + return 0 + } + return t.UnixNano() +} + +// marshallError marshall an error as received from the switch to rpc structs +// suitable for returning to the caller of an rpc method. +// +// Because of difficulties with using protobuf oneof constructs in some +// languages, the decision was made here to use a single message format for all +// failure messages with some fields left empty depending on the failure type. +func marshallError(sendError error) (*lnrpc.Failure, error) { + response := &lnrpc.Failure{} + + if sendError == htlcswitch.ErrUnreadableFailureMessage { + response.Code = lnrpc.Failure_UNREADABLE_FAILURE + return response, nil + } + + rtErr, ok := sendError.(htlcswitch.ClearTextError) + if !ok { + return nil, sendError + } + + err := marshallWireError(rtErr.WireMessage(), response) + if err != nil { + return nil, err + } + + // If the ClearTextError received is a ForwardingError, the error + // originated from a node along the route, not locally on our outgoing + // link. We set failureSourceIdx to the index of the node where the + // failure occurred. If the error is not a ForwardingError, the failure + // occurred at our node, so we leave the index as 0 to indicate that + // we failed locally. + fErr, ok := rtErr.(*htlcswitch.ForwardingError) + if ok { + response.FailureSourceIndex = uint32(fErr.FailureSourceIdx) + } + + return response, nil +} + +// marshallError marshall an error as received from the switch to rpc structs +// suitable for returning to the caller of an rpc method. +// +// Because of difficulties with using protobuf oneof constructs in some +// languages, the decision was made here to use a single message format for all +// failure messages with some fields left empty depending on the failure type. 
+func marshallWireError(msg lnwire.FailureMessage, + response *lnrpc.Failure) error { + + switch onionErr := msg.(type) { + + case *lnwire.FailIncorrectDetails: + response.Code = lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS + response.Height = onionErr.Height() + + case *lnwire.FailIncorrectPaymentAmount: + response.Code = lnrpc.Failure_INCORRECT_PAYMENT_AMOUNT + + case *lnwire.FailFinalIncorrectCltvExpiry: + response.Code = lnrpc.Failure_FINAL_INCORRECT_CLTV_EXPIRY + response.CltvExpiry = onionErr.CltvExpiry + + case *lnwire.FailFinalIncorrectHtlcAmount: + response.Code = lnrpc.Failure_FINAL_INCORRECT_HTLC_AMOUNT + response.HtlcMsat = uint64(onionErr.IncomingHTLCAmount) + + case *lnwire.FailFinalExpiryTooSoon: + response.Code = lnrpc.Failure_FINAL_EXPIRY_TOO_SOON + + case *lnwire.FailInvalidRealm: + response.Code = lnrpc.Failure_INVALID_REALM + + case *lnwire.FailExpiryTooSoon: + response.Code = lnrpc.Failure_EXPIRY_TOO_SOON + response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) + + case *lnwire.FailExpiryTooFar: + response.Code = lnrpc.Failure_EXPIRY_TOO_FAR + + case *lnwire.FailInvalidOnionVersion: + response.Code = lnrpc.Failure_INVALID_ONION_VERSION + response.OnionSha_256 = onionErr.OnionSHA256[:] + + case *lnwire.FailInvalidOnionHmac: + response.Code = lnrpc.Failure_INVALID_ONION_HMAC + response.OnionSha_256 = onionErr.OnionSHA256[:] + + case *lnwire.FailInvalidOnionKey: + response.Code = lnrpc.Failure_INVALID_ONION_KEY + response.OnionSha_256 = onionErr.OnionSHA256[:] + + case *lnwire.FailAmountBelowMinimum: + response.Code = lnrpc.Failure_AMOUNT_BELOW_MINIMUM + response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) + response.HtlcMsat = uint64(onionErr.HtlcMsat) + + case *lnwire.FailFeeInsufficient: + response.Code = lnrpc.Failure_FEE_INSUFFICIENT + response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) + response.HtlcMsat = uint64(onionErr.HtlcMsat) + + case *lnwire.FailIncorrectCltvExpiry: + response.Code = 
lnrpc.Failure_INCORRECT_CLTV_EXPIRY + response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) + response.CltvExpiry = onionErr.CltvExpiry + + case *lnwire.FailChannelDisabled: + response.Code = lnrpc.Failure_CHANNEL_DISABLED + response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) + response.Flags = uint32(onionErr.Flags) + + case *lnwire.FailTemporaryChannelFailure: + response.Code = lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE + response.ChannelUpdate = marshallChannelUpdate(onionErr.Update) + + case *lnwire.FailRequiredNodeFeatureMissing: + response.Code = lnrpc.Failure_REQUIRED_NODE_FEATURE_MISSING + + case *lnwire.FailRequiredChannelFeatureMissing: + response.Code = lnrpc.Failure_REQUIRED_CHANNEL_FEATURE_MISSING + + case *lnwire.FailUnknownNextPeer: + response.Code = lnrpc.Failure_UNKNOWN_NEXT_PEER + + case *lnwire.FailTemporaryNodeFailure: + response.Code = lnrpc.Failure_TEMPORARY_NODE_FAILURE + + case *lnwire.FailPermanentNodeFailure: + response.Code = lnrpc.Failure_PERMANENT_NODE_FAILURE + + case *lnwire.FailPermanentChannelFailure: + response.Code = lnrpc.Failure_PERMANENT_CHANNEL_FAILURE + + case *lnwire.FailMPPTimeout: + response.Code = lnrpc.Failure_MPP_TIMEOUT + + case nil: + response.Code = lnrpc.Failure_UNKNOWN_FAILURE + + default: + return fmt.Errorf("cannot marshall failure %T", onionErr) + } + + return nil +} + +// marshallChannelUpdate marshalls a channel update as received over the wire to +// the router rpc format. 
+func marshallChannelUpdate(update *lnwire.ChannelUpdate) *lnrpc.ChannelUpdate {
+	if update == nil {
+		return nil
+	}
+
+	return &lnrpc.ChannelUpdate{
+		Signature:       update.Signature[:],
+		ChainHash:       update.ChainHash[:],
+		ChanId:          update.ShortChannelID.ToUint64(),
+		Timestamp:       update.Timestamp,
+		MessageFlags:    uint32(update.MessageFlags),
+		ChannelFlags:    uint32(update.ChannelFlags),
+		TimeLockDelta:   uint32(update.TimeLockDelta),
+		HtlcMinimumMsat: uint64(update.HtlcMinimumMsat),
+		BaseFee:         update.BaseFee,
+		FeeRate:         update.FeeRate,
+		HtlcMaximumMsat: uint64(update.HtlcMaximumMsat),
+		ExtraOpaqueData: update.ExtraOpaqueData,
+	}
+}
+
+// MarshallPayment marshalls a payment to its rpc representation.
+func (r *RouterBackend) MarshallPayment(payment *channeldb.MPPayment) (
+	*lnrpc.Payment, error) {
+
+	// Fetch the payment's preimage and the total paid in fees.
+	var (
+		fee      lnwire.MilliSatoshi
+		preimage lntypes.Preimage
+	)
+	for _, htlc := range payment.HTLCs {
+		// If any of the htlcs have settled, extract a valid
+		// preimage.
+ if htlc.Settle != nil { + preimage = htlc.Settle.Preimage + fee += htlc.Route.TotalFees() + } + } + + msatValue := int64(payment.Info.Value) + satValue := int64(payment.Info.Value.ToSatoshis()) + + status, err := convertPaymentStatus(payment.Status) + if err != nil { + return nil, err + } + + htlcs := make([]*lnrpc.HTLCAttempt, 0, len(payment.HTLCs)) + for _, dbHTLC := range payment.HTLCs { + htlc, err := r.MarshalHTLCAttempt(dbHTLC) + if err != nil { + return nil, err + } + + htlcs = append(htlcs, htlc) + } + + paymentHash := payment.Info.PaymentHash + creationTimeNS := MarshalTimeNano(payment.Info.CreationTime) + + failureReason, err := marshallPaymentFailureReason( + payment.FailureReason, + ) + if err != nil { + return nil, err + } + + return &lnrpc.Payment{ + PaymentHash: hex.EncodeToString(paymentHash[:]), + Value: satValue, + ValueMsat: msatValue, + ValueSat: satValue, + CreationDate: payment.Info.CreationTime.Unix(), + CreationTimeNs: creationTimeNS, + Fee: int64(fee.ToSatoshis()), + FeeSat: int64(fee.ToSatoshis()), + FeeMsat: int64(fee), + PaymentPreimage: hex.EncodeToString(preimage[:]), + PaymentRequest: string(payment.Info.PaymentRequest), + Status: status, + Htlcs: htlcs, + PaymentIndex: payment.SequenceNum, + FailureReason: failureReason, + }, nil +} + +// convertPaymentStatus converts a channeldb.PaymentStatus to the type expected +// by the RPC. +func convertPaymentStatus(dbStatus channeldb.PaymentStatus) ( + lnrpc.Payment_PaymentStatus, error) { + + switch dbStatus { + case channeldb.StatusUnknown: + return lnrpc.Payment_UNKNOWN, nil + + case channeldb.StatusInFlight: + return lnrpc.Payment_IN_FLIGHT, nil + + case channeldb.StatusSucceeded: + return lnrpc.Payment_SUCCEEDED, nil + + case channeldb.StatusFailed: + return lnrpc.Payment_FAILED, nil + + default: + return 0, fmt.Errorf("unhandled payment status %v", dbStatus) + } +} + +// marshallPaymentFailureReason marshalls the failure reason to the corresponding rpc +// type. 
+func marshallPaymentFailureReason(reason *channeldb.FailureReason) ( + lnrpc.PaymentFailureReason, error) { + + if reason == nil { + return lnrpc.PaymentFailureReason_FAILURE_REASON_NONE, nil + } + + switch *reason { + + case channeldb.FailureReasonTimeout: + return lnrpc.PaymentFailureReason_FAILURE_REASON_TIMEOUT, nil + + case channeldb.FailureReasonNoRoute: + return lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE, nil + + case channeldb.FailureReasonError: + return lnrpc.PaymentFailureReason_FAILURE_REASON_ERROR, nil + + case channeldb.FailureReasonPaymentDetails: + return lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS, nil + + case channeldb.FailureReasonInsufficientBalance: + return lnrpc.PaymentFailureReason_FAILURE_REASON_INSUFFICIENT_BALANCE, nil + } + + return 0, errors.New("unknown failure reason") +} diff --git a/lnrpc/routerrpc/router_backend_test.go b/lnrpc/routerrpc/router_backend_test.go index 329200dccc..33a0ccfba3 100644 --- a/lnrpc/routerrpc/router_backend_test.go +++ b/lnrpc/routerrpc/router_backend_test.go @@ -7,10 +7,11 @@ import ( "testing" "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/routing" "github.com/lightningnetwork/lnd/routing/route" - "github.com/lightningnetwork/lnd/tlv" "github.com/lightningnetwork/lnd/lnrpc" ) @@ -18,6 +19,7 @@ import ( const ( destKey = "0286098b97bc843372b4426d4b276cea9aa2f48f0428d6f5b66ae101befc14f8b4" ignoreNodeKey = "02f274f48f3c0d590449a6776e3ce8825076ac376e470e992246eebc565ef8bb2a" + hintNodeKey = "0274e7fb33eafd74fe1acb6db7680bb4aa78e9c839a6e954e38abfad680f645ef7" testMissionControlProb = 0.5 ) @@ -34,14 +36,17 @@ var ( // and passed onto path finding. 
func TestQueryRoutes(t *testing.T) { t.Run("no mission control", func(t *testing.T) { - testQueryRoutes(t, false) + testQueryRoutes(t, false, false) + }) + t.Run("no mission control and msat", func(t *testing.T) { + testQueryRoutes(t, false, true) }) t.Run("with mission control", func(t *testing.T) { - testQueryRoutes(t, true) + testQueryRoutes(t, true, false) }) } -func testQueryRoutes(t *testing.T, useMissionControl bool) { +func testQueryRoutes(t *testing.T, useMissionControl bool, useMsat bool) { ignoreNodeBytes, err := hex.DecodeString(ignoreNodeKey) if err != nil { t.Fatal(err) @@ -55,16 +60,31 @@ func testQueryRoutes(t *testing.T, useMissionControl bool) { t.Fatal(err) } + var ( + lastHop = route.Vertex{64} + outgoingChan = uint64(383322) + ) + + hintNode, err := route.NewVertexFromStr(hintNodeKey) + if err != nil { + t.Fatal(err) + } + + rpcRouteHints := []*lnrpc.RouteHint{ + { + HopHints: []*lnrpc.HopHint{ + { + ChanId: 38484, + NodeId: hintNodeKey, + }, + }, + }, + } + request := &lnrpc.QueryRoutesRequest{ PubKey: destKey, - Amt: 100000, FinalCltvDelta: 100, - FeeLimit: &lnrpc.FeeLimit{ - Limit: &lnrpc.FeeLimit_Fixed{ - Fixed: 250, - }, - }, - IgnoredNodes: [][]byte{ignoreNodeBytes}, + IgnoredNodes: [][]byte{ignoreNodeBytes}, IgnoredEdges: []*lnrpc.EdgeLocator{{ ChannelId: 555, DirectionReverse: true, @@ -74,14 +94,36 @@ func testQueryRoutes(t *testing.T, useMissionControl bool) { To: node2[:], }}, UseMissionControl: useMissionControl, + LastHopPubkey: lastHop[:], + OutgoingChanId: outgoingChan, + DestFeatures: []lnrpc.FeatureBit{lnrpc.FeatureBit_MPP_OPT}, + RouteHints: rpcRouteHints, + } + + amtSat := int64(100000) + if useMsat { + request.AmtMsat = amtSat * 1000 + request.FeeLimit = &lnrpc.FeeLimit{ + Limit: &lnrpc.FeeLimit_FixedMsat{ + FixedMsat: 250000, + }, + } + } else { + request.Amt = amtSat + request.FeeLimit = &lnrpc.FeeLimit{ + Limit: &lnrpc.FeeLimit_Fixed{ + Fixed: 250, + }, + } } findRoute := func(source, target route.Vertex, amt 
lnwire.MilliSatoshi, restrictions *routing.RestrictParams, - _ []tlv.Record, - finalExpiry ...uint16) (*route.Route, error) { + _ record.CustomSet, + routeHints map[route.Vertex][]*channeldb.ChannelEdgePolicy, + finalExpiry uint16) (*route.Route, error) { - if int64(amt) != request.Amt*1000 { + if int64(amt) != amtSat*1000 { t.Fatal("unexpected amount") } @@ -113,6 +155,22 @@ func testQueryRoutes(t *testing.T, useMissionControl bool) { t.Fatal("expecting 0% probability for ignored pair") } + if *restrictions.LastHop != lastHop { + t.Fatal("unexpected last hop") + } + + if *restrictions.OutgoingChannelID != outgoingChan { + t.Fatal("unexpected outgoing channel id") + } + + if !restrictions.DestFeatures.HasFeature(lnwire.MPPOptional) { + t.Fatal("unexpected dest features") + } + + if _, ok := routeHints[hintNode]; !ok { + t.Fatal("expected route hint") + } + expectedProb := 1.0 if useMissionControl { expectedProb = testMissionControlProb @@ -174,3 +232,129 @@ func (m *mockMissionControl) ResetHistory() error { func (m *mockMissionControl) GetHistorySnapshot() *routing.MissionControlSnapshot { return nil } + +func (m *mockMissionControl) GetPairHistorySnapshot(fromNode, + toNode route.Vertex) routing.TimedPairResult { + + return routing.TimedPairResult{} +} + +type mppOutcome byte + +const ( + valid mppOutcome = iota + invalid + nompp +) + +type unmarshalMPPTest struct { + name string + mpp *lnrpc.MPPRecord + outcome mppOutcome +} + +// TestUnmarshalMPP checks both positive and negative cases of UnmarshalMPP to +// assert that an MPP record is only returned when both fields are properly +// specified. It also asserts that zero-values for both inputs is also valid, +// but returns a nil record. 
+func TestUnmarshalMPP(t *testing.T) { + tests := []unmarshalMPPTest{ + { + name: "nil record", + mpp: nil, + outcome: nompp, + }, + { + name: "invalid total or addr", + mpp: &lnrpc.MPPRecord{ + PaymentAddr: nil, + TotalAmtMsat: 0, + }, + outcome: invalid, + }, + { + name: "valid total only", + mpp: &lnrpc.MPPRecord{ + PaymentAddr: nil, + TotalAmtMsat: 8, + }, + outcome: invalid, + }, + { + name: "valid addr only", + mpp: &lnrpc.MPPRecord{ + PaymentAddr: bytes.Repeat([]byte{0x02}, 32), + TotalAmtMsat: 0, + }, + outcome: invalid, + }, + { + name: "valid total and invalid addr", + mpp: &lnrpc.MPPRecord{ + PaymentAddr: []byte{0x02}, + TotalAmtMsat: 8, + }, + outcome: invalid, + }, + { + name: "valid total and valid addr", + mpp: &lnrpc.MPPRecord{ + PaymentAddr: bytes.Repeat([]byte{0x02}, 32), + TotalAmtMsat: 8, + }, + outcome: valid, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + testUnmarshalMPP(t, test) + }) + } +} + +func testUnmarshalMPP(t *testing.T, test unmarshalMPPTest) { + mpp, err := UnmarshalMPP(test.mpp) + switch test.outcome { + + // Valid arguments should result in no error, a non-nil MPP record, and + // the fields should be set correctly. + case valid: + if err != nil { + t.Fatalf("unable to parse mpp record: %v", err) + } + if mpp == nil { + t.Fatalf("mpp payload should be non-nil") + } + if int64(mpp.TotalMsat()) != test.mpp.TotalAmtMsat { + t.Fatalf("incorrect total msat") + } + addr := mpp.PaymentAddr() + if !bytes.Equal(addr[:], test.mpp.PaymentAddr) { + t.Fatalf("incorrect payment addr") + } + + // Invalid arguments should produce a failure and nil MPP record. + case invalid: + if err == nil { + t.Fatalf("expected failure for invalid mpp") + } + if mpp != nil { + t.Fatalf("mpp payload should be nil for failure") + } + + // Arguments that produce no MPP field should return no error and no MPP + // record. 
+ case nompp: + if err != nil { + t.Fatalf("failure for args resulting for no-mpp") + } + if mpp != nil { + t.Fatalf("mpp payload should be nil for no-mpp") + } + + default: + t.Fatalf("test case has non-standard outcome") + } +} diff --git a/lnrpc/routerrpc/router_server.go b/lnrpc/routerrpc/router_server.go index 37e68474e9..e38c648b57 100644 --- a/lnrpc/routerrpc/router_server.go +++ b/lnrpc/routerrpc/router_server.go @@ -1,5 +1,3 @@ -// +build routerrpc - package routerrpc import ( @@ -9,10 +7,10 @@ import ( "io/ioutil" "os" "path/filepath" + "sync/atomic" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/channeldb" - "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" @@ -33,6 +31,8 @@ const ( ) var ( + errServerShuttingDown = errors.New("routerrpc server shutting down") + // macaroonOps are the set of capabilities that our minted macaroon (if // it doesn't already exist) will have. macaroonOps = []bakery.Op{ @@ -48,7 +48,7 @@ var ( // macPermissions maps RPC calls to the permissions they require. 
macPermissions = map[string][]bakery.Op{ - "/routerrpc.Router/SendPayment": {{ + "/routerrpc.Router/SendPaymentV2": {{ Entity: "offchain", Action: "write", }}, @@ -56,7 +56,7 @@ var ( Entity: "offchain", Action: "write", }}, - "/routerrpc.Router/TrackPayment": {{ + "/routerrpc.Router/TrackPaymentV2": {{ Entity: "offchain", Action: "read", }}, @@ -68,6 +68,10 @@ var ( Entity: "offchain", Action: "read", }}, + "/routerrpc.Router/QueryProbability": {{ + Entity: "offchain", + Action: "read", + }}, "/routerrpc.Router/ResetMissionControl": {{ Entity: "offchain", Action: "write", @@ -76,6 +80,18 @@ var ( Entity: "offchain", Action: "read", }}, + "/routerrpc.Router/SubscribeHtlcEvents": {{ + Entity: "offchain", + Action: "read", + }}, + "/routerrpc.Router/SendPayment": {{ + Entity: "offchain", + Action: "write", + }}, + "/routerrpc.Router/TrackPayment": {{ + Entity: "offchain", + Action: "read", + }}, } // DefaultRouterMacFilename is the default name of the router macaroon @@ -87,7 +103,12 @@ var ( // Server is a stand alone sub RPC server which exposes functionality that // allows clients to route arbitrary payment through the Lightning Network. type Server struct { + started int32 // To be used atomically. + shutdown int32 // To be used atomically. + cfg *Config + + quit chan struct{} } // A compile time check to ensure that Server fully implements the RouterServer @@ -147,7 +168,8 @@ func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, error) { } routerServer := &Server{ - cfg: cfg, + cfg: cfg, + quit: make(chan struct{}), } return routerServer, macPermissions, nil @@ -157,6 +179,10 @@ func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, error) { // // NOTE: This is part of the lnrpc.SubServer interface. func (s *Server) Start() error { + if atomic.AddInt32(&s.started, 1) != 1 { + return nil + } + return nil } @@ -164,6 +190,11 @@ func (s *Server) Start() error { // // NOTE: This is part of the lnrpc.SubServer interface. 
func (s *Server) Stop() error { + if atomic.AddInt32(&s.shutdown, 1) != 1 { + return nil + } + + close(s.quit) return nil } @@ -191,13 +222,13 @@ func (s *Server) RegisterWithRootServer(grpcServer *grpc.Server) error { return nil } -// SendPayment attempts to route a payment described by the passed +// SendPaymentV2 attempts to route a payment described by the passed // PaymentRequest to the final destination. If we are unable to route the // payment, or cannot find a route that satisfies the constraints in the // PaymentRequest, then an error will be returned. Otherwise, the payment // pre-image, along with the final route will be returned. -func (s *Server) SendPayment(req *SendPaymentRequest, - stream Router_SendPaymentServer) error { +func (s *Server) SendPaymentV2(req *SendPaymentRequest, + stream Router_SendPaymentV2Server) error { payment, err := s.cfg.RouterBackend.extractIntentFromSendRequest(req) if err != nil { @@ -224,7 +255,7 @@ func (s *Server) SendPayment(req *SendPaymentRequest, return err } - return s.trackPayment(payment.PaymentHash, stream) + return s.trackPayment(payment.PaymentHash, stream, req.NoInflightUpdates) } // EstimateRouteFee allows callers to obtain a lower bound w.r.t how much it @@ -256,7 +287,7 @@ func (s *Server) EstimateRouteFee(ctx context.Context, &routing.RestrictParams{ FeeLimit: feeLimit, CltvLimit: s.cfg.RouterBackend.MaxTotalTimelock, - }, nil, + }, nil, nil, s.cfg.RouterBackend.DefaultFinalCltvDelta, ) if err != nil { return nil, err @@ -308,144 +339,6 @@ func (s *Server) SendToRoute(ctx context.Context, }, nil } -// marshallError marshall an error as received from the switch to rpc structs -// suitable for returning to the caller of an rpc method. -// -// Because of difficulties with using protobuf oneof constructs in some -// languages, the decision was made here to use a single message format for all -// failure messages with some fields left empty depending on the failure type. 
-func marshallError(sendError error) (*Failure, error) { - response := &Failure{} - - if sendError == htlcswitch.ErrUnreadableFailureMessage { - response.Code = Failure_UNREADABLE_FAILURE - return response, nil - } - - fErr, ok := sendError.(*htlcswitch.ForwardingError) - if !ok { - return nil, sendError - } - - switch onionErr := fErr.FailureMessage.(type) { - - case *lnwire.FailIncorrectDetails: - response.Code = Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS - response.Height = onionErr.Height() - - case *lnwire.FailIncorrectPaymentAmount: - response.Code = Failure_INCORRECT_PAYMENT_AMOUNT - - case *lnwire.FailFinalIncorrectCltvExpiry: - response.Code = Failure_FINAL_INCORRECT_CLTV_EXPIRY - response.CltvExpiry = onionErr.CltvExpiry - - case *lnwire.FailFinalIncorrectHtlcAmount: - response.Code = Failure_FINAL_INCORRECT_HTLC_AMOUNT - response.HtlcMsat = uint64(onionErr.IncomingHTLCAmount) - - case *lnwire.FailFinalExpiryTooSoon: - response.Code = Failure_FINAL_EXPIRY_TOO_SOON - - case *lnwire.FailInvalidRealm: - response.Code = Failure_INVALID_REALM - - case *lnwire.FailExpiryTooSoon: - response.Code = Failure_EXPIRY_TOO_SOON - response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - - case *lnwire.FailExpiryTooFar: - response.Code = Failure_EXPIRY_TOO_FAR - - case *lnwire.FailInvalidOnionVersion: - response.Code = Failure_INVALID_ONION_VERSION - response.OnionSha_256 = onionErr.OnionSHA256[:] - - case *lnwire.FailInvalidOnionHmac: - response.Code = Failure_INVALID_ONION_HMAC - response.OnionSha_256 = onionErr.OnionSHA256[:] - - case *lnwire.FailInvalidOnionKey: - response.Code = Failure_INVALID_ONION_KEY - response.OnionSha_256 = onionErr.OnionSHA256[:] - - case *lnwire.FailAmountBelowMinimum: - response.Code = Failure_AMOUNT_BELOW_MINIMUM - response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - response.HtlcMsat = uint64(onionErr.HtlcMsat) - - case *lnwire.FailFeeInsufficient: - response.Code = Failure_FEE_INSUFFICIENT - 
response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - response.HtlcMsat = uint64(onionErr.HtlcMsat) - - case *lnwire.FailIncorrectCltvExpiry: - response.Code = Failure_INCORRECT_CLTV_EXPIRY - response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - response.CltvExpiry = onionErr.CltvExpiry - - case *lnwire.FailChannelDisabled: - response.Code = Failure_CHANNEL_DISABLED - response.ChannelUpdate = marshallChannelUpdate(&onionErr.Update) - response.Flags = uint32(onionErr.Flags) - - case *lnwire.FailTemporaryChannelFailure: - response.Code = Failure_TEMPORARY_CHANNEL_FAILURE - response.ChannelUpdate = marshallChannelUpdate(onionErr.Update) - - case *lnwire.FailRequiredNodeFeatureMissing: - response.Code = Failure_REQUIRED_NODE_FEATURE_MISSING - - case *lnwire.FailRequiredChannelFeatureMissing: - response.Code = Failure_REQUIRED_CHANNEL_FEATURE_MISSING - - case *lnwire.FailUnknownNextPeer: - response.Code = Failure_UNKNOWN_NEXT_PEER - - case *lnwire.FailTemporaryNodeFailure: - response.Code = Failure_TEMPORARY_NODE_FAILURE - - case *lnwire.FailPermanentNodeFailure: - response.Code = Failure_PERMANENT_NODE_FAILURE - - case *lnwire.FailPermanentChannelFailure: - response.Code = Failure_PERMANENT_CHANNEL_FAILURE - - case nil: - response.Code = Failure_UNKNOWN_FAILURE - - default: - return nil, fmt.Errorf("cannot marshall failure %T", onionErr) - } - - response.FailureSourceIndex = uint32(fErr.FailureSourceIdx) - - return response, nil -} - -// marshallChannelUpdate marshalls a channel update as received over the wire to -// the router rpc format. 
-func marshallChannelUpdate(update *lnwire.ChannelUpdate) *ChannelUpdate { - if update == nil { - return nil - } - - return &ChannelUpdate{ - Signature: update.Signature[:], - ChainHash: update.ChainHash[:], - ChanId: update.ShortChannelID.ToUint64(), - Timestamp: update.Timestamp, - MessageFlags: uint32(update.MessageFlags), - ChannelFlags: uint32(update.ChannelFlags), - TimeLockDelta: uint32(update.TimeLockDelta), - HtlcMinimumMsat: uint64(update.HtlcMinimumMsat), - BaseFee: update.BaseFee, - FeeRate: update.FeeRate, - HtlcMaximumMsat: uint64(update.HtlcMaximumMsat), - ExtraOpaqueData: update.ExtraOpaqueData, - } -} - // ResetMissionControl clears all mission control state and starts with a clean // slate. func (s *Server) ResetMissionControl(ctx context.Context, @@ -466,53 +359,78 @@ func (s *Server) QueryMissionControl(ctx context.Context, snapshot := s.cfg.RouterBackend.MissionControl.GetHistorySnapshot() - rpcNodes := make([]*NodeHistory, 0, len(snapshot.Nodes)) - for _, n := range snapshot.Nodes { - // Copy node struct to prevent loop variable binding bugs. - node := n - - rpcNode := NodeHistory{ - Pubkey: node.Node[:], - LastFailTime: node.LastFail.Unix(), - OtherSuccessProb: float32( - node.OtherSuccessProb, - ), - } - - rpcNodes = append(rpcNodes, &rpcNode) - } - rpcPairs := make([]*PairHistory, 0, len(snapshot.Pairs)) for _, p := range snapshot.Pairs { // Prevent binding to loop variable. 
pair := p rpcPair := PairHistory{ - NodeFrom: pair.Pair.From[:], - NodeTo: pair.Pair.To[:], - Timestamp: pair.Timestamp.Unix(), - MinPenalizeAmtSat: int64( - pair.MinPenalizeAmt.ToSatoshis(), - ), - SuccessProb: float32(pair.SuccessProb), - LastAttemptSuccessful: pair.LastAttemptSuccessful, + NodeFrom: pair.Pair.From[:], + NodeTo: pair.Pair.To[:], + History: toRPCPairData(&pair.TimedPairResult), } rpcPairs = append(rpcPairs, &rpcPair) } response := QueryMissionControlResponse{ - Nodes: rpcNodes, Pairs: rpcPairs, } return &response, nil } -// TrackPayment returns a stream of payment state updates. The stream is +// toRPCPairData marshalls mission control pair data to the rpc struct. +func toRPCPairData(data *routing.TimedPairResult) *PairData { + rpcData := PairData{ + FailAmtSat: int64(data.FailAmt.ToSatoshis()), + FailAmtMsat: int64(data.FailAmt), + SuccessAmtSat: int64(data.SuccessAmt.ToSatoshis()), + SuccessAmtMsat: int64(data.SuccessAmt), + } + + if !data.FailTime.IsZero() { + rpcData.FailTime = data.FailTime.Unix() + } + + if !data.SuccessTime.IsZero() { + rpcData.SuccessTime = data.SuccessTime.Unix() + } + + return &rpcData +} + +// QueryProbability returns the current success probability estimate for a +// given node pair and amount. +func (s *Server) QueryProbability(ctx context.Context, + req *QueryProbabilityRequest) (*QueryProbabilityResponse, error) { + + fromNode, err := route.NewVertexFromBytes(req.FromNode) + if err != nil { + return nil, err + } + + toNode, err := route.NewVertexFromBytes(req.ToNode) + if err != nil { + return nil, err + } + + amt := lnwire.MilliSatoshi(req.AmtMsat) + + mc := s.cfg.RouterBackend.MissionControl + prob := mc.GetProbability(fromNode, toNode, amt) + history := mc.GetPairHistorySnapshot(fromNode, toNode) + + return &QueryProbabilityResponse{ + Probability: prob, + History: toRPCPairData(&history), + }, nil +} + +// TrackPaymentV2 returns a stream of payment state updates. 
The stream is // closed when the payment completes. -func (s *Server) TrackPayment(request *TrackPaymentRequest, - stream Router_TrackPaymentServer) error { +func (s *Server) TrackPaymentV2(request *TrackPaymentRequest, + stream Router_TrackPaymentV2Server) error { paymentHash, err := lntypes.MakeHash(request.PaymentHash) if err != nil { @@ -521,17 +439,17 @@ func (s *Server) TrackPayment(request *TrackPaymentRequest, log.Debugf("TrackPayment called for payment %v", paymentHash) - return s.trackPayment(paymentHash, stream) + return s.trackPayment(paymentHash, stream, request.NoInflightUpdates) } // trackPayment writes payment status updates to the provided stream. func (s *Server) trackPayment(paymentHash lntypes.Hash, - stream Router_TrackPaymentServer) error { + stream Router_TrackPaymentV2Server, noInflightUpdates bool) error { router := s.cfg.RouterBackend // Subscribe to the outcome of this payment. - inFlight, resultChan, err := router.Tower.SubscribePayment( + subscription, err := router.Tower.SubscribePayment( paymentHash, ) switch { @@ -540,91 +458,45 @@ func (s *Server) trackPayment(paymentHash lntypes.Hash, case err != nil: return err } + defer subscription.Close() - // If it is in flight, send a state update to the client. Payment status - // update streams are expected to always send the current payment state - // immediately. - if inFlight { - err = stream.Send(&PaymentStatus{ - State: PaymentState_IN_FLIGHT, - }) - if err != nil { - return err - } - } + // Stream updates back to the client. The first update is always the + // current state of the payment. + for { + select { + case item, ok := <-subscription.Updates: + if !ok { + // No more payment updates. + return nil + } + result := item.(*channeldb.MPPayment) - // Wait for the outcome of the payment. For payments that have - // completed, the result should already be waiting on the channel. - select { - case result := <-resultChan: - // Marshall result to rpc type. 
- var status PaymentStatus + // Skip in-flight updates unless requested. + if noInflightUpdates && + result.Status == channeldb.StatusInFlight { - if result.Success { - log.Debugf("Payment %v successfully completed", - paymentHash) + continue + } - status.State = PaymentState_SUCCEEDED - status.Preimage = result.Preimage[:] - status.Route, err = router.MarshallRoute( - result.Route, - ) + rpcPayment, err := router.MarshallPayment(result) if err != nil { return err } - } else { - state, err := marshallFailureReason( - result.FailureReason, - ) + + // Send event to the client. + err = stream.Send(rpcPayment) if err != nil { return err } - status.State = state - if result.Route != nil { - status.Route, err = router.MarshallRoute( - result.Route, - ) - if err != nil { - return err - } - } - } - - // Send event to the client. - err = stream.Send(&status) - if err != nil { - return err - } - - case <-stream.Context().Done(): - log.Debugf("Payment status stream %v canceled", paymentHash) - return stream.Context().Err() - } - - return nil -} -// marshallFailureReason marshalls the failure reason to the corresponding rpc -// type. -func marshallFailureReason(reason channeldb.FailureReason) ( - PaymentState, error) { + case <-s.quit: + return errServerShuttingDown - switch reason { - - case channeldb.FailureReasonTimeout: - return PaymentState_FAILED_TIMEOUT, nil - - case channeldb.FailureReasonNoRoute: - return PaymentState_FAILED_NO_ROUTE, nil - - case channeldb.FailureReasonError: - return PaymentState_FAILED_ERROR, nil - - case channeldb.FailureReasonIncorrectPaymentDetails: - return PaymentState_FAILED_INCORRECT_PAYMENT_DETAILS, nil + case <-stream.Context().Done(): + log.Debugf("Payment status stream %v canceled", paymentHash) + return stream.Context().Err() + } } - - return 0, errors.New("unknown failure reason") } // BuildRoute builds a route from a list of hop addresses. 
@@ -672,3 +544,42 @@ func (s *Server) BuildRoute(ctx context.Context, return routeResp, nil } + +// SubscribeHtlcEvents creates a uni-directional stream from the server to +// the client which delivers a stream of htlc events. +func (s *Server) SubscribeHtlcEvents(req *SubscribeHtlcEventsRequest, + stream Router_SubscribeHtlcEventsServer) error { + + htlcClient, err := s.cfg.RouterBackend.SubscribeHtlcEvents() + if err != nil { + return err + } + defer htlcClient.Cancel() + + for { + select { + case event := <-htlcClient.Updates(): + rpcEvent, err := rpcHtlcEvent(event) + if err != nil { + return err + } + + if err := stream.Send(rpcEvent); err != nil { + return err + } + + // If the stream's context is cancelled, return an error. + case <-stream.Context().Done(): + log.Debugf("htlc event stream cancelled") + return stream.Context().Err() + + // If the subscribe client terminates, exit with an error. + case <-htlcClient.Quit(): + return errors.New("htlc event subscription terminated") + + // If the server has been signalled to shut down, exit. + case <-s.quit: + return errServerShuttingDown + } + } +} diff --git a/lnrpc/routerrpc/router_server_deprecated.go b/lnrpc/routerrpc/router_server_deprecated.go new file mode 100644 index 0000000000..66f3d3689c --- /dev/null +++ b/lnrpc/routerrpc/router_server_deprecated.go @@ -0,0 +1,95 @@ +package routerrpc + +import ( + "encoding/hex" + "errors" + "fmt" + + "github.com/lightningnetwork/lnd/lnrpc" +) + +// legacyTrackPaymentServer is a wrapper struct that transforms a stream of main +// rpc payment structs into the legacy PaymentStatus format. +type legacyTrackPaymentServer struct { + Router_TrackPaymentServer +} + +// Send converts a Payment object and sends it as a PaymentStatus object on the +// embedded stream. 
+func (i *legacyTrackPaymentServer) Send(p *lnrpc.Payment) error { + var state PaymentState + switch p.Status { + case lnrpc.Payment_IN_FLIGHT: + state = PaymentState_IN_FLIGHT + case lnrpc.Payment_SUCCEEDED: + state = PaymentState_SUCCEEDED + case lnrpc.Payment_FAILED: + switch p.FailureReason { + case lnrpc.PaymentFailureReason_FAILURE_REASON_NONE: + return fmt.Errorf("expected fail reason") + + case lnrpc.PaymentFailureReason_FAILURE_REASON_TIMEOUT: + state = PaymentState_FAILED_TIMEOUT + + case lnrpc.PaymentFailureReason_FAILURE_REASON_NO_ROUTE: + state = PaymentState_FAILED_NO_ROUTE + + case lnrpc.PaymentFailureReason_FAILURE_REASON_ERROR: + state = PaymentState_FAILED_ERROR + + case lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS: + state = PaymentState_FAILED_INCORRECT_PAYMENT_DETAILS + + case lnrpc.PaymentFailureReason_FAILURE_REASON_INSUFFICIENT_BALANCE: + state = PaymentState_FAILED_INSUFFICIENT_BALANCE + + default: + return fmt.Errorf("unknown failure reason %v", + p.FailureReason) + } + default: + return fmt.Errorf("unknown state %v", p.Status) + } + + preimage, err := hex.DecodeString(p.PaymentPreimage) + if err != nil { + return err + } + + legacyState := PaymentStatus{ + State: state, + Preimage: preimage, + Htlcs: p.Htlcs, + } + + return i.Router_TrackPaymentServer.Send(&legacyState) +} + +// TrackPayment returns a stream of payment state updates. The stream is +// closed when the payment completes. +func (s *Server) TrackPayment(request *TrackPaymentRequest, + stream Router_TrackPaymentServer) error { + + legacyStream := legacyTrackPaymentServer{ + Router_TrackPaymentServer: stream, + } + return s.TrackPaymentV2(request, &legacyStream) +} + +// SendPayment attempts to route a payment described by the passed +// PaymentRequest to the final destination. If we are unable to route the +// payment, or cannot find a route that satisfies the constraints in the +// PaymentRequest, then an error will be returned. 
Otherwise, the payment +// pre-image, along with the final route will be returned. +func (s *Server) SendPayment(request *SendPaymentRequest, + stream Router_SendPaymentServer) error { + + if request.MaxParts > 1 { + return errors.New("for multi-part payments, use SendPaymentV2") + } + + legacyStream := legacyTrackPaymentServer{ + Router_TrackPaymentServer: stream, + } + return s.SendPaymentV2(request, &legacyStream) +} diff --git a/lnrpc/routerrpc/routing_config.go b/lnrpc/routerrpc/routing_config.go new file mode 100644 index 0000000000..98b3594cf3 --- /dev/null +++ b/lnrpc/routerrpc/routing_config.go @@ -0,0 +1,40 @@ +package routerrpc + +import ( + "time" + + "github.com/btcsuite/btcutil" +) + +// RoutingConfig contains the configurable parameters that control routing. +type RoutingConfig struct { + // MinRouteProbability is the minimum required route success probability + // to attempt the payment. + MinRouteProbability float64 `long:"minrtprob" description:"Minimum required route success probability to attempt the payment"` + + // AprioriHopProbability is the assumed success probability of a hop in + // a route when no other information is available. + AprioriHopProbability float64 `long:"apriorihopprob" description:"Assumed success probability of a hop in a route when no other information is available."` + + // AprioriWeight is a value in the range [0, 1] that defines to what + // extent historical results should be extrapolated to untried + // connections. Setting it to one will completely ignore historical + // results and always assume the configured a priori probability for + // untried connections. A value of zero will ignore the a priori + // probability completely and only base the probability on historical + // results, unless there are none available. + AprioriWeight float64 `long:"aprioriweight" description:"Weight of the a priori probability in success probability estimation. 
Valid values are in [0, 1]."` + + // PenaltyHalfLife defines after how much time a penalized node or + // channel is back at 50% probability. + PenaltyHalfLife time.Duration `long:"penaltyhalflife" description:"Defines the duration after which a penalized node or channel is back at 50% probability"` + + // AttemptCost is the virtual cost in path finding weight units of + // executing a payment attempt that fails. It is used to trade off + // potentially better routes against their probability of succeeding. + AttemptCost btcutil.Amount `long:"attemptcost" description:"The (virtual) cost in sats of a failed payment attempt"` + + // MaxMcHistory defines the maximum number of payment results that + // are held on disk by mission control. + MaxMcHistory int `long:"maxmchistory" description:"the maximum number of payment results that are held on disk by mission control"` +} diff --git a/lnrpc/routerrpc/subscribe_events.go b/lnrpc/routerrpc/subscribe_events.go new file mode 100644 index 0000000000..4454dbda2f --- /dev/null +++ b/lnrpc/routerrpc/subscribe_events.go @@ -0,0 +1,239 @@ +package routerrpc + +import ( + "fmt" + "time" + + "github.com/lightningnetwork/lnd/htlcswitch" + "github.com/lightningnetwork/lnd/invoices" + "github.com/lightningnetwork/lnd/lnrpc" +) + +// rpcHtlcEvent returns a rpc htlc event from a htlcswitch event. 
+func rpcHtlcEvent(htlcEvent interface{}) (*HtlcEvent, error) { + var ( + key htlcswitch.HtlcKey + timestamp time.Time + eventType htlcswitch.HtlcEventType + event isHtlcEvent_Event + ) + + switch e := htlcEvent.(type) { + case *htlcswitch.ForwardingEvent: + event = &HtlcEvent_ForwardEvent{ + ForwardEvent: &ForwardEvent{ + Info: rpcInfo(e.HtlcInfo), + }, + } + + key = e.HtlcKey + eventType = e.HtlcEventType + timestamp = e.Timestamp + + case *htlcswitch.ForwardingFailEvent: + event = &HtlcEvent_ForwardFailEvent{ + ForwardFailEvent: &ForwardFailEvent{}, + } + + key = e.HtlcKey + eventType = e.HtlcEventType + timestamp = e.Timestamp + + case *htlcswitch.LinkFailEvent: + failureCode, failReason, err := rpcFailReason( + e.LinkError, + ) + if err != nil { + return nil, err + } + + event = &HtlcEvent_LinkFailEvent{ + LinkFailEvent: &LinkFailEvent{ + Info: rpcInfo(e.HtlcInfo), + WireFailure: failureCode, + FailureDetail: failReason, + FailureString: e.LinkError.Error(), + }, + } + + key = e.HtlcKey + eventType = e.HtlcEventType + timestamp = e.Timestamp + + case *htlcswitch.SettleEvent: + event = &HtlcEvent_SettleEvent{ + SettleEvent: &SettleEvent{}, + } + + key = e.HtlcKey + eventType = e.HtlcEventType + timestamp = e.Timestamp + + default: + return nil, fmt.Errorf("unknown event type: %T", e) + } + + rpcEvent := &HtlcEvent{ + IncomingChannelId: key.IncomingCircuit.ChanID.ToUint64(), + OutgoingChannelId: key.OutgoingCircuit.ChanID.ToUint64(), + IncomingHtlcId: key.IncomingCircuit.HtlcID, + OutgoingHtlcId: key.OutgoingCircuit.HtlcID, + TimestampNs: uint64(timestamp.UnixNano()), + Event: event, + } + + // Convert the htlc event type to a rpc event. 
+ switch eventType { + case htlcswitch.HtlcEventTypeSend: + rpcEvent.EventType = HtlcEvent_SEND + + case htlcswitch.HtlcEventTypeReceive: + rpcEvent.EventType = HtlcEvent_RECEIVE + + case htlcswitch.HtlcEventTypeForward: + rpcEvent.EventType = HtlcEvent_FORWARD + + default: + return nil, fmt.Errorf("unknown event type: %v", eventType) + } + + return rpcEvent, nil +} + +// rpcInfo returns a rpc struct containing the htlc information from the +// switch's htlc info struct. +func rpcInfo(info htlcswitch.HtlcInfo) *HtlcInfo { + return &HtlcInfo{ + IncomingTimelock: info.IncomingTimeLock, + OutgoingTimelock: info.OutgoingTimeLock, + IncomingAmtMsat: uint64(info.IncomingAmt), + OutgoingAmtMsat: uint64(info.OutgoingAmt), + } +} + +// rpcFailReason maps a lnwire failure message and failure detail to a rpc +// failure code and detail. +func rpcFailReason(linkErr *htlcswitch.LinkError) (lnrpc.Failure_FailureCode, + FailureDetail, error) { + + wireErr, err := marshallError(linkErr) + if err != nil { + return 0, 0, err + } + + switch failureDetail := linkErr.FailureDetail.(type) { + case invoices.FailResolutionResult: + fd, err := rpcFailureResolution(failureDetail) + return wireErr.GetCode(), fd, err + + case htlcswitch.OutgoingFailure: + fd, err := rpcOutgoingFailure(failureDetail) + return wireErr.GetCode(), fd, err + + default: + return 0, 0, fmt.Errorf("unknown failure "+ + "detail type: %T", linkErr.FailureDetail) + + } + +} + +// rpcFailureResolution maps an invoice failure resolution to a rpc failure +// detail. Invoice failures have no zero resolution results (every failure +// is accompanied with a result), so we error if we fail to match the result +// type. 
+func rpcFailureResolution(invoiceFailure invoices.FailResolutionResult) ( + FailureDetail, error) { + + switch invoiceFailure { + case invoices.ResultReplayToCanceled: + return FailureDetail_INVOICE_CANCELED, nil + + case invoices.ResultInvoiceAlreadyCanceled: + return FailureDetail_INVOICE_CANCELED, nil + + case invoices.ResultAmountTooLow: + return FailureDetail_INVOICE_UNDERPAID, nil + + case invoices.ResultExpiryTooSoon: + return FailureDetail_INVOICE_EXPIRY_TOO_SOON, nil + + case invoices.ResultCanceled: + return FailureDetail_INVOICE_CANCELED, nil + + case invoices.ResultInvoiceNotOpen: + return FailureDetail_INVOICE_NOT_OPEN, nil + + case invoices.ResultMppTimeout: + return FailureDetail_MPP_INVOICE_TIMEOUT, nil + + case invoices.ResultAddressMismatch: + return FailureDetail_ADDRESS_MISMATCH, nil + + case invoices.ResultHtlcSetTotalMismatch: + return FailureDetail_SET_TOTAL_MISMATCH, nil + + case invoices.ResultHtlcSetTotalTooLow: + return FailureDetail_SET_TOTAL_TOO_LOW, nil + + case invoices.ResultHtlcSetOverpayment: + return FailureDetail_SET_OVERPAID, nil + + case invoices.ResultInvoiceNotFound: + return FailureDetail_UNKNOWN_INVOICE, nil + + case invoices.ResultKeySendError: + return FailureDetail_INVALID_KEYSEND, nil + + case invoices.ResultMppInProgress: + return FailureDetail_MPP_IN_PROGRESS, nil + + default: + return 0, fmt.Errorf("unknown fail resolution: %v", + invoiceFailure.FailureString()) + } +} + +// rpcOutgoingFailure maps an outgoing failure to a rpc FailureDetail. If the +// failure detail is FailureDetailNone, which indicates that the failure was +// a wire message which required no further failure detail, we return a no +// detail failure detail to indicate that there was no additional information. 
+func rpcOutgoingFailure(failureDetail htlcswitch.OutgoingFailure) ( + FailureDetail, error) { + + switch failureDetail { + case htlcswitch.OutgoingFailureNone: + return FailureDetail_NO_DETAIL, nil + + case htlcswitch.OutgoingFailureDecodeError: + return FailureDetail_ONION_DECODE, nil + + case htlcswitch.OutgoingFailureLinkNotEligible: + return FailureDetail_LINK_NOT_ELIGIBLE, nil + + case htlcswitch.OutgoingFailureOnChainTimeout: + return FailureDetail_ON_CHAIN_TIMEOUT, nil + + case htlcswitch.OutgoingFailureHTLCExceedsMax: + return FailureDetail_HTLC_EXCEEDS_MAX, nil + + case htlcswitch.OutgoingFailureInsufficientBalance: + return FailureDetail_INSUFFICIENT_BALANCE, nil + + case htlcswitch.OutgoingFailureCircularRoute: + return FailureDetail_CIRCULAR_ROUTE, nil + + case htlcswitch.OutgoingFailureIncompleteForward: + return FailureDetail_INCOMPLETE_FORWARD, nil + + case htlcswitch.OutgoingFailureDownstreamHtlcAdd: + return FailureDetail_HTLC_ADD_FAILED, nil + + case htlcswitch.OutgoingFailureForwardsDisabled: + return FailureDetail_FORWARDS_DISABLED, nil + + default: + return 0, fmt.Errorf("unknown outgoing failure "+ + "detail: %v", failureDetail.FailureString()) + } +} diff --git a/lnrpc/rpc.pb.go b/lnrpc/rpc.pb.go index 189fd18747..2c55f6b0f4 100644 --- a/lnrpc/rpc.pb.go +++ b/lnrpc/rpc.pb.go @@ -59,6 +59,107 @@ func (AddressType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_77a6da22d6a3feb1, []int{0} } +type CommitmentType int32 + +const ( + //* + //A channel using the legacy commitment format having tweaked to_remote + //keys. + CommitmentType_LEGACY CommitmentType = 0 + //* + //A channel that uses the modern commitment format where the key in the + //output of the remote party does not change each state. This makes back + //up and recovery easier as when the channel is closed, the funds go + //directly to that key. 
+ CommitmentType_STATIC_REMOTE_KEY CommitmentType = 1 + //* + //A channel that uses a commitment format that has anchor outputs on the + //commitments, allowing fee bumping after a force close transaction has + //been broadcast. + CommitmentType_ANCHORS CommitmentType = 2 + //* + //Returned when the commitment type isn't known or unavailable. + CommitmentType_UNKNOWN_COMMITMENT_TYPE CommitmentType = 999 +) + +var CommitmentType_name = map[int32]string{ + 0: "LEGACY", + 1: "STATIC_REMOTE_KEY", + 2: "ANCHORS", + 999: "UNKNOWN_COMMITMENT_TYPE", +} + +var CommitmentType_value = map[string]int32{ + "LEGACY": 0, + "STATIC_REMOTE_KEY": 1, + "ANCHORS": 2, + "UNKNOWN_COMMITMENT_TYPE": 999, +} + +func (x CommitmentType) String() string { + return proto.EnumName(CommitmentType_name, int32(x)) +} + +func (CommitmentType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{1} +} + +type Initiator int32 + +const ( + Initiator_INITIATOR_UNKNOWN Initiator = 0 + Initiator_INITIATOR_LOCAL Initiator = 1 + Initiator_INITIATOR_REMOTE Initiator = 2 + Initiator_INITIATOR_BOTH Initiator = 3 +) + +var Initiator_name = map[int32]string{ + 0: "INITIATOR_UNKNOWN", + 1: "INITIATOR_LOCAL", + 2: "INITIATOR_REMOTE", + 3: "INITIATOR_BOTH", +} + +var Initiator_value = map[string]int32{ + "INITIATOR_UNKNOWN": 0, + "INITIATOR_LOCAL": 1, + "INITIATOR_REMOTE": 2, + "INITIATOR_BOTH": 3, +} + +func (x Initiator) String() string { + return proto.EnumName(Initiator_name, int32(x)) +} + +func (Initiator) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{2} +} + +type NodeMetricType int32 + +const ( + NodeMetricType_UNKNOWN NodeMetricType = 0 + NodeMetricType_BETWEENNESS_CENTRALITY NodeMetricType = 1 +) + +var NodeMetricType_name = map[int32]string{ + 0: "UNKNOWN", + 1: "BETWEENNESS_CENTRALITY", +} + +var NodeMetricType_value = map[string]int32{ + "UNKNOWN": 0, + "BETWEENNESS_CENTRALITY": 1, +} + +func (x NodeMetricType) String() string { + 
return proto.EnumName(NodeMetricType_name, int32(x)) +} + +func (NodeMetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{3} +} + type InvoiceHTLCState int32 const ( @@ -84,7 +185,128 @@ func (x InvoiceHTLCState) String() string { } func (InvoiceHTLCState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{1} + return fileDescriptor_77a6da22d6a3feb1, []int{4} +} + +type PaymentFailureReason int32 + +const ( + //* + //Payment isn't failed (yet). + PaymentFailureReason_FAILURE_REASON_NONE PaymentFailureReason = 0 + //* + //There are more routes to try, but the payment timeout was exceeded. + PaymentFailureReason_FAILURE_REASON_TIMEOUT PaymentFailureReason = 1 + //* + //All possible routes were tried and failed permanently. Or were no + //routes to the destination at all. + PaymentFailureReason_FAILURE_REASON_NO_ROUTE PaymentFailureReason = 2 + //* + //A non-recoverable error has occured. + PaymentFailureReason_FAILURE_REASON_ERROR PaymentFailureReason = 3 + //* + //Payment details incorrect (unknown hash, invalid amt or + //invalid final cltv delta) + PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS PaymentFailureReason = 4 + //* + //Insufficient local balance. 
+ PaymentFailureReason_FAILURE_REASON_INSUFFICIENT_BALANCE PaymentFailureReason = 5 +) + +var PaymentFailureReason_name = map[int32]string{ + 0: "FAILURE_REASON_NONE", + 1: "FAILURE_REASON_TIMEOUT", + 2: "FAILURE_REASON_NO_ROUTE", + 3: "FAILURE_REASON_ERROR", + 4: "FAILURE_REASON_INCORRECT_PAYMENT_DETAILS", + 5: "FAILURE_REASON_INSUFFICIENT_BALANCE", +} + +var PaymentFailureReason_value = map[string]int32{ + "FAILURE_REASON_NONE": 0, + "FAILURE_REASON_TIMEOUT": 1, + "FAILURE_REASON_NO_ROUTE": 2, + "FAILURE_REASON_ERROR": 3, + "FAILURE_REASON_INCORRECT_PAYMENT_DETAILS": 4, + "FAILURE_REASON_INSUFFICIENT_BALANCE": 5, +} + +func (x PaymentFailureReason) String() string { + return proto.EnumName(PaymentFailureReason_name, int32(x)) +} + +func (PaymentFailureReason) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{5} +} + +type FeatureBit int32 + +const ( + FeatureBit_DATALOSS_PROTECT_REQ FeatureBit = 0 + FeatureBit_DATALOSS_PROTECT_OPT FeatureBit = 1 + FeatureBit_INITIAL_ROUING_SYNC FeatureBit = 3 + FeatureBit_UPFRONT_SHUTDOWN_SCRIPT_REQ FeatureBit = 4 + FeatureBit_UPFRONT_SHUTDOWN_SCRIPT_OPT FeatureBit = 5 + FeatureBit_GOSSIP_QUERIES_REQ FeatureBit = 6 + FeatureBit_GOSSIP_QUERIES_OPT FeatureBit = 7 + FeatureBit_TLV_ONION_REQ FeatureBit = 8 + FeatureBit_TLV_ONION_OPT FeatureBit = 9 + FeatureBit_EXT_GOSSIP_QUERIES_REQ FeatureBit = 10 + FeatureBit_EXT_GOSSIP_QUERIES_OPT FeatureBit = 11 + FeatureBit_STATIC_REMOTE_KEY_REQ FeatureBit = 12 + FeatureBit_STATIC_REMOTE_KEY_OPT FeatureBit = 13 + FeatureBit_PAYMENT_ADDR_REQ FeatureBit = 14 + FeatureBit_PAYMENT_ADDR_OPT FeatureBit = 15 + FeatureBit_MPP_REQ FeatureBit = 16 + FeatureBit_MPP_OPT FeatureBit = 17 +) + +var FeatureBit_name = map[int32]string{ + 0: "DATALOSS_PROTECT_REQ", + 1: "DATALOSS_PROTECT_OPT", + 3: "INITIAL_ROUING_SYNC", + 4: "UPFRONT_SHUTDOWN_SCRIPT_REQ", + 5: "UPFRONT_SHUTDOWN_SCRIPT_OPT", + 6: "GOSSIP_QUERIES_REQ", + 7: "GOSSIP_QUERIES_OPT", + 8: "TLV_ONION_REQ", + 9: 
"TLV_ONION_OPT", + 10: "EXT_GOSSIP_QUERIES_REQ", + 11: "EXT_GOSSIP_QUERIES_OPT", + 12: "STATIC_REMOTE_KEY_REQ", + 13: "STATIC_REMOTE_KEY_OPT", + 14: "PAYMENT_ADDR_REQ", + 15: "PAYMENT_ADDR_OPT", + 16: "MPP_REQ", + 17: "MPP_OPT", +} + +var FeatureBit_value = map[string]int32{ + "DATALOSS_PROTECT_REQ": 0, + "DATALOSS_PROTECT_OPT": 1, + "INITIAL_ROUING_SYNC": 3, + "UPFRONT_SHUTDOWN_SCRIPT_REQ": 4, + "UPFRONT_SHUTDOWN_SCRIPT_OPT": 5, + "GOSSIP_QUERIES_REQ": 6, + "GOSSIP_QUERIES_OPT": 7, + "TLV_ONION_REQ": 8, + "TLV_ONION_OPT": 9, + "EXT_GOSSIP_QUERIES_REQ": 10, + "EXT_GOSSIP_QUERIES_OPT": 11, + "STATIC_REMOTE_KEY_REQ": 12, + "STATIC_REMOTE_KEY_OPT": 13, + "PAYMENT_ADDR_REQ": 14, + "PAYMENT_ADDR_OPT": 15, + "MPP_REQ": 16, + "MPP_OPT": 17, +} + +func (x FeatureBit) String() string { + return proto.EnumName(FeatureBit_name, int32(x)) +} + +func (FeatureBit) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{6} } type ChannelCloseSummary_ClosureType int32 @@ -158,13 +380,67 @@ func (Peer_SyncType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_77a6da22d6a3feb1, []int{46, 0} } +type PeerEvent_EventType int32 + +const ( + PeerEvent_PEER_ONLINE PeerEvent_EventType = 0 + PeerEvent_PEER_OFFLINE PeerEvent_EventType = 1 +) + +var PeerEvent_EventType_name = map[int32]string{ + 0: "PEER_ONLINE", + 1: "PEER_OFFLINE", +} + +var PeerEvent_EventType_value = map[string]int32{ + "PEER_ONLINE": 0, + "PEER_OFFLINE": 1, +} + +func (x PeerEvent_EventType) String() string { + return proto.EnumName(PeerEvent_EventType_name, int32(x)) +} + +func (PeerEvent_EventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{51, 0} +} + +type PendingChannelsResponse_ForceClosedChannel_AnchorState int32 + +const ( + PendingChannelsResponse_ForceClosedChannel_LIMBO PendingChannelsResponse_ForceClosedChannel_AnchorState = 0 + PendingChannelsResponse_ForceClosedChannel_RECOVERED PendingChannelsResponse_ForceClosedChannel_AnchorState 
= 1 + PendingChannelsResponse_ForceClosedChannel_LOST PendingChannelsResponse_ForceClosedChannel_AnchorState = 2 +) + +var PendingChannelsResponse_ForceClosedChannel_AnchorState_name = map[int32]string{ + 0: "LIMBO", + 1: "RECOVERED", + 2: "LOST", +} + +var PendingChannelsResponse_ForceClosedChannel_AnchorState_value = map[string]int32{ + "LIMBO": 0, + "RECOVERED": 1, + "LOST": 2, +} + +func (x PendingChannelsResponse_ForceClosedChannel_AnchorState) String() string { + return proto.EnumName(PendingChannelsResponse_ForceClosedChannel_AnchorState_name, int32(x)) +} + +func (PendingChannelsResponse_ForceClosedChannel_AnchorState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{76, 5, 0} +} + type ChannelEventUpdate_UpdateType int32 const ( - ChannelEventUpdate_OPEN_CHANNEL ChannelEventUpdate_UpdateType = 0 - ChannelEventUpdate_CLOSED_CHANNEL ChannelEventUpdate_UpdateType = 1 - ChannelEventUpdate_ACTIVE_CHANNEL ChannelEventUpdate_UpdateType = 2 - ChannelEventUpdate_INACTIVE_CHANNEL ChannelEventUpdate_UpdateType = 3 + ChannelEventUpdate_OPEN_CHANNEL ChannelEventUpdate_UpdateType = 0 + ChannelEventUpdate_CLOSED_CHANNEL ChannelEventUpdate_UpdateType = 1 + ChannelEventUpdate_ACTIVE_CHANNEL ChannelEventUpdate_UpdateType = 2 + ChannelEventUpdate_INACTIVE_CHANNEL ChannelEventUpdate_UpdateType = 3 + ChannelEventUpdate_PENDING_OPEN_CHANNEL ChannelEventUpdate_UpdateType = 4 ) var ChannelEventUpdate_UpdateType_name = map[int32]string{ @@ -172,13 +448,15 @@ var ChannelEventUpdate_UpdateType_name = map[int32]string{ 1: "CLOSED_CHANNEL", 2: "ACTIVE_CHANNEL", 3: "INACTIVE_CHANNEL", + 4: "PENDING_OPEN_CHANNEL", } var ChannelEventUpdate_UpdateType_value = map[string]int32{ - "OPEN_CHANNEL": 0, - "CLOSED_CHANNEL": 1, - "ACTIVE_CHANNEL": 2, - "INACTIVE_CHANNEL": 3, + "OPEN_CHANNEL": 0, + "CLOSED_CHANNEL": 1, + "ACTIVE_CHANNEL": 2, + "INACTIVE_CHANNEL": 3, + "PENDING_OPEN_CHANNEL": 4, } func (x ChannelEventUpdate_UpdateType) String() string { @@ -186,7 
+464,7 @@ func (x ChannelEventUpdate_UpdateType) String() string { } func (ChannelEventUpdate_UpdateType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{64, 0} + return fileDescriptor_77a6da22d6a3feb1, []int{78, 0} } type Invoice_InvoiceState int32 @@ -217,7 +495,7 @@ func (x Invoice_InvoiceState) String() string { } func (Invoice_InvoiceState) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{95, 0} + return fileDescriptor_77a6da22d6a3feb1, []int{113, 0} } type Payment_PaymentStatus int32 @@ -248,17 +526,158 @@ func (x Payment_PaymentStatus) String() string { } func (Payment_PaymentStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{102, 0} + return fileDescriptor_77a6da22d6a3feb1, []int{120, 0} +} + +type HTLCAttempt_HTLCStatus int32 + +const ( + HTLCAttempt_IN_FLIGHT HTLCAttempt_HTLCStatus = 0 + HTLCAttempt_SUCCEEDED HTLCAttempt_HTLCStatus = 1 + HTLCAttempt_FAILED HTLCAttempt_HTLCStatus = 2 +) + +var HTLCAttempt_HTLCStatus_name = map[int32]string{ + 0: "IN_FLIGHT", + 1: "SUCCEEDED", + 2: "FAILED", +} + +var HTLCAttempt_HTLCStatus_value = map[string]int32{ + "IN_FLIGHT": 0, + "SUCCEEDED": 1, + "FAILED": 2, +} + +func (x HTLCAttempt_HTLCStatus) String() string { + return proto.EnumName(HTLCAttempt_HTLCStatus_name, int32(x)) +} + +func (HTLCAttempt_HTLCStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{121, 0} +} + +type Failure_FailureCode int32 + +const ( + //* + //The numbers assigned in this enumeration match the failure codes as + //defined in BOLT #4. Because protobuf 3 requires enums to start with 0, + //a RESERVED value is added. 
+ Failure_RESERVED Failure_FailureCode = 0 + Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS Failure_FailureCode = 1 + Failure_INCORRECT_PAYMENT_AMOUNT Failure_FailureCode = 2 + Failure_FINAL_INCORRECT_CLTV_EXPIRY Failure_FailureCode = 3 + Failure_FINAL_INCORRECT_HTLC_AMOUNT Failure_FailureCode = 4 + Failure_FINAL_EXPIRY_TOO_SOON Failure_FailureCode = 5 + Failure_INVALID_REALM Failure_FailureCode = 6 + Failure_EXPIRY_TOO_SOON Failure_FailureCode = 7 + Failure_INVALID_ONION_VERSION Failure_FailureCode = 8 + Failure_INVALID_ONION_HMAC Failure_FailureCode = 9 + Failure_INVALID_ONION_KEY Failure_FailureCode = 10 + Failure_AMOUNT_BELOW_MINIMUM Failure_FailureCode = 11 + Failure_FEE_INSUFFICIENT Failure_FailureCode = 12 + Failure_INCORRECT_CLTV_EXPIRY Failure_FailureCode = 13 + Failure_CHANNEL_DISABLED Failure_FailureCode = 14 + Failure_TEMPORARY_CHANNEL_FAILURE Failure_FailureCode = 15 + Failure_REQUIRED_NODE_FEATURE_MISSING Failure_FailureCode = 16 + Failure_REQUIRED_CHANNEL_FEATURE_MISSING Failure_FailureCode = 17 + Failure_UNKNOWN_NEXT_PEER Failure_FailureCode = 18 + Failure_TEMPORARY_NODE_FAILURE Failure_FailureCode = 19 + Failure_PERMANENT_NODE_FAILURE Failure_FailureCode = 20 + Failure_PERMANENT_CHANNEL_FAILURE Failure_FailureCode = 21 + Failure_EXPIRY_TOO_FAR Failure_FailureCode = 22 + Failure_MPP_TIMEOUT Failure_FailureCode = 23 + //* + //An internal error occurred. + Failure_INTERNAL_FAILURE Failure_FailureCode = 997 + //* + //The error source is known, but the failure itself couldn't be decoded. + Failure_UNKNOWN_FAILURE Failure_FailureCode = 998 + //* + //An unreadable failure result is returned if the received failure message + //cannot be decrypted. In that case the error source is unknown. 
+ Failure_UNREADABLE_FAILURE Failure_FailureCode = 999 +) + +var Failure_FailureCode_name = map[int32]string{ + 0: "RESERVED", + 1: "INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS", + 2: "INCORRECT_PAYMENT_AMOUNT", + 3: "FINAL_INCORRECT_CLTV_EXPIRY", + 4: "FINAL_INCORRECT_HTLC_AMOUNT", + 5: "FINAL_EXPIRY_TOO_SOON", + 6: "INVALID_REALM", + 7: "EXPIRY_TOO_SOON", + 8: "INVALID_ONION_VERSION", + 9: "INVALID_ONION_HMAC", + 10: "INVALID_ONION_KEY", + 11: "AMOUNT_BELOW_MINIMUM", + 12: "FEE_INSUFFICIENT", + 13: "INCORRECT_CLTV_EXPIRY", + 14: "CHANNEL_DISABLED", + 15: "TEMPORARY_CHANNEL_FAILURE", + 16: "REQUIRED_NODE_FEATURE_MISSING", + 17: "REQUIRED_CHANNEL_FEATURE_MISSING", + 18: "UNKNOWN_NEXT_PEER", + 19: "TEMPORARY_NODE_FAILURE", + 20: "PERMANENT_NODE_FAILURE", + 21: "PERMANENT_CHANNEL_FAILURE", + 22: "EXPIRY_TOO_FAR", + 23: "MPP_TIMEOUT", + 997: "INTERNAL_FAILURE", + 998: "UNKNOWN_FAILURE", + 999: "UNREADABLE_FAILURE", +} + +var Failure_FailureCode_value = map[string]int32{ + "RESERVED": 0, + "INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS": 1, + "INCORRECT_PAYMENT_AMOUNT": 2, + "FINAL_INCORRECT_CLTV_EXPIRY": 3, + "FINAL_INCORRECT_HTLC_AMOUNT": 4, + "FINAL_EXPIRY_TOO_SOON": 5, + "INVALID_REALM": 6, + "EXPIRY_TOO_SOON": 7, + "INVALID_ONION_VERSION": 8, + "INVALID_ONION_HMAC": 9, + "INVALID_ONION_KEY": 10, + "AMOUNT_BELOW_MINIMUM": 11, + "FEE_INSUFFICIENT": 12, + "INCORRECT_CLTV_EXPIRY": 13, + "CHANNEL_DISABLED": 14, + "TEMPORARY_CHANNEL_FAILURE": 15, + "REQUIRED_NODE_FEATURE_MISSING": 16, + "REQUIRED_CHANNEL_FEATURE_MISSING": 17, + "UNKNOWN_NEXT_PEER": 18, + "TEMPORARY_NODE_FAILURE": 19, + "PERMANENT_NODE_FAILURE": 20, + "PERMANENT_CHANNEL_FAILURE": 21, + "EXPIRY_TOO_FAR": 22, + "MPP_TIMEOUT": 23, + "INTERNAL_FAILURE": 997, + "UNKNOWN_FAILURE": 998, + "UNREADABLE_FAILURE": 999, +} + +func (x Failure_FailureCode) String() string { + return proto.EnumName(Failure_FailureCode_name, int32(x)) +} + +func (Failure_FailureCode) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptor_77a6da22d6a3feb1, []int{154, 0} } type GenSeedRequest struct { //* //aezeed_passphrase is an optional user provided passphrase that will be used - //to encrypt the generated aezeed cipher seed. + //to encrypt the generated aezeed cipher seed. When using REST, this field + //must be encoded as base64. AezeedPassphrase []byte `protobuf:"bytes,1,opt,name=aezeed_passphrase,json=aezeedPassphrase,proto3" json:"aezeed_passphrase,omitempty"` //* //seed_entropy is an optional 16-bytes generated via CSPRNG. If not //specified, then a fresh set of randomness will be used to create the seed. + //When using REST, this field must be encoded as base64. SeedEntropy []byte `protobuf:"bytes,2,opt,name=seed_entropy,json=seedEntropy,proto3" json:"seed_entropy,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -364,7 +783,8 @@ type InitWalletRequest struct { //* //wallet_password is the passphrase that should be used to encrypt the //wallet. This MUST be at least 8 chars in length. After creation, this - //password is required to unlock the daemon. + //password is required to unlock the daemon. When using REST, this field + //must be encoded as base64. WalletPassword []byte `protobuf:"bytes,1,opt,name=wallet_password,json=walletPassword,proto3" json:"wallet_password,omitempty"` //* //cipher_seed_mnemonic is a 24-word mnemonic that encodes a prior aezeed @@ -373,7 +793,8 @@ type InitWalletRequest struct { CipherSeedMnemonic []string `protobuf:"bytes,2,rep,name=cipher_seed_mnemonic,json=cipherSeedMnemonic,proto3" json:"cipher_seed_mnemonic,omitempty"` //* //aezeed_passphrase is an optional user provided passphrase that will be used - //to encrypt the generated aezeed cipher seed. + //to encrypt the generated aezeed cipher seed. When using REST, this field + //must be encoded as base64. 
AezeedPassphrase []byte `protobuf:"bytes,3,opt,name=aezeed_passphrase,json=aezeedPassphrase,proto3" json:"aezeed_passphrase,omitempty"` //* //recovery_window is an optional argument specifying the address lookahead @@ -490,7 +911,7 @@ type UnlockWalletRequest struct { //* //wallet_password should be the current valid passphrase for the daemon. This //will be required to decrypt on-disk material that the daemon requires to - //function properly. + //function properly. When using REST, this field must be encoded as base64. WalletPassword []byte `protobuf:"bytes,1,opt,name=wallet_password,json=walletPassword,proto3" json:"wallet_password,omitempty"` //* //recovery_window is an optional argument specifying the address lookahead @@ -592,11 +1013,11 @@ var xxx_messageInfo_UnlockWalletResponse proto.InternalMessageInfo type ChangePasswordRequest struct { //* //current_password should be the current valid passphrase used to unlock the - //daemon. + //daemon. When using REST, this field must be encoded as base64. CurrentPassword []byte `protobuf:"bytes,1,opt,name=current_password,json=currentPassword,proto3" json:"current_password,omitempty"` //* //new_password should be the new passphrase that will be needed to unlock the - //daemon. + //daemon. When using REST, this field must be encoded as base64. 
NewPassword []byte `protobuf:"bytes,2,opt,name=new_password,json=newPassword,proto3" json:"new_password,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -675,13 +1096,13 @@ var xxx_messageInfo_ChangePasswordResponse proto.InternalMessageInfo type Utxo struct { /// The type of address - Type AddressType `protobuf:"varint,1,opt,name=type,json=address_type,proto3,enum=lnrpc.AddressType" json:"type,omitempty"` + AddressType AddressType `protobuf:"varint,1,opt,name=address_type,json=addressType,proto3,enum=lnrpc.AddressType" json:"address_type,omitempty"` /// The address Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` /// The value of the unspent coin in satoshis - AmountSat int64 `protobuf:"varint,3,opt,name=amount_sat,proto3" json:"amount_sat,omitempty"` + AmountSat int64 `protobuf:"varint,3,opt,name=amount_sat,json=amountSat,proto3" json:"amount_sat,omitempty"` /// The pkscript in hex - PkScript string `protobuf:"bytes,4,opt,name=pk_script,proto3" json:"pk_script,omitempty"` + PkScript string `protobuf:"bytes,4,opt,name=pk_script,json=pkScript,proto3" json:"pk_script,omitempty"` /// The outpoint in format txid:n Outpoint *OutPoint `protobuf:"bytes,5,opt,name=outpoint,proto3" json:"outpoint,omitempty"` /// The number of confirmations for the Utxo @@ -716,9 +1137,9 @@ func (m *Utxo) XXX_DiscardUnknown() { var xxx_messageInfo_Utxo proto.InternalMessageInfo -func (m *Utxo) GetType() AddressType { +func (m *Utxo) GetAddressType() AddressType { if m != nil { - return m.Type + return m.AddressType } return AddressType_WITNESS_PUBKEY_HASH } @@ -760,23 +1181,23 @@ func (m *Utxo) GetConfirmations() int64 { type Transaction struct { /// The transaction hash - TxHash string `protobuf:"bytes,1,opt,name=tx_hash,proto3" json:"tx_hash,omitempty"` + TxHash string `protobuf:"bytes,1,opt,name=tx_hash,json=txHash,proto3" json:"tx_hash,omitempty"` /// The transaction amount, denominated in satoshis Amount 
int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` /// The number of confirmations - NumConfirmations int32 `protobuf:"varint,3,opt,name=num_confirmations,proto3" json:"num_confirmations,omitempty"` + NumConfirmations int32 `protobuf:"varint,3,opt,name=num_confirmations,json=numConfirmations,proto3" json:"num_confirmations,omitempty"` /// The hash of the block this transaction was included in - BlockHash string `protobuf:"bytes,4,opt,name=block_hash,proto3" json:"block_hash,omitempty"` + BlockHash string `protobuf:"bytes,4,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` /// The height of the block this transaction was included in - BlockHeight int32 `protobuf:"varint,5,opt,name=block_height,proto3" json:"block_height,omitempty"` + BlockHeight int32 `protobuf:"varint,5,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` /// Timestamp of this transaction - TimeStamp int64 `protobuf:"varint,6,opt,name=time_stamp,proto3" json:"time_stamp,omitempty"` + TimeStamp int64 `protobuf:"varint,6,opt,name=time_stamp,json=timeStamp,proto3" json:"time_stamp,omitempty"` /// Fees paid for this transaction - TotalFees int64 `protobuf:"varint,7,opt,name=total_fees,proto3" json:"total_fees,omitempty"` + TotalFees int64 `protobuf:"varint,7,opt,name=total_fees,json=totalFees,proto3" json:"total_fees,omitempty"` /// Addresses that received funds for this transaction - DestAddresses []string `protobuf:"bytes,8,rep,name=dest_addresses,proto3" json:"dest_addresses,omitempty"` + DestAddresses []string `protobuf:"bytes,8,rep,name=dest_addresses,json=destAddresses,proto3" json:"dest_addresses,omitempty"` /// The raw transaction hex. 
- RawTxHex string `protobuf:"bytes,9,opt,name=raw_tx_hex,proto3" json:"raw_tx_hex,omitempty"` + RawTxHex string `protobuf:"bytes,9,opt,name=raw_tx_hex,json=rawTxHex,proto3" json:"raw_tx_hex,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -944,6 +1365,7 @@ func (m *TransactionDetails) GetTransactions() []*Transaction { type FeeLimit struct { // Types that are valid to be assigned to Limit: // *FeeLimit_Fixed + // *FeeLimit_FixedMsat // *FeeLimit_Percent Limit isFeeLimit_Limit `protobuf_oneof:"limit"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -984,12 +1406,18 @@ type FeeLimit_Fixed struct { Fixed int64 `protobuf:"varint,1,opt,name=fixed,proto3,oneof"` } +type FeeLimit_FixedMsat struct { + FixedMsat int64 `protobuf:"varint,3,opt,name=fixed_msat,json=fixedMsat,proto3,oneof"` +} + type FeeLimit_Percent struct { Percent int64 `protobuf:"varint,2,opt,name=percent,proto3,oneof"` } func (*FeeLimit_Fixed) isFeeLimit_Limit() {} +func (*FeeLimit_FixedMsat) isFeeLimit_Limit() {} + func (*FeeLimit_Percent) isFeeLimit_Limit() {} func (m *FeeLimit) GetLimit() isFeeLimit_Limit { @@ -1006,6 +1434,13 @@ func (m *FeeLimit) GetFixed() int64 { return 0 } +func (m *FeeLimit) GetFixedMsat() int64 { + if x, ok := m.GetLimit().(*FeeLimit_FixedMsat); ok { + return x.FixedMsat + } + return 0 +} + func (m *FeeLimit) GetPercent() int64 { if x, ok := m.GetLimit().(*FeeLimit_Percent); ok { return x.Percent @@ -1017,23 +1452,40 @@ func (m *FeeLimit) GetPercent() int64 { func (*FeeLimit) XXX_OneofWrappers() []interface{} { return []interface{}{ (*FeeLimit_Fixed)(nil), + (*FeeLimit_FixedMsat)(nil), (*FeeLimit_Percent)(nil), } } type SendRequest struct { - /// The identity pubkey of the payment recipient + //* + //The identity pubkey of the payment recipient. When using REST, this field + //must be encoded as base64. 
Dest []byte `protobuf:"bytes,1,opt,name=dest,proto3" json:"dest,omitempty"` - /// The hex-encoded identity pubkey of the payment recipient - DestString string `protobuf:"bytes,2,opt,name=dest_string,json=destString,proto3" json:"dest_string,omitempty"` - /// Number of satoshis to send. + //* + //The hex-encoded identity pubkey of the payment recipient. Deprecated now + //that the REST gateway supports base64 encoding of bytes fields. + DestString string `protobuf:"bytes,2,opt,name=dest_string,json=destString,proto3" json:"dest_string,omitempty"` // Deprecated: Do not use. + //* + //The amount to send expressed in satoshis. + // + //The fields amt and amt_msat are mutually exclusive. Amt int64 `protobuf:"varint,3,opt,name=amt,proto3" json:"amt,omitempty"` - /// The hash to use within the payment's HTLC + //* + //The amount to send expressed in millisatoshis. + // + //The fields amt and amt_msat are mutually exclusive. + AmtMsat int64 `protobuf:"varint,12,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` + //* + //The hash to use within the payment's HTLC. When using REST, this field + //must be encoded as base64. PaymentHash []byte `protobuf:"bytes,4,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - /// The hex-encoded hash to use within the payment's HTLC - PaymentHashString string `protobuf:"bytes,5,opt,name=payment_hash_string,json=paymentHashString,proto3" json:"payment_hash_string,omitempty"` //* - //A bare-bones invoice for a payment within the Lightning Network. With the + //The hex-encoded hash to use within the payment's HTLC. Deprecated now + //that the REST gateway supports base64 encoding of bytes fields. + PaymentHashString string `protobuf:"bytes,5,opt,name=payment_hash_string,json=paymentHashString,proto3" json:"payment_hash_string,omitempty"` // Deprecated: Do not use. + //* + //A bare-bones invoice for a payment within the Lightning Network. 
With the //details of the invoice, the sender has all the data necessary to send a //payment to the recipient. PaymentRequest string `protobuf:"bytes,6,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` @@ -1052,6 +1504,9 @@ type SendRequest struct { //any channel may be used. OutgoingChanId uint64 `protobuf:"varint,9,opt,name=outgoing_chan_id,json=outgoingChanId,proto3" json:"outgoing_chan_id,omitempty"` //* + //The pubkey of the last hop of the route. If empty, any hop may be used. + LastHopPubkey []byte `protobuf:"bytes,13,opt,name=last_hop_pubkey,json=lastHopPubkey,proto3" json:"last_hop_pubkey,omitempty"` + //* //An optional maximum total time lock for the route. This should not exceed //lnd's `--max-cltv-expiry` setting. If zero, then the value of //`--max-cltv-expiry` is enforced. @@ -1059,11 +1514,22 @@ type SendRequest struct { //* //An optional field that can be used to pass an arbitrary set of TLV records //to a peer which understands the new records. This can be used to pass - //application specific data during the payment attempt. - DestTlv map[uint64][]byte `protobuf:"bytes,11,rep,name=dest_tlv,json=destTlv,proto3" json:"dest_tlv,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + //application specific data during the payment attempt. Record types are + //required to be in the custom range >= 65536. When using REST, the values + //must be encoded as base64. + DestCustomRecords map[uint64][]byte `protobuf:"bytes,11,rep,name=dest_custom_records,json=destCustomRecords,proto3" json:"dest_custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + /// If set, circular payments to self are permitted. 
+ AllowSelfPayment bool `protobuf:"varint,14,opt,name=allow_self_payment,json=allowSelfPayment,proto3" json:"allow_self_payment,omitempty"` + //* + //Features assumed to be supported by the final node. All transitive feature + //dependencies must also be set properly. For a given feature bit pair, either + //optional or remote may be set, but not both. If this field is nil or empty, + //the router will try to load destination features from the graph as a + //fallback. + DestFeatures []FeatureBit `protobuf:"varint,15,rep,packed,name=dest_features,json=destFeatures,proto3,enum=lnrpc.FeatureBit" json:"dest_features,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *SendRequest) Reset() { *m = SendRequest{} } @@ -1098,6 +1564,7 @@ func (m *SendRequest) GetDest() []byte { return nil } +// Deprecated: Do not use. func (m *SendRequest) GetDestString() string { if m != nil { return m.DestString @@ -1112,6 +1579,13 @@ func (m *SendRequest) GetAmt() int64 { return 0 } +func (m *SendRequest) GetAmtMsat() int64 { + if m != nil { + return m.AmtMsat + } + return 0 +} + func (m *SendRequest) GetPaymentHash() []byte { if m != nil { return m.PaymentHash @@ -1119,6 +1593,7 @@ func (m *SendRequest) GetPaymentHash() []byte { return nil } +// Deprecated: Do not use. 
func (m *SendRequest) GetPaymentHashString() string { if m != nil { return m.PaymentHashString @@ -1154,6 +1629,13 @@ func (m *SendRequest) GetOutgoingChanId() uint64 { return 0 } +func (m *SendRequest) GetLastHopPubkey() []byte { + if m != nil { + return m.LastHopPubkey + } + return nil +} + func (m *SendRequest) GetCltvLimit() uint32 { if m != nil { return m.CltvLimit @@ -1161,18 +1643,32 @@ func (m *SendRequest) GetCltvLimit() uint32 { return 0 } -func (m *SendRequest) GetDestTlv() map[uint64][]byte { +func (m *SendRequest) GetDestCustomRecords() map[uint64][]byte { + if m != nil { + return m.DestCustomRecords + } + return nil +} + +func (m *SendRequest) GetAllowSelfPayment() bool { + if m != nil { + return m.AllowSelfPayment + } + return false +} + +func (m *SendRequest) GetDestFeatures() []FeatureBit { if m != nil { - return m.DestTlv + return m.DestFeatures } return nil } type SendResponse struct { - PaymentError string `protobuf:"bytes,1,opt,name=payment_error,proto3" json:"payment_error,omitempty"` - PaymentPreimage []byte `protobuf:"bytes,2,opt,name=payment_preimage,proto3" json:"payment_preimage,omitempty"` - PaymentRoute *Route `protobuf:"bytes,3,opt,name=payment_route,proto3" json:"payment_route,omitempty"` - PaymentHash []byte `protobuf:"bytes,4,opt,name=payment_hash,proto3" json:"payment_hash,omitempty"` + PaymentError string `protobuf:"bytes,1,opt,name=payment_error,json=paymentError,proto3" json:"payment_error,omitempty"` + PaymentPreimage []byte `protobuf:"bytes,2,opt,name=payment_preimage,json=paymentPreimage,proto3" json:"payment_preimage,omitempty"` + PaymentRoute *Route `protobuf:"bytes,3,opt,name=payment_route,json=paymentRoute,proto3" json:"payment_route,omitempty"` + PaymentHash []byte `protobuf:"bytes,4,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1232,10 +1728,14 @@ func (m *SendResponse) 
GetPaymentHash() []byte { } type SendToRouteRequest struct { - /// The payment hash to use for the HTLC. + //* + //The payment hash to use for the HTLC. When using REST, this field must be + //encoded as base64. PaymentHash []byte `protobuf:"bytes,1,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` - /// An optional hex-encoded payment hash to be used for the HTLC. - PaymentHashString string `protobuf:"bytes,2,opt,name=payment_hash_string,json=paymentHashString,proto3" json:"payment_hash_string,omitempty"` + //* + //An optional hex-encoded payment hash to be used for the HTLC. Deprecated now + //that the REST gateway supports base64 encoding of bytes fields. + PaymentHashString string `protobuf:"bytes,2,opt,name=payment_hash_string,json=paymentHashString,proto3" json:"payment_hash_string,omitempty"` // Deprecated: Do not use. /// Route that should be used to attempt to complete the payment. Route *Route `protobuf:"bytes,4,opt,name=route,proto3" json:"route,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1275,6 +1775,7 @@ func (m *SendToRouteRequest) GetPaymentHash() []byte { return nil } +// Deprecated: Do not use. func (m *SendToRouteRequest) GetPaymentHashString() string { if m != nil { return m.PaymentHashString @@ -1296,27 +1797,32 @@ type ChannelAcceptRequest struct { ChainHash []byte `protobuf:"bytes,2,opt,name=chain_hash,json=chainHash,proto3" json:"chain_hash,omitempty"` /// The pending channel id. PendingChanId []byte `protobuf:"bytes,3,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` - /// The funding amount in satoshis that initiator wishes to use in the channel. + /// The funding amount in satoshis that initiator wishes to use in the + /// channel. FundingAmt uint64 `protobuf:"varint,4,opt,name=funding_amt,json=fundingAmt,proto3" json:"funding_amt,omitempty"` /// The push amount of the proposed channel in millisatoshis. 
PushAmt uint64 `protobuf:"varint,5,opt,name=push_amt,json=pushAmt,proto3" json:"push_amt,omitempty"` /// The dust limit of the initiator's commitment tx. DustLimit uint64 `protobuf:"varint,6,opt,name=dust_limit,json=dustLimit,proto3" json:"dust_limit,omitempty"` - /// The maximum amount of coins in millisatoshis that can be pending in this channel. + /// The maximum amount of coins in millisatoshis that can be pending in this + /// channel. MaxValueInFlight uint64 `protobuf:"varint,7,opt,name=max_value_in_flight,json=maxValueInFlight,proto3" json:"max_value_in_flight,omitempty"` - /// The minimum amount of satoshis the initiator requires us to have at all times. + /// The minimum amount of satoshis the initiator requires us to have at all + /// times. ChannelReserve uint64 `protobuf:"varint,8,opt,name=channel_reserve,json=channelReserve,proto3" json:"channel_reserve,omitempty"` /// The smallest HTLC in millisatoshis that the initiator will accept. MinHtlc uint64 `protobuf:"varint,9,opt,name=min_htlc,json=minHtlc,proto3" json:"min_htlc,omitempty"` - /// The initial fee rate that the initiator suggests for both commitment transactions. + /// The initial fee rate that the initiator suggests for both commitment + /// transactions. FeePerKw uint64 `protobuf:"varint,10,opt,name=fee_per_kw,json=feePerKw,proto3" json:"fee_per_kw,omitempty"` //* - //The number of blocks to use for the relative time lock in the pay-to-self output - //of both commitment transactions. + //The number of blocks to use for the relative time lock in the pay-to-self + //output of both commitment transactions. CsvDelay uint32 `protobuf:"varint,11,opt,name=csv_delay,json=csvDelay,proto3" json:"csv_delay,omitempty"` /// The total number of incoming HTLC's that the initiator will accept. 
MaxAcceptedHtlcs uint32 `protobuf:"varint,12,opt,name=max_accepted_htlcs,json=maxAcceptedHtlcs,proto3" json:"max_accepted_htlcs,omitempty"` - /// A bit-field which the initiator uses to specify proposed channel behavior. + /// A bit-field which the initiator uses to specify proposed channel + /// behavior. ChannelFlags uint32 `protobuf:"varint,13,opt,name=channel_flags,json=channelFlags,proto3" json:"channel_flags,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1494,7 +2000,7 @@ type ChannelPoint struct { // *ChannelPoint_FundingTxidStr FundingTxid isChannelPoint_FundingTxid `protobuf_oneof:"funding_txid"` /// The index of the output of the funding transaction - OutputIndex uint32 `protobuf:"varint,3,opt,name=output_index,proto3" json:"output_index,omitempty"` + OutputIndex uint32 `protobuf:"varint,3,opt,name=output_index,json=outputIndex,proto3" json:"output_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1530,11 +2036,11 @@ type isChannelPoint_FundingTxid interface { } type ChannelPoint_FundingTxidBytes struct { - FundingTxidBytes []byte `protobuf:"bytes,1,opt,name=funding_txid_bytes,proto3,oneof"` + FundingTxidBytes []byte `protobuf:"bytes,1,opt,name=funding_txid_bytes,json=fundingTxidBytes,proto3,oneof"` } type ChannelPoint_FundingTxidStr struct { - FundingTxidStr string `protobuf:"bytes,2,opt,name=funding_txid_str,proto3,oneof"` + FundingTxidStr string `protobuf:"bytes,2,opt,name=funding_txid_str,json=fundingTxidStr,proto3,oneof"` } func (*ChannelPoint_FundingTxidBytes) isChannelPoint_FundingTxid() {} @@ -1579,11 +2085,11 @@ func (*ChannelPoint) XXX_OneofWrappers() []interface{} { type OutPoint struct { /// Raw bytes representing the transaction id. 
- TxidBytes []byte `protobuf:"bytes,1,opt,name=txid_bytes,proto3" json:"txid_bytes,omitempty"` + TxidBytes []byte `protobuf:"bytes,1,opt,name=txid_bytes,json=txidBytes,proto3" json:"txid_bytes,omitempty"` /// Reversed, hex-encoded string representing the transaction id. - TxidStr string `protobuf:"bytes,2,opt,name=txid_str,proto3" json:"txid_str,omitempty"` + TxidStr string `protobuf:"bytes,2,opt,name=txid_str,json=txidStr,proto3" json:"txid_str,omitempty"` /// The index of the output on the transaction. - OutputIndex uint32 `protobuf:"varint,3,opt,name=output_index,proto3" json:"output_index,omitempty"` + OutputIndex uint32 `protobuf:"varint,3,opt,name=output_index,json=outputIndex,proto3" json:"output_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1638,7 +2144,8 @@ func (m *OutPoint) GetOutputIndex() uint32 { type LightningAddress struct { /// The identity pubkey of the Lightning node Pubkey string `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"` - /// The network location of the lightning node, e.g. `69.69.69.69:1337` or `localhost:10011` + /// The network location of the lightning node, e.g. `69.69.69.69:1337` or + /// `localhost:10011` Host string `protobuf:"bytes,2,opt,name=host,proto3" json:"host,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1687,7 +2194,8 @@ func (m *LightningAddress) GetHost() string { type EstimateFeeRequest struct { /// The map from addresses to amounts for the transaction. AddrToAmount map[string]int64 `protobuf:"bytes,1,rep,name=AddrToAmount,proto3" json:"AddrToAmount,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - /// The target number of blocks that this transaction should be confirmed by. + /// The target number of blocks that this transaction should be confirmed + /// by. 
TargetConf int32 `protobuf:"varint,2,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1735,9 +2243,9 @@ func (m *EstimateFeeRequest) GetTargetConf() int32 { type EstimateFeeResponse struct { /// The total fee in satoshis. - FeeSat int64 `protobuf:"varint,1,opt,name=fee_sat,proto3" json:"fee_sat,omitempty"` + FeeSat int64 `protobuf:"varint,1,opt,name=fee_sat,json=feeSat,proto3" json:"fee_sat,omitempty"` /// The fee rate in satoshi/byte. - FeerateSatPerByte int64 `protobuf:"varint,2,opt,name=feerate_sat_per_byte,proto3" json:"feerate_sat_per_byte,omitempty"` + FeerateSatPerByte int64 `protobuf:"varint,2,opt,name=feerate_sat_per_byte,json=feerateSatPerByte,proto3" json:"feerate_sat_per_byte,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1785,9 +2293,11 @@ func (m *EstimateFeeResponse) GetFeerateSatPerByte() int64 { type SendManyRequest struct { /// The map from addresses to amounts AddrToAmount map[string]int64 `protobuf:"bytes,1,rep,name=AddrToAmount,proto3" json:"AddrToAmount,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - /// The target number of blocks that this transaction should be confirmed by. + /// The target number of blocks that this transaction should be confirmed + /// by. TargetConf int32 `protobuf:"varint,3,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - /// A manual fee rate set in sat/byte that should be used when crafting the transaction. + /// A manual fee rate set in sat/byte that should be used when crafting the + /// transaction. 
SatPerByte int64 `protobuf:"varint,5,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1885,9 +2395,11 @@ type SendCoinsRequest struct { Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` /// The amount in satoshis to send Amount int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` - /// The target number of blocks that this transaction should be confirmed by. + /// The target number of blocks that this transaction should be confirmed + /// by. TargetConf int32 `protobuf:"varint,3,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - /// A manual fee rate set in sat/byte that should be used when crafting the transaction. + /// A manual fee rate set in sat/byte that should be used when crafting the + /// transaction. SatPerByte int64 `protobuf:"varint,5,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` //* //If set, then the amount field will be ignored, and lnd will attempt to @@ -2169,7 +2681,9 @@ func (m *NewAddressResponse) GetAddress() string { } type SignMessageRequest struct { - /// The message to be signed + //* + //The message to be signed. When using REST, this field must be encoded as + //base64. Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2249,7 +2763,9 @@ func (m *SignMessageResponse) GetSignature() string { } type VerifyMessageRequest struct { - /// The message over which the signature is to be verified + //* + //The message over which the signature is to be verified. When using REST, + //this field must be encoded as base64. 
Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` /// The signature to be verified over the given message Signature string `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` @@ -2350,7 +2866,7 @@ type ConnectPeerRequest struct { /// Lightning address of the peer, in the format `@host` Addr *LightningAddress `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` //* If set, the daemon will attempt to persistently connect to the target - // peer. Otherwise, the call will be synchronous. + // peer. Otherwise, the call will be synchronous. Perm bool `protobuf:"varint,2,opt,name=perm,proto3" json:"perm,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2429,7 +2945,7 @@ var xxx_messageInfo_ConnectPeerResponse proto.InternalMessageInfo type DisconnectPeerRequest struct { /// The pubkey of the node to disconnect from - PubKey string `protobuf:"bytes,1,opt,name=pub_key,proto3" json:"pub_key,omitempty"` + PubKey string `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2501,8 +3017,8 @@ var xxx_messageInfo_DisconnectPeerResponse proto.InternalMessageInfo type HTLC struct { Incoming bool `protobuf:"varint,1,opt,name=incoming,proto3" json:"incoming,omitempty"` Amount int64 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` - HashLock []byte `protobuf:"bytes,3,opt,name=hash_lock,proto3" json:"hash_lock,omitempty"` - ExpirationHeight uint32 `protobuf:"varint,4,opt,name=expiration_height,proto3" json:"expiration_height,omitempty"` + HashLock []byte `protobuf:"bytes,3,opt,name=hash_lock,json=hashLock,proto3" json:"hash_lock,omitempty"` + ExpirationHeight uint32 `protobuf:"varint,4,opt,name=expiration_height,json=expirationHeight,proto3" json:"expiration_height,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized 
[]byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2565,71 +3081,100 @@ type Channel struct { /// Whether this channel is active or not Active bool `protobuf:"varint,1,opt,name=active,proto3" json:"active,omitempty"` /// The identity pubkey of the remote node - RemotePubkey string `protobuf:"bytes,2,opt,name=remote_pubkey,proto3" json:"remote_pubkey,omitempty"` + RemotePubkey string `protobuf:"bytes,2,opt,name=remote_pubkey,json=remotePubkey,proto3" json:"remote_pubkey,omitempty"` //* //The outpoint (txid:index) of the funding transaction. With this value, Bob //will be able to generate a signature for Alice's version of the commitment //transaction. - ChannelPoint string `protobuf:"bytes,3,opt,name=channel_point,proto3" json:"channel_point,omitempty"` + ChannelPoint string `protobuf:"bytes,3,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` //* //The unique channel ID for the channel. The first 3 bytes are the block //height, the next 3 the index within the block, and the last 2 bytes are the //output index for the channel. 
- ChanId uint64 `protobuf:"varint,4,opt,name=chan_id,proto3" json:"chan_id,omitempty"` + ChanId uint64 `protobuf:"varint,4,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` /// The total amount of funds held in this channel Capacity int64 `protobuf:"varint,5,opt,name=capacity,proto3" json:"capacity,omitempty"` /// This node's current balance in this channel - LocalBalance int64 `protobuf:"varint,6,opt,name=local_balance,proto3" json:"local_balance,omitempty"` + LocalBalance int64 `protobuf:"varint,6,opt,name=local_balance,json=localBalance,proto3" json:"local_balance,omitempty"` /// The counterparty's current balance in this channel - RemoteBalance int64 `protobuf:"varint,7,opt,name=remote_balance,proto3" json:"remote_balance,omitempty"` + RemoteBalance int64 `protobuf:"varint,7,opt,name=remote_balance,json=remoteBalance,proto3" json:"remote_balance,omitempty"` //* //The amount calculated to be paid in fees for the current set of commitment //transactions. The fee amount is persisted with the channel in order to //allow the fee amount to be removed and recalculated with each channel state //update, including updates that happen after a system restart. - CommitFee int64 `protobuf:"varint,8,opt,name=commit_fee,proto3" json:"commit_fee,omitempty"` + CommitFee int64 `protobuf:"varint,8,opt,name=commit_fee,json=commitFee,proto3" json:"commit_fee,omitempty"` /// The weight of the commitment transaction - CommitWeight int64 `protobuf:"varint,9,opt,name=commit_weight,proto3" json:"commit_weight,omitempty"` + CommitWeight int64 `protobuf:"varint,9,opt,name=commit_weight,json=commitWeight,proto3" json:"commit_weight,omitempty"` //* //The required number of satoshis per kilo-weight that the requester will pay //at all times, for both the funding transaction and commitment transaction. //This value can later be updated once the channel is open. 
- FeePerKw int64 `protobuf:"varint,10,opt,name=fee_per_kw,proto3" json:"fee_per_kw,omitempty"` + FeePerKw int64 `protobuf:"varint,10,opt,name=fee_per_kw,json=feePerKw,proto3" json:"fee_per_kw,omitempty"` /// The unsettled balance in this channel - UnsettledBalance int64 `protobuf:"varint,11,opt,name=unsettled_balance,proto3" json:"unsettled_balance,omitempty"` + UnsettledBalance int64 `protobuf:"varint,11,opt,name=unsettled_balance,json=unsettledBalance,proto3" json:"unsettled_balance,omitempty"` //* //The total number of satoshis we've sent within this channel. - TotalSatoshisSent int64 `protobuf:"varint,12,opt,name=total_satoshis_sent,proto3" json:"total_satoshis_sent,omitempty"` + TotalSatoshisSent int64 `protobuf:"varint,12,opt,name=total_satoshis_sent,json=totalSatoshisSent,proto3" json:"total_satoshis_sent,omitempty"` //* //The total number of satoshis we've received within this channel. - TotalSatoshisReceived int64 `protobuf:"varint,13,opt,name=total_satoshis_received,proto3" json:"total_satoshis_received,omitempty"` + TotalSatoshisReceived int64 `protobuf:"varint,13,opt,name=total_satoshis_received,json=totalSatoshisReceived,proto3" json:"total_satoshis_received,omitempty"` //* //The total number of updates conducted within this channel. - NumUpdates uint64 `protobuf:"varint,14,opt,name=num_updates,proto3" json:"num_updates,omitempty"` + NumUpdates uint64 `protobuf:"varint,14,opt,name=num_updates,json=numUpdates,proto3" json:"num_updates,omitempty"` //* //The list of active, uncleared HTLCs currently pending within the channel. - PendingHtlcs []*HTLC `protobuf:"bytes,15,rep,name=pending_htlcs,proto3" json:"pending_htlcs,omitempty"` + PendingHtlcs []*HTLC `protobuf:"bytes,15,rep,name=pending_htlcs,json=pendingHtlcs,proto3" json:"pending_htlcs,omitempty"` //* //The CSV delay expressed in relative blocks. If the channel is force closed, //we will need to wait for this many blocks before we can regain our funds. 
- CsvDelay uint32 `protobuf:"varint,16,opt,name=csv_delay,proto3" json:"csv_delay,omitempty"` + CsvDelay uint32 `protobuf:"varint,16,opt,name=csv_delay,json=csvDelay,proto3" json:"csv_delay,omitempty"` /// Whether this channel is advertised to the network or not. Private bool `protobuf:"varint,17,opt,name=private,proto3" json:"private,omitempty"` /// True if we were the ones that created the channel. Initiator bool `protobuf:"varint,18,opt,name=initiator,proto3" json:"initiator,omitempty"` /// A set of flags showing the current state of the channel. - ChanStatusFlags string `protobuf:"bytes,19,opt,name=chan_status_flags,proto3" json:"chan_status_flags,omitempty"` + ChanStatusFlags string `protobuf:"bytes,19,opt,name=chan_status_flags,json=chanStatusFlags,proto3" json:"chan_status_flags,omitempty"` /// The minimum satoshis this node is required to reserve in its balance. - LocalChanReserveSat int64 `protobuf:"varint,20,opt,name=local_chan_reserve_sat,proto3" json:"local_chan_reserve_sat,omitempty"` + LocalChanReserveSat int64 `protobuf:"varint,20,opt,name=local_chan_reserve_sat,json=localChanReserveSat,proto3" json:"local_chan_reserve_sat,omitempty"` //* //The minimum satoshis the other node is required to reserve in its balance. - RemoteChanReserveSat int64 `protobuf:"varint,21,opt,name=remote_chan_reserve_sat,proto3" json:"remote_chan_reserve_sat,omitempty"` + RemoteChanReserveSat int64 `protobuf:"varint,21,opt,name=remote_chan_reserve_sat,json=remoteChanReserveSat,proto3" json:"remote_chan_reserve_sat,omitempty"` + /// Deprecated. Use commitment_type. + StaticRemoteKey bool `protobuf:"varint,22,opt,name=static_remote_key,json=staticRemoteKey,proto3" json:"static_remote_key,omitempty"` // Deprecated: Do not use. + /// The commitment type used by this channel. 
+ CommitmentType CommitmentType `protobuf:"varint,26,opt,name=commitment_type,json=commitmentType,proto3,enum=lnrpc.CommitmentType" json:"commitment_type,omitempty"` //* - //If true, then this channel uses the modern commitment format where the key - //in the output of the remote party does not change each state. This makes - //back up and recovery easier as when the channel is closed, the funds go - //directly to that key. - StaticRemoteKey bool `protobuf:"varint,22,opt,name=static_remote_key,proto3" json:"static_remote_key,omitempty"` + //The number of seconds that the channel has been monitored by the channel + //scoring system. Scores are currently not persisted, so this value may be + //less than the lifetime of the channel [EXPERIMENTAL]. + Lifetime int64 `protobuf:"varint,23,opt,name=lifetime,proto3" json:"lifetime,omitempty"` + //* + //The number of seconds that the remote peer has been observed as being online + //by the channel scoring system over the lifetime of the channel + //[EXPERIMENTAL]. + Uptime int64 `protobuf:"varint,24,opt,name=uptime,proto3" json:"uptime,omitempty"` + //* + //Close address is the address that we will enforce payout to on cooperative + //close if the channel was opened utilizing option upfront shutdown. This + //value can be set on channel open by setting close_address in an open channel + //request. If this value is not set, you can still choose a payout address by + //cooperatively closing with the delivery_address field set. + CloseAddress string `protobuf:"bytes,25,opt,name=close_address,json=closeAddress,proto3" json:"close_address,omitempty"` + // + //The amount that the initiator of the channel optionally pushed to the remote + //party on channel open. This amount will be zero if the channel initiator did + //not push any funds to the remote peer. If the initiator field is true, we + //pushed this amount to our peer, if it is false, the remote peer pushed this + //amount to us. 
+ PushAmountSat uint64 `protobuf:"varint,27,opt,name=push_amount_sat,json=pushAmountSat,proto3" json:"push_amount_sat,omitempty"` + //* + //This uint32 indicates if this channel is to be considered 'frozen'. A + //frozen channel doest not allow a cooperative channel close by the + //initiator. The thaw_height is the height that this restriction stops + //applying to the channel. This field is optional, not setting it or using a + //value of zero will mean the channel has no additional restrictions. + ThawHeight uint32 `protobuf:"varint,28,opt,name=thaw_height,json=thawHeight,proto3" json:"thaw_height,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2807,6 +3352,7 @@ func (m *Channel) GetRemoteChanReserveSat() int64 { return 0 } +// Deprecated: Do not use. func (m *Channel) GetStaticRemoteKey() bool { if m != nil { return m.StaticRemoteKey @@ -2814,11 +3360,57 @@ func (m *Channel) GetStaticRemoteKey() bool { return false } +func (m *Channel) GetCommitmentType() CommitmentType { + if m != nil { + return m.CommitmentType + } + return CommitmentType_LEGACY +} + +func (m *Channel) GetLifetime() int64 { + if m != nil { + return m.Lifetime + } + return 0 +} + +func (m *Channel) GetUptime() int64 { + if m != nil { + return m.Uptime + } + return 0 +} + +func (m *Channel) GetCloseAddress() string { + if m != nil { + return m.CloseAddress + } + return "" +} + +func (m *Channel) GetPushAmountSat() uint64 { + if m != nil { + return m.PushAmountSat + } + return 0 +} + +func (m *Channel) GetThawHeight() uint32 { + if m != nil { + return m.ThawHeight + } + return 0 +} + type ListChannelsRequest struct { - ActiveOnly bool `protobuf:"varint,1,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` - InactiveOnly bool `protobuf:"varint,2,opt,name=inactive_only,json=inactiveOnly,proto3" json:"inactive_only,omitempty"` - PublicOnly bool 
`protobuf:"varint,3,opt,name=public_only,json=publicOnly,proto3" json:"public_only,omitempty"` - PrivateOnly bool `protobuf:"varint,4,opt,name=private_only,json=privateOnly,proto3" json:"private_only,omitempty"` + ActiveOnly bool `protobuf:"varint,1,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"` + InactiveOnly bool `protobuf:"varint,2,opt,name=inactive_only,json=inactiveOnly,proto3" json:"inactive_only,omitempty"` + PublicOnly bool `protobuf:"varint,3,opt,name=public_only,json=publicOnly,proto3" json:"public_only,omitempty"` + PrivateOnly bool `protobuf:"varint,4,opt,name=private_only,json=privateOnly,proto3" json:"private_only,omitempty"` + //* + //Filters the response for channels with a target peer's pubkey. If peer is + //empty, all channels will be returned. + Peer []byte `protobuf:"bytes,5,opt,name=peer,proto3" json:"peer,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2877,6 +3469,13 @@ func (m *ListChannelsRequest) GetPrivateOnly() bool { return false } +func (m *ListChannelsRequest) GetPeer() []byte { + if m != nil { + return m.Peer + } + return nil +} + type ListChannelsResponse struct { /// The list of active channels Channels []*Channel `protobuf:"bytes,11,rep,name=channels,proto3" json:"channels,omitempty"` @@ -2919,28 +3518,40 @@ func (m *ListChannelsResponse) GetChannels() []*Channel { type ChannelCloseSummary struct { /// The outpoint (txid:index) of the funding transaction. - ChannelPoint string `protobuf:"bytes,1,opt,name=channel_point,proto3" json:"channel_point,omitempty"` + ChannelPoint string `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` /// The unique channel ID for the channel. 
- ChanId uint64 `protobuf:"varint,2,opt,name=chan_id,proto3" json:"chan_id,omitempty"` + ChanId uint64 `protobuf:"varint,2,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` /// The hash of the genesis block that this channel resides within. - ChainHash string `protobuf:"bytes,3,opt,name=chain_hash,proto3" json:"chain_hash,omitempty"` + ChainHash string `protobuf:"bytes,3,opt,name=chain_hash,json=chainHash,proto3" json:"chain_hash,omitempty"` /// The txid of the transaction which ultimately closed this channel. - ClosingTxHash string `protobuf:"bytes,4,opt,name=closing_tx_hash,proto3" json:"closing_tx_hash,omitempty"` + ClosingTxHash string `protobuf:"bytes,4,opt,name=closing_tx_hash,json=closingTxHash,proto3" json:"closing_tx_hash,omitempty"` /// Public key of the remote peer that we formerly had a channel with. - RemotePubkey string `protobuf:"bytes,5,opt,name=remote_pubkey,proto3" json:"remote_pubkey,omitempty"` + RemotePubkey string `protobuf:"bytes,5,opt,name=remote_pubkey,json=remotePubkey,proto3" json:"remote_pubkey,omitempty"` /// Total capacity of the channel. Capacity int64 `protobuf:"varint,6,opt,name=capacity,proto3" json:"capacity,omitempty"` /// Height at which the funding transaction was spent. 
- CloseHeight uint32 `protobuf:"varint,7,opt,name=close_height,proto3" json:"close_height,omitempty"` + CloseHeight uint32 `protobuf:"varint,7,opt,name=close_height,json=closeHeight,proto3" json:"close_height,omitempty"` /// Settled balance at the time of channel closure - SettledBalance int64 `protobuf:"varint,8,opt,name=settled_balance,proto3" json:"settled_balance,omitempty"` + SettledBalance int64 `protobuf:"varint,8,opt,name=settled_balance,json=settledBalance,proto3" json:"settled_balance,omitempty"` /// The sum of all the time-locked outputs at the time of channel closure - TimeLockedBalance int64 `protobuf:"varint,9,opt,name=time_locked_balance,proto3" json:"time_locked_balance,omitempty"` + TimeLockedBalance int64 `protobuf:"varint,9,opt,name=time_locked_balance,json=timeLockedBalance,proto3" json:"time_locked_balance,omitempty"` /// Details on how the channel was closed. - CloseType ChannelCloseSummary_ClosureType `protobuf:"varint,10,opt,name=close_type,proto3,enum=lnrpc.ChannelCloseSummary_ClosureType" json:"close_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CloseType ChannelCloseSummary_ClosureType `protobuf:"varint,10,opt,name=close_type,json=closeType,proto3,enum=lnrpc.ChannelCloseSummary_ClosureType" json:"close_type,omitempty"` + //* + //Open initiator is the party that initiated opening the channel. Note that + //this value may be unknown if the channel was closed before we migrated to + //store open channel information after close. + OpenInitiator Initiator `protobuf:"varint,11,opt,name=open_initiator,json=openInitiator,proto3,enum=lnrpc.Initiator" json:"open_initiator,omitempty"` + //* + //Close initiator indicates which party initiated the close. This value will + //be unknown for channels that were cooperatively closed before we started + //tracking cooperative close initiators. 
Note that this indicates which party + //initiated a close, and it is possible for both to initiate cooperative or + //force closes, although only one party's close will be confirmed on chain. + CloseInitiator Initiator `protobuf:"varint,12,opt,name=close_initiator,json=closeInitiator,proto3,enum=lnrpc.Initiator" json:"close_initiator,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ChannelCloseSummary) Reset() { *m = ChannelCloseSummary{} } @@ -3038,6 +3649,20 @@ func (m *ChannelCloseSummary) GetCloseType() ChannelCloseSummary_ClosureType { return ChannelCloseSummary_COOPERATIVE_CLOSE } +func (m *ChannelCloseSummary) GetOpenInitiator() Initiator { + if m != nil { + return m.OpenInitiator + } + return Initiator_INITIATOR_UNKNOWN +} + +func (m *ChannelCloseSummary) GetCloseInitiator() Initiator { + if m != nil { + return m.CloseInitiator + } + return Initiator_INITIATOR_UNKNOWN +} + type ClosedChannelsRequest struct { Cooperative bool `protobuf:"varint,1,opt,name=cooperative,proto3" json:"cooperative,omitempty"` LocalForce bool `protobuf:"varint,2,opt,name=local_force,json=localForce,proto3" json:"local_force,omitempty"` @@ -3158,26 +3783,35 @@ func (m *ClosedChannelsResponse) GetChannels() []*ChannelCloseSummary { type Peer struct { /// The identity pubkey of the peer - PubKey string `protobuf:"bytes,1,opt,name=pub_key,proto3" json:"pub_key,omitempty"` + PubKey string `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` /// Network address of the peer; eg `127.0.0.1:10011` Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` /// Bytes of data transmitted to this peer - BytesSent uint64 `protobuf:"varint,4,opt,name=bytes_sent,proto3" json:"bytes_sent,omitempty"` + BytesSent uint64 `protobuf:"varint,4,opt,name=bytes_sent,json=bytesSent,proto3" json:"bytes_sent,omitempty"` /// Bytes of data transmitted from this peer - 
BytesRecv uint64 `protobuf:"varint,5,opt,name=bytes_recv,proto3" json:"bytes_recv,omitempty"` + BytesRecv uint64 `protobuf:"varint,5,opt,name=bytes_recv,json=bytesRecv,proto3" json:"bytes_recv,omitempty"` /// Satoshis sent to this peer - SatSent int64 `protobuf:"varint,6,opt,name=sat_sent,proto3" json:"sat_sent,omitempty"` + SatSent int64 `protobuf:"varint,6,opt,name=sat_sent,json=satSent,proto3" json:"sat_sent,omitempty"` /// Satoshis received from this peer - SatRecv int64 `protobuf:"varint,7,opt,name=sat_recv,proto3" json:"sat_recv,omitempty"` + SatRecv int64 `protobuf:"varint,7,opt,name=sat_recv,json=satRecv,proto3" json:"sat_recv,omitempty"` /// A channel is inbound if the counterparty initiated the channel Inbound bool `protobuf:"varint,8,opt,name=inbound,proto3" json:"inbound,omitempty"` /// Ping time to this peer - PingTime int64 `protobuf:"varint,9,opt,name=ping_time,proto3" json:"ping_time,omitempty"` + PingTime int64 `protobuf:"varint,9,opt,name=ping_time,json=pingTime,proto3" json:"ping_time,omitempty"` // The type of sync we are currently performing with this peer. - SyncType Peer_SyncType `protobuf:"varint,10,opt,name=sync_type,proto3,enum=lnrpc.Peer_SyncType" json:"sync_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + SyncType Peer_SyncType `protobuf:"varint,10,opt,name=sync_type,json=syncType,proto3,enum=lnrpc.Peer_SyncType" json:"sync_type,omitempty"` + /// Features advertised by the remote peer in their init message. + Features map[uint32]*Feature `protobuf:"bytes,11,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // + //The latest errors received from our peer with timestamps, limited to the 10 + //most recent errors. These errors are tracked across peer connections, but + //are not persisted across lnd restarts. 
Note that these errors are only + //stored for peers that we have channels open with, to prevent peers from + //spamming us with errors at no cost. + Errors []*TimestampedError `protobuf:"bytes,12,rep,name=errors,proto3" json:"errors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Peer) Reset() { *m = Peer{} } @@ -3268,37 +3902,112 @@ func (m *Peer) GetSyncType() Peer_SyncType { return Peer_UNKNOWN_SYNC } -type ListPeersRequest struct { +func (m *Peer) GetFeatures() map[uint32]*Feature { + if m != nil { + return m.Features + } + return nil +} + +func (m *Peer) GetErrors() []*TimestampedError { + if m != nil { + return m.Errors + } + return nil +} + +type TimestampedError struct { + // The unix timestamp in seconds when the error occurred. + Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The string representation of the error sent by our peer. + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *ListPeersRequest) Reset() { *m = ListPeersRequest{} } -func (m *ListPeersRequest) String() string { return proto.CompactTextString(m) } -func (*ListPeersRequest) ProtoMessage() {} -func (*ListPeersRequest) Descriptor() ([]byte, []int) { +func (m *TimestampedError) Reset() { *m = TimestampedError{} } +func (m *TimestampedError) String() string { return proto.CompactTextString(m) } +func (*TimestampedError) ProtoMessage() {} +func (*TimestampedError) Descriptor() ([]byte, []int) { return fileDescriptor_77a6da22d6a3feb1, []int{47} } -func (m *ListPeersRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListPeersRequest.Unmarshal(m, b) +func (m *TimestampedError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimestampedError.Unmarshal(m, b) } -func (m *ListPeersRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListPeersRequest.Marshal(b, m, deterministic) +func (m *TimestampedError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimestampedError.Marshal(b, m, deterministic) } -func (m *ListPeersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPeersRequest.Merge(m, src) +func (m *TimestampedError) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimestampedError.Merge(m, src) } -func (m *ListPeersRequest) XXX_Size() int { - return xxx_messageInfo_ListPeersRequest.Size(m) +func (m *TimestampedError) XXX_Size() int { + return xxx_messageInfo_TimestampedError.Size(m) } -func (m *ListPeersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListPeersRequest.DiscardUnknown(m) +func (m *TimestampedError) XXX_DiscardUnknown() { + xxx_messageInfo_TimestampedError.DiscardUnknown(m) +} + +var xxx_messageInfo_TimestampedError proto.InternalMessageInfo + +func (m *TimestampedError) GetTimestamp() uint64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *TimestampedError) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type ListPeersRequest struct { + // + //If true, only the last error that our peer sent us will be returned with + //the peer's information, rather than the full set of historic errors we have + //stored. 
+ LatestError bool `protobuf:"varint,1,opt,name=latest_error,json=latestError,proto3" json:"latest_error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPeersRequest) Reset() { *m = ListPeersRequest{} } +func (m *ListPeersRequest) String() string { return proto.CompactTextString(m) } +func (*ListPeersRequest) ProtoMessage() {} +func (*ListPeersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{48} +} + +func (m *ListPeersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListPeersRequest.Unmarshal(m, b) +} +func (m *ListPeersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListPeersRequest.Marshal(b, m, deterministic) +} +func (m *ListPeersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPeersRequest.Merge(m, src) +} +func (m *ListPeersRequest) XXX_Size() int { + return xxx_messageInfo_ListPeersRequest.Size(m) +} +func (m *ListPeersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListPeersRequest.DiscardUnknown(m) } var xxx_messageInfo_ListPeersRequest proto.InternalMessageInfo +func (m *ListPeersRequest) GetLatestError() bool { + if m != nil { + return m.LatestError + } + return false +} + type ListPeersResponse struct { /// The list of currently connected peers Peers []*Peer `protobuf:"bytes,1,rep,name=peers,proto3" json:"peers,omitempty"` @@ -3311,7 +4020,7 @@ func (m *ListPeersResponse) Reset() { *m = ListPeersResponse{} } func (m *ListPeersResponse) String() string { return proto.CompactTextString(m) } func (*ListPeersResponse) ProtoMessage() {} func (*ListPeersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{48} + return fileDescriptor_77a6da22d6a3feb1, []int{49} } func (m *ListPeersResponse) XXX_Unmarshal(b []byte) error { @@ -3339,6 +4048,85 @@ func (m *ListPeersResponse) GetPeers() []*Peer { return nil } +type 
PeerEventSubscription struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PeerEventSubscription) Reset() { *m = PeerEventSubscription{} } +func (m *PeerEventSubscription) String() string { return proto.CompactTextString(m) } +func (*PeerEventSubscription) ProtoMessage() {} +func (*PeerEventSubscription) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{50} +} + +func (m *PeerEventSubscription) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PeerEventSubscription.Unmarshal(m, b) +} +func (m *PeerEventSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PeerEventSubscription.Marshal(b, m, deterministic) +} +func (m *PeerEventSubscription) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerEventSubscription.Merge(m, src) +} +func (m *PeerEventSubscription) XXX_Size() int { + return xxx_messageInfo_PeerEventSubscription.Size(m) +} +func (m *PeerEventSubscription) XXX_DiscardUnknown() { + xxx_messageInfo_PeerEventSubscription.DiscardUnknown(m) +} + +var xxx_messageInfo_PeerEventSubscription proto.InternalMessageInfo + +type PeerEvent struct { + /// The identity pubkey of the peer. 
+ PubKey string `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` + Type PeerEvent_EventType `protobuf:"varint,2,opt,name=type,proto3,enum=lnrpc.PeerEvent_EventType" json:"type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PeerEvent) Reset() { *m = PeerEvent{} } +func (m *PeerEvent) String() string { return proto.CompactTextString(m) } +func (*PeerEvent) ProtoMessage() {} +func (*PeerEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{51} +} + +func (m *PeerEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PeerEvent.Unmarshal(m, b) +} +func (m *PeerEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PeerEvent.Marshal(b, m, deterministic) +} +func (m *PeerEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_PeerEvent.Merge(m, src) +} +func (m *PeerEvent) XXX_Size() int { + return xxx_messageInfo_PeerEvent.Size(m) +} +func (m *PeerEvent) XXX_DiscardUnknown() { + xxx_messageInfo_PeerEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_PeerEvent proto.InternalMessageInfo + +func (m *PeerEvent) GetPubKey() string { + if m != nil { + return m.PubKey + } + return "" +} + +func (m *PeerEvent) GetType() PeerEvent_EventType { + if m != nil { + return m.Type + } + return PeerEvent_PEER_ONLINE +} + type GetInfoRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -3349,7 +4137,7 @@ func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} } func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) } func (*GetInfoRequest) ProtoMessage() {} func (*GetInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{49} + return fileDescriptor_77a6da22d6a3feb1, []int{52} } func (m *GetInfoRequest) XXX_Unmarshal(b []byte) error { @@ -3371,50 +4159,56 @@ func (m *GetInfoRequest) 
XXX_DiscardUnknown() { var xxx_messageInfo_GetInfoRequest proto.InternalMessageInfo type GetInfoResponse struct { + /// The version of the LND software that the node is running. + Version string `protobuf:"bytes,14,opt,name=version,proto3" json:"version,omitempty"` + /// The SHA1 commit hash that the daemon is compiled with. + CommitHash string `protobuf:"bytes,20,opt,name=commit_hash,json=commitHash,proto3" json:"commit_hash,omitempty"` /// The identity pubkey of the current node. - IdentityPubkey string `protobuf:"bytes,1,opt,name=identity_pubkey,proto3" json:"identity_pubkey,omitempty"` + IdentityPubkey string `protobuf:"bytes,1,opt,name=identity_pubkey,json=identityPubkey,proto3" json:"identity_pubkey,omitempty"` /// If applicable, the alias of the current node, e.g. "bob" Alias string `protobuf:"bytes,2,opt,name=alias,proto3" json:"alias,omitempty"` + /// The color of the current node in hex code format + Color string `protobuf:"bytes,17,opt,name=color,proto3" json:"color,omitempty"` /// Number of pending channels - NumPendingChannels uint32 `protobuf:"varint,3,opt,name=num_pending_channels,proto3" json:"num_pending_channels,omitempty"` + NumPendingChannels uint32 `protobuf:"varint,3,opt,name=num_pending_channels,json=numPendingChannels,proto3" json:"num_pending_channels,omitempty"` /// Number of active channels - NumActiveChannels uint32 `protobuf:"varint,4,opt,name=num_active_channels,proto3" json:"num_active_channels,omitempty"` + NumActiveChannels uint32 `protobuf:"varint,4,opt,name=num_active_channels,json=numActiveChannels,proto3" json:"num_active_channels,omitempty"` + /// Number of inactive channels + NumInactiveChannels uint32 `protobuf:"varint,15,opt,name=num_inactive_channels,json=numInactiveChannels,proto3" json:"num_inactive_channels,omitempty"` /// Number of peers - NumPeers uint32 `protobuf:"varint,5,opt,name=num_peers,proto3" json:"num_peers,omitempty"` + NumPeers uint32 `protobuf:"varint,5,opt,name=num_peers,json=numPeers,proto3" 
json:"num_peers,omitempty"` /// The node's current view of the height of the best block - BlockHeight uint32 `protobuf:"varint,6,opt,name=block_height,proto3" json:"block_height,omitempty"` + BlockHeight uint32 `protobuf:"varint,6,opt,name=block_height,json=blockHeight,proto3" json:"block_height,omitempty"` /// The node's current view of the hash of the best block - BlockHash string `protobuf:"bytes,8,opt,name=block_hash,proto3" json:"block_hash,omitempty"` + BlockHash string `protobuf:"bytes,8,opt,name=block_hash,json=blockHash,proto3" json:"block_hash,omitempty"` + /// Timestamp of the block best known to the wallet + BestHeaderTimestamp int64 `protobuf:"varint,13,opt,name=best_header_timestamp,json=bestHeaderTimestamp,proto3" json:"best_header_timestamp,omitempty"` /// Whether the wallet's view is synced to the main chain - SyncedToChain bool `protobuf:"varint,9,opt,name=synced_to_chain,proto3" json:"synced_to_chain,omitempty"` + SyncedToChain bool `protobuf:"varint,9,opt,name=synced_to_chain,json=syncedToChain,proto3" json:"synced_to_chain,omitempty"` + // Whether we consider ourselves synced with the public channel graph. + SyncedToGraph bool `protobuf:"varint,18,opt,name=synced_to_graph,json=syncedToGraph,proto3" json:"synced_to_graph,omitempty"` //* //Whether the current node is connected to testnet. This field is //deprecated and the network field should be used instead Testnet bool `protobuf:"varint,10,opt,name=testnet,proto3" json:"testnet,omitempty"` // Deprecated: Do not use. - /// The URIs of the current node. - Uris []string `protobuf:"bytes,12,rep,name=uris,proto3" json:"uris,omitempty"` - /// Timestamp of the block best known to the wallet - BestHeaderTimestamp int64 `protobuf:"varint,13,opt,name=best_header_timestamp,proto3" json:"best_header_timestamp,omitempty"` - /// The version of the LND software that the node is running. 
- Version string `protobuf:"bytes,14,opt,name=version,proto3" json:"version,omitempty"` - /// Number of inactive channels - NumInactiveChannels uint32 `protobuf:"varint,15,opt,name=num_inactive_channels,proto3" json:"num_inactive_channels,omitempty"` /// A list of active chains the node is connected to Chains []*Chain `protobuf:"bytes,16,rep,name=chains,proto3" json:"chains,omitempty"` - /// The color of the current node in hex code format - Color string `protobuf:"bytes,17,opt,name=color,proto3" json:"color,omitempty"` - // Whether we consider ourselves synced with the public channel graph. - SyncedToGraph bool `protobuf:"varint,18,opt,name=synced_to_graph,proto3" json:"synced_to_graph,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + /// The URIs of the current node. + Uris []string `protobuf:"bytes,12,rep,name=uris,proto3" json:"uris,omitempty"` + // + //Features that our node has advertised in our init message, node + //announcements and invoices. 
+ Features map[uint32]*Feature `protobuf:"bytes,19,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} } func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) } func (*GetInfoResponse) ProtoMessage() {} func (*GetInfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{50} + return fileDescriptor_77a6da22d6a3feb1, []int{53} } func (m *GetInfoResponse) XXX_Unmarshal(b []byte) error { @@ -3435,6 +4229,20 @@ func (m *GetInfoResponse) XXX_DiscardUnknown() { var xxx_messageInfo_GetInfoResponse proto.InternalMessageInfo +func (m *GetInfoResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *GetInfoResponse) GetCommitHash() string { + if m != nil { + return m.CommitHash + } + return "" +} + func (m *GetInfoResponse) GetIdentityPubkey() string { if m != nil { return m.IdentityPubkey @@ -3449,6 +4257,13 @@ func (m *GetInfoResponse) GetAlias() string { return "" } +func (m *GetInfoResponse) GetColor() string { + if m != nil { + return m.Color + } + return "" +} + func (m *GetInfoResponse) GetNumPendingChannels() uint32 { if m != nil { return m.NumPendingChannels @@ -3463,6 +4278,13 @@ func (m *GetInfoResponse) GetNumActiveChannels() uint32 { return 0 } +func (m *GetInfoResponse) GetNumInactiveChannels() uint32 { + if m != nil { + return m.NumInactiveChannels + } + return 0 +} + func (m *GetInfoResponse) GetNumPeers() uint32 { if m != nil { return m.NumPeers @@ -3484,47 +4306,33 @@ func (m *GetInfoResponse) GetBlockHash() string { return "" } -func (m *GetInfoResponse) GetSyncedToChain() bool { +func (m *GetInfoResponse) GetBestHeaderTimestamp() int64 { if m != nil { - return m.SyncedToChain + return 
m.BestHeaderTimestamp } - return false + return 0 } -// Deprecated: Do not use. -func (m *GetInfoResponse) GetTestnet() bool { +func (m *GetInfoResponse) GetSyncedToChain() bool { if m != nil { - return m.Testnet + return m.SyncedToChain } return false } -func (m *GetInfoResponse) GetUris() []string { - if m != nil { - return m.Uris - } - return nil -} - -func (m *GetInfoResponse) GetBestHeaderTimestamp() int64 { - if m != nil { - return m.BestHeaderTimestamp - } - return 0 -} - -func (m *GetInfoResponse) GetVersion() string { +func (m *GetInfoResponse) GetSyncedToGraph() bool { if m != nil { - return m.Version + return m.SyncedToGraph } - return "" + return false } -func (m *GetInfoResponse) GetNumInactiveChannels() uint32 { +// Deprecated: Do not use. +func (m *GetInfoResponse) GetTestnet() bool { if m != nil { - return m.NumInactiveChannels + return m.Testnet } - return 0 + return false } func (m *GetInfoResponse) GetChains() []*Chain { @@ -3534,18 +4342,18 @@ func (m *GetInfoResponse) GetChains() []*Chain { return nil } -func (m *GetInfoResponse) GetColor() string { +func (m *GetInfoResponse) GetUris() []string { if m != nil { - return m.Color + return m.Uris } - return "" + return nil } -func (m *GetInfoResponse) GetSyncedToGraph() bool { +func (m *GetInfoResponse) GetFeatures() map[uint32]*Feature { if m != nil { - return m.SyncedToGraph + return m.Features } - return false + return nil } type Chain struct { @@ -3562,7 +4370,7 @@ func (m *Chain) Reset() { *m = Chain{} } func (m *Chain) String() string { return proto.CompactTextString(m) } func (*Chain) ProtoMessage() {} func (*Chain) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{51} + return fileDescriptor_77a6da22d6a3feb1, []int{54} } func (m *Chain) XXX_Unmarshal(b []byte) error { @@ -3610,7 +4418,7 @@ func (m *ConfirmationUpdate) Reset() { *m = ConfirmationUpdate{} } func (m *ConfirmationUpdate) String() string { return proto.CompactTextString(m) } func 
(*ConfirmationUpdate) ProtoMessage() {} func (*ConfirmationUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{52} + return fileDescriptor_77a6da22d6a3feb1, []int{55} } func (m *ConfirmationUpdate) XXX_Unmarshal(b []byte) error { @@ -3653,7 +4461,7 @@ func (m *ConfirmationUpdate) GetNumConfsLeft() uint32 { } type ChannelOpenUpdate struct { - ChannelPoint *ChannelPoint `protobuf:"bytes,1,opt,name=channel_point,proto3" json:"channel_point,omitempty"` + ChannelPoint *ChannelPoint `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -3663,7 +4471,7 @@ func (m *ChannelOpenUpdate) Reset() { *m = ChannelOpenUpdate{} } func (m *ChannelOpenUpdate) String() string { return proto.CompactTextString(m) } func (*ChannelOpenUpdate) ProtoMessage() {} func (*ChannelOpenUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{53} + return fileDescriptor_77a6da22d6a3feb1, []int{56} } func (m *ChannelOpenUpdate) XXX_Unmarshal(b []byte) error { @@ -3692,7 +4500,7 @@ func (m *ChannelOpenUpdate) GetChannelPoint() *ChannelPoint { } type ChannelCloseUpdate struct { - ClosingTxid []byte `protobuf:"bytes,1,opt,name=closing_txid,proto3" json:"closing_txid,omitempty"` + ClosingTxid []byte `protobuf:"bytes,1,opt,name=closing_txid,json=closingTxid,proto3" json:"closing_txid,omitempty"` Success bool `protobuf:"varint,2,opt,name=success,proto3" json:"success,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -3703,7 +4511,7 @@ func (m *ChannelCloseUpdate) Reset() { *m = ChannelCloseUpdate{} } func (m *ChannelCloseUpdate) String() string { return proto.CompactTextString(m) } func (*ChannelCloseUpdate) ProtoMessage() {} func (*ChannelCloseUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{54} + return 
fileDescriptor_77a6da22d6a3feb1, []int{57} } func (m *ChannelCloseUpdate) XXX_Unmarshal(b []byte) error { @@ -3744,12 +4552,21 @@ type CloseChannelRequest struct { //will be able to generate a signature for Alice's version of the commitment //transaction. ChannelPoint *ChannelPoint `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` - /// If true, then the channel will be closed forcibly. This means the current commitment transaction will be signed and broadcast. + /// If true, then the channel will be closed forcibly. This means the + /// current commitment transaction will be signed and broadcast. Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` - /// The target number of blocks that the closure transaction should be confirmed by. + /// The target number of blocks that the closure transaction should be + /// confirmed by. TargetConf int32 `protobuf:"varint,3,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - /// A manual fee rate set in sat/byte that should be used when crafting the closure transaction. - SatPerByte int64 `protobuf:"varint,4,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` + /// A manual fee rate set in sat/byte that should be used when crafting the + /// closure transaction. + SatPerByte int64 `protobuf:"varint,4,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` + // + //An optional address to send funds to in the case of a cooperative close. + //If the channel was opened with an upfront shutdown script and this field + //is set, the request to close will fail because the channel must pay out + //to the upfront shutdown addresss. 
+ DeliveryAddress string `protobuf:"bytes,5,opt,name=delivery_address,json=deliveryAddress,proto3" json:"delivery_address,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -3759,7 +4576,7 @@ func (m *CloseChannelRequest) Reset() { *m = CloseChannelRequest{} } func (m *CloseChannelRequest) String() string { return proto.CompactTextString(m) } func (*CloseChannelRequest) ProtoMessage() {} func (*CloseChannelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{55} + return fileDescriptor_77a6da22d6a3feb1, []int{58} } func (m *CloseChannelRequest) XXX_Unmarshal(b []byte) error { @@ -3808,6 +4625,13 @@ func (m *CloseChannelRequest) GetSatPerByte() int64 { return 0 } +func (m *CloseChannelRequest) GetDeliveryAddress() string { + if m != nil { + return m.DeliveryAddress + } + return "" +} + type CloseStatusUpdate struct { // Types that are valid to be assigned to Update: // *CloseStatusUpdate_ClosePending @@ -3822,7 +4646,7 @@ func (m *CloseStatusUpdate) Reset() { *m = CloseStatusUpdate{} } func (m *CloseStatusUpdate) String() string { return proto.CompactTextString(m) } func (*CloseStatusUpdate) ProtoMessage() {} func (*CloseStatusUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{56} + return fileDescriptor_77a6da22d6a3feb1, []int{59} } func (m *CloseStatusUpdate) XXX_Unmarshal(b []byte) error { @@ -3848,11 +4672,11 @@ type isCloseStatusUpdate_Update interface { } type CloseStatusUpdate_ClosePending struct { - ClosePending *PendingUpdate `protobuf:"bytes,1,opt,name=close_pending,proto3,oneof"` + ClosePending *PendingUpdate `protobuf:"bytes,1,opt,name=close_pending,json=closePending,proto3,oneof"` } type CloseStatusUpdate_ChanClose struct { - ChanClose *ChannelCloseUpdate `protobuf:"bytes,3,opt,name=chan_close,proto3,oneof"` + ChanClose *ChannelCloseUpdate `protobuf:"bytes,3,opt,name=chan_close,json=chanClose,proto3,oneof"` } func 
(*CloseStatusUpdate_ClosePending) isCloseStatusUpdate_Update() {} @@ -3890,7 +4714,7 @@ func (*CloseStatusUpdate) XXX_OneofWrappers() []interface{} { type PendingUpdate struct { Txid []byte `protobuf:"bytes,1,opt,name=txid,proto3" json:"txid,omitempty"` - OutputIndex uint32 `protobuf:"varint,2,opt,name=output_index,proto3" json:"output_index,omitempty"` + OutputIndex uint32 `protobuf:"varint,2,opt,name=output_index,json=outputIndex,proto3" json:"output_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -3900,7 +4724,7 @@ func (m *PendingUpdate) Reset() { *m = PendingUpdate{} } func (m *PendingUpdate) String() string { return proto.CompactTextString(m) } func (*PendingUpdate) ProtoMessage() {} func (*PendingUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{57} + return fileDescriptor_77a6da22d6a3feb1, []int{60} } func (m *PendingUpdate) XXX_Unmarshal(b []byte) error { @@ -3935,89 +4759,185 @@ func (m *PendingUpdate) GetOutputIndex() uint32 { return 0 } -type OpenChannelRequest struct { - /// The pubkey of the node to open a channel with - NodePubkey []byte `protobuf:"bytes,2,opt,name=node_pubkey,proto3" json:"node_pubkey,omitempty"` - /// The hex encoded pubkey of the node to open a channel with - NodePubkeyString string `protobuf:"bytes,3,opt,name=node_pubkey_string,proto3" json:"node_pubkey_string,omitempty"` - /// The number of satoshis the wallet should commit to the channel - LocalFundingAmount int64 `protobuf:"varint,4,opt,name=local_funding_amount,proto3" json:"local_funding_amount,omitempty"` - /// The number of satoshis to push to the remote side as part of the initial commitment state - PushSat int64 `protobuf:"varint,5,opt,name=push_sat,proto3" json:"push_sat,omitempty"` - /// The target number of blocks that the funding transaction should be confirmed by. 
- TargetConf int32 `protobuf:"varint,6,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` - /// A manual fee rate set in sat/byte that should be used when crafting the funding transaction. - SatPerByte int64 `protobuf:"varint,7,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` - /// Whether this channel should be private, not announced to the greater network. - Private bool `protobuf:"varint,8,opt,name=private,proto3" json:"private,omitempty"` - /// The minimum value in millisatoshi we will require for incoming HTLCs on the channel. - MinHtlcMsat int64 `protobuf:"varint,9,opt,name=min_htlc_msat,proto3" json:"min_htlc_msat,omitempty"` - /// The delay we require on the remote's commitment transaction. If this is not set, it will be scaled automatically with the channel size. - RemoteCsvDelay uint32 `protobuf:"varint,10,opt,name=remote_csv_delay,proto3" json:"remote_csv_delay,omitempty"` - /// The minimum number of confirmations each one of your outputs used for the funding transaction must satisfy. - MinConfs int32 `protobuf:"varint,11,opt,name=min_confs,proto3" json:"min_confs,omitempty"` - /// Whether unconfirmed outputs should be used as inputs for the funding transaction. - SpendUnconfirmed bool `protobuf:"varint,12,opt,name=spend_unconfirmed,proto3" json:"spend_unconfirmed,omitempty"` +type ReadyForPsbtFunding struct { + //* + //The P2WSH address of the channel funding multisig address that the below + //specified amount in satoshis needs to be sent to. + FundingAddress string `protobuf:"bytes,1,opt,name=funding_address,json=fundingAddress,proto3" json:"funding_address,omitempty"` + //* + //The exact amount in satoshis that needs to be sent to the above address to + //fund the pending channel. + FundingAmount int64 `protobuf:"varint,2,opt,name=funding_amount,json=fundingAmount,proto3" json:"funding_amount,omitempty"` + //* + //A raw PSBT that contains the pending channel output. 
If a base PSBT was + //provided in the PsbtShim, this is the base PSBT with one additional output. + //If no base PSBT was specified, this is an otherwise empty PSBT with exactly + //one output. + Psbt []byte `protobuf:"bytes,3,opt,name=psbt,proto3" json:"psbt,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *OpenChannelRequest) Reset() { *m = OpenChannelRequest{} } -func (m *OpenChannelRequest) String() string { return proto.CompactTextString(m) } -func (*OpenChannelRequest) ProtoMessage() {} -func (*OpenChannelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{58} +func (m *ReadyForPsbtFunding) Reset() { *m = ReadyForPsbtFunding{} } +func (m *ReadyForPsbtFunding) String() string { return proto.CompactTextString(m) } +func (*ReadyForPsbtFunding) ProtoMessage() {} +func (*ReadyForPsbtFunding) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{61} } -func (m *OpenChannelRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OpenChannelRequest.Unmarshal(m, b) +func (m *ReadyForPsbtFunding) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadyForPsbtFunding.Unmarshal(m, b) } -func (m *OpenChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OpenChannelRequest.Marshal(b, m, deterministic) +func (m *ReadyForPsbtFunding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadyForPsbtFunding.Marshal(b, m, deterministic) } -func (m *OpenChannelRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_OpenChannelRequest.Merge(m, src) +func (m *ReadyForPsbtFunding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadyForPsbtFunding.Merge(m, src) } -func (m *OpenChannelRequest) XXX_Size() int { - return xxx_messageInfo_OpenChannelRequest.Size(m) +func (m *ReadyForPsbtFunding) XXX_Size() int { + return 
xxx_messageInfo_ReadyForPsbtFunding.Size(m) } -func (m *OpenChannelRequest) XXX_DiscardUnknown() { - xxx_messageInfo_OpenChannelRequest.DiscardUnknown(m) +func (m *ReadyForPsbtFunding) XXX_DiscardUnknown() { + xxx_messageInfo_ReadyForPsbtFunding.DiscardUnknown(m) } -var xxx_messageInfo_OpenChannelRequest proto.InternalMessageInfo - -func (m *OpenChannelRequest) GetNodePubkey() []byte { - if m != nil { - return m.NodePubkey - } - return nil -} +var xxx_messageInfo_ReadyForPsbtFunding proto.InternalMessageInfo -func (m *OpenChannelRequest) GetNodePubkeyString() string { +func (m *ReadyForPsbtFunding) GetFundingAddress() string { if m != nil { - return m.NodePubkeyString + return m.FundingAddress } return "" } -func (m *OpenChannelRequest) GetLocalFundingAmount() int64 { +func (m *ReadyForPsbtFunding) GetFundingAmount() int64 { if m != nil { - return m.LocalFundingAmount + return m.FundingAmount } return 0 } -func (m *OpenChannelRequest) GetPushSat() int64 { +func (m *ReadyForPsbtFunding) GetPsbt() []byte { if m != nil { - return m.PushSat + return m.Psbt } - return 0 + return nil } -func (m *OpenChannelRequest) GetTargetConf() int32 { - if m != nil { +type OpenChannelRequest struct { + //* + //The pubkey of the node to open a channel with. When using REST, this field + //must be encoded as base64. + NodePubkey []byte `protobuf:"bytes,2,opt,name=node_pubkey,json=nodePubkey,proto3" json:"node_pubkey,omitempty"` + //* + //The hex encoded pubkey of the node to open a channel with. Deprecated now + //that the REST gateway supports base64 encoding of bytes fields. + NodePubkeyString string `protobuf:"bytes,3,opt,name=node_pubkey_string,json=nodePubkeyString,proto3" json:"node_pubkey_string,omitempty"` // Deprecated: Do not use. 
+ /// The number of satoshis the wallet should commit to the channel + LocalFundingAmount int64 `protobuf:"varint,4,opt,name=local_funding_amount,json=localFundingAmount,proto3" json:"local_funding_amount,omitempty"` + /// The number of satoshis to push to the remote side as part of the initial + /// commitment state + PushSat int64 `protobuf:"varint,5,opt,name=push_sat,json=pushSat,proto3" json:"push_sat,omitempty"` + /// The target number of blocks that the funding transaction should be + /// confirmed by. + TargetConf int32 `protobuf:"varint,6,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` + /// A manual fee rate set in sat/byte that should be used when crafting the + /// funding transaction. + SatPerByte int64 `protobuf:"varint,7,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` + /// Whether this channel should be private, not announced to the greater + /// network. + Private bool `protobuf:"varint,8,opt,name=private,proto3" json:"private,omitempty"` + /// The minimum value in millisatoshi we will require for incoming HTLCs on + /// the channel. + MinHtlcMsat int64 `protobuf:"varint,9,opt,name=min_htlc_msat,json=minHtlcMsat,proto3" json:"min_htlc_msat,omitempty"` + /// The delay we require on the remote's commitment transaction. If this is + /// not set, it will be scaled automatically with the channel size. + RemoteCsvDelay uint32 `protobuf:"varint,10,opt,name=remote_csv_delay,json=remoteCsvDelay,proto3" json:"remote_csv_delay,omitempty"` + /// The minimum number of confirmations each one of your outputs used for + /// the funding transaction must satisfy. + MinConfs int32 `protobuf:"varint,11,opt,name=min_confs,json=minConfs,proto3" json:"min_confs,omitempty"` + /// Whether unconfirmed outputs should be used as inputs for the funding + /// transaction. 
+ SpendUnconfirmed bool `protobuf:"varint,12,opt,name=spend_unconfirmed,json=spendUnconfirmed,proto3" json:"spend_unconfirmed,omitempty"` + // + //Close address is an optional address which specifies the address to which + //funds should be paid out to upon cooperative close. This field may only be + //set if the peer supports the option upfront feature bit (call listpeers + //to check). The remote peer will only accept cooperative closes to this + //address if it is set. + // + //Note: If this value is set on channel creation, you will *not* be able to + //cooperatively close out to a different address. + CloseAddress string `protobuf:"bytes,13,opt,name=close_address,json=closeAddress,proto3" json:"close_address,omitempty"` + //* + //Funding shims are an optional argument that allow the caller to intercept + //certain funding functionality. For example, a shim can be provided to use a + //particular key for the commitment key (ideally cold) rather than use one + //that is generated by the wallet as normal, or signal that signing will be + //carried out in an interactive manner (PSBT based). 
+ FundingShim *FundingShim `protobuf:"bytes,14,opt,name=funding_shim,json=fundingShim,proto3" json:"funding_shim,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OpenChannelRequest) Reset() { *m = OpenChannelRequest{} } +func (m *OpenChannelRequest) String() string { return proto.CompactTextString(m) } +func (*OpenChannelRequest) ProtoMessage() {} +func (*OpenChannelRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{62} +} + +func (m *OpenChannelRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OpenChannelRequest.Unmarshal(m, b) +} +func (m *OpenChannelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OpenChannelRequest.Marshal(b, m, deterministic) +} +func (m *OpenChannelRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_OpenChannelRequest.Merge(m, src) +} +func (m *OpenChannelRequest) XXX_Size() int { + return xxx_messageInfo_OpenChannelRequest.Size(m) +} +func (m *OpenChannelRequest) XXX_DiscardUnknown() { + xxx_messageInfo_OpenChannelRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_OpenChannelRequest proto.InternalMessageInfo + +func (m *OpenChannelRequest) GetNodePubkey() []byte { + if m != nil { + return m.NodePubkey + } + return nil +} + +// Deprecated: Do not use. 
+func (m *OpenChannelRequest) GetNodePubkeyString() string { + if m != nil { + return m.NodePubkeyString + } + return "" +} + +func (m *OpenChannelRequest) GetLocalFundingAmount() int64 { + if m != nil { + return m.LocalFundingAmount + } + return 0 +} + +func (m *OpenChannelRequest) GetPushSat() int64 { + if m != nil { + return m.PushSat + } + return 0 +} + +func (m *OpenChannelRequest) GetTargetConf() int32 { + if m != nil { return m.TargetConf } return 0 @@ -4065,21 +4985,40 @@ func (m *OpenChannelRequest) GetSpendUnconfirmed() bool { return false } +func (m *OpenChannelRequest) GetCloseAddress() string { + if m != nil { + return m.CloseAddress + } + return "" +} + +func (m *OpenChannelRequest) GetFundingShim() *FundingShim { + if m != nil { + return m.FundingShim + } + return nil +} + type OpenStatusUpdate struct { // Types that are valid to be assigned to Update: // *OpenStatusUpdate_ChanPending // *OpenStatusUpdate_ChanOpen - Update isOpenStatusUpdate_Update `protobuf_oneof:"update"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // *OpenStatusUpdate_PsbtFund + Update isOpenStatusUpdate_Update `protobuf_oneof:"update"` + //* + //The pending channel ID of the created channel. This value may be used to + //further the funding flow manually via the FundingStateStep method. 
+ PendingChanId []byte `protobuf:"bytes,4,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *OpenStatusUpdate) Reset() { *m = OpenStatusUpdate{} } func (m *OpenStatusUpdate) String() string { return proto.CompactTextString(m) } func (*OpenStatusUpdate) ProtoMessage() {} func (*OpenStatusUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{59} + return fileDescriptor_77a6da22d6a3feb1, []int{63} } func (m *OpenStatusUpdate) XXX_Unmarshal(b []byte) error { @@ -4105,17 +5044,23 @@ type isOpenStatusUpdate_Update interface { } type OpenStatusUpdate_ChanPending struct { - ChanPending *PendingUpdate `protobuf:"bytes,1,opt,name=chan_pending,proto3,oneof"` + ChanPending *PendingUpdate `protobuf:"bytes,1,opt,name=chan_pending,json=chanPending,proto3,oneof"` } type OpenStatusUpdate_ChanOpen struct { - ChanOpen *ChannelOpenUpdate `protobuf:"bytes,3,opt,name=chan_open,proto3,oneof"` + ChanOpen *ChannelOpenUpdate `protobuf:"bytes,3,opt,name=chan_open,json=chanOpen,proto3,oneof"` +} + +type OpenStatusUpdate_PsbtFund struct { + PsbtFund *ReadyForPsbtFunding `protobuf:"bytes,5,opt,name=psbt_fund,json=psbtFund,proto3,oneof"` } func (*OpenStatusUpdate_ChanPending) isOpenStatusUpdate_Update() {} func (*OpenStatusUpdate_ChanOpen) isOpenStatusUpdate_Update() {} +func (*OpenStatusUpdate_PsbtFund) isOpenStatusUpdate_Update() {} + func (m *OpenStatusUpdate) GetUpdate() isOpenStatusUpdate_Update { if m != nil { return m.Update @@ -4137,14 +5082,643 @@ func (m *OpenStatusUpdate) GetChanOpen() *ChannelOpenUpdate { return nil } +func (m *OpenStatusUpdate) GetPsbtFund() *ReadyForPsbtFunding { + if x, ok := m.GetUpdate().(*OpenStatusUpdate_PsbtFund); ok { + return x.PsbtFund + } + return nil +} + +func (m *OpenStatusUpdate) GetPendingChanId() []byte { + if m != nil { + return m.PendingChanId + } + return 
nil +} + // XXX_OneofWrappers is for the internal use of the proto package. func (*OpenStatusUpdate) XXX_OneofWrappers() []interface{} { return []interface{}{ (*OpenStatusUpdate_ChanPending)(nil), (*OpenStatusUpdate_ChanOpen)(nil), + (*OpenStatusUpdate_PsbtFund)(nil), + } +} + +type KeyLocator struct { + /// The family of key being identified. + KeyFamily int32 `protobuf:"varint,1,opt,name=key_family,json=keyFamily,proto3" json:"key_family,omitempty"` + /// The precise index of the key being identified. + KeyIndex int32 `protobuf:"varint,2,opt,name=key_index,json=keyIndex,proto3" json:"key_index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyLocator) Reset() { *m = KeyLocator{} } +func (m *KeyLocator) String() string { return proto.CompactTextString(m) } +func (*KeyLocator) ProtoMessage() {} +func (*KeyLocator) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{64} +} + +func (m *KeyLocator) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyLocator.Unmarshal(m, b) +} +func (m *KeyLocator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyLocator.Marshal(b, m, deterministic) +} +func (m *KeyLocator) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyLocator.Merge(m, src) +} +func (m *KeyLocator) XXX_Size() int { + return xxx_messageInfo_KeyLocator.Size(m) +} +func (m *KeyLocator) XXX_DiscardUnknown() { + xxx_messageInfo_KeyLocator.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyLocator proto.InternalMessageInfo + +func (m *KeyLocator) GetKeyFamily() int32 { + if m != nil { + return m.KeyFamily + } + return 0 +} + +func (m *KeyLocator) GetKeyIndex() int32 { + if m != nil { + return m.KeyIndex + } + return 0 +} + +type KeyDescriptor struct { + //* + //The raw bytes of the key being identified. 
+ RawKeyBytes []byte `protobuf:"bytes,1,opt,name=raw_key_bytes,json=rawKeyBytes,proto3" json:"raw_key_bytes,omitempty"` + //* + //The key locator that identifies which key to use for signing. + KeyLoc *KeyLocator `protobuf:"bytes,2,opt,name=key_loc,json=keyLoc,proto3" json:"key_loc,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyDescriptor) Reset() { *m = KeyDescriptor{} } +func (m *KeyDescriptor) String() string { return proto.CompactTextString(m) } +func (*KeyDescriptor) ProtoMessage() {} +func (*KeyDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{65} +} + +func (m *KeyDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_KeyDescriptor.Unmarshal(m, b) +} +func (m *KeyDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_KeyDescriptor.Marshal(b, m, deterministic) +} +func (m *KeyDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyDescriptor.Merge(m, src) +} +func (m *KeyDescriptor) XXX_Size() int { + return xxx_messageInfo_KeyDescriptor.Size(m) +} +func (m *KeyDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_KeyDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyDescriptor proto.InternalMessageInfo + +func (m *KeyDescriptor) GetRawKeyBytes() []byte { + if m != nil { + return m.RawKeyBytes + } + return nil +} + +func (m *KeyDescriptor) GetKeyLoc() *KeyLocator { + if m != nil { + return m.KeyLoc + } + return nil +} + +type ChanPointShim struct { + //* + //The size of the pre-crafted output to be used as the channel point for this + //channel funding. + Amt int64 `protobuf:"varint,1,opt,name=amt,proto3" json:"amt,omitempty"` + /// The target channel point to refrence in created commitment transactions. 
+ ChanPoint *ChannelPoint `protobuf:"bytes,2,opt,name=chan_point,json=chanPoint,proto3" json:"chan_point,omitempty"` + /// Our local key to use when creating the multi-sig output. + LocalKey *KeyDescriptor `protobuf:"bytes,3,opt,name=local_key,json=localKey,proto3" json:"local_key,omitempty"` + /// The key of the remote party to use when creating the multi-sig output. + RemoteKey []byte `protobuf:"bytes,4,opt,name=remote_key,json=remoteKey,proto3" json:"remote_key,omitempty"` + //* + //If non-zero, then this will be used as the pending channel ID on the wire + //protocol to initate the funding request. This is an optional field, and + //should only be set if the responder is already expecting a specific pending + //channel ID. + PendingChanId []byte `protobuf:"bytes,5,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` + //* + //This uint32 indicates if this channel is to be considered 'frozen'. A + //frozen channel does not allow a cooperative channel close by the + //initiator. The thaw_height is the height that this restriction stops + //applying to the channel. 
+ ThawHeight uint32 `protobuf:"varint,6,opt,name=thaw_height,json=thawHeight,proto3" json:"thaw_height,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChanPointShim) Reset() { *m = ChanPointShim{} } +func (m *ChanPointShim) String() string { return proto.CompactTextString(m) } +func (*ChanPointShim) ProtoMessage() {} +func (*ChanPointShim) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{66} +} + +func (m *ChanPointShim) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ChanPointShim.Unmarshal(m, b) +} +func (m *ChanPointShim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ChanPointShim.Marshal(b, m, deterministic) +} +func (m *ChanPointShim) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChanPointShim.Merge(m, src) +} +func (m *ChanPointShim) XXX_Size() int { + return xxx_messageInfo_ChanPointShim.Size(m) +} +func (m *ChanPointShim) XXX_DiscardUnknown() { + xxx_messageInfo_ChanPointShim.DiscardUnknown(m) +} + +var xxx_messageInfo_ChanPointShim proto.InternalMessageInfo + +func (m *ChanPointShim) GetAmt() int64 { + if m != nil { + return m.Amt + } + return 0 +} + +func (m *ChanPointShim) GetChanPoint() *ChannelPoint { + if m != nil { + return m.ChanPoint + } + return nil +} + +func (m *ChanPointShim) GetLocalKey() *KeyDescriptor { + if m != nil { + return m.LocalKey + } + return nil +} + +func (m *ChanPointShim) GetRemoteKey() []byte { + if m != nil { + return m.RemoteKey + } + return nil +} + +func (m *ChanPointShim) GetPendingChanId() []byte { + if m != nil { + return m.PendingChanId + } + return nil +} + +func (m *ChanPointShim) GetThawHeight() uint32 { + if m != nil { + return m.ThawHeight + } + return 0 +} + +type PsbtShim struct { + //* + //A unique identifier of 32 random bytes that will be used as the pending + //channel ID to identify the PSBT state machine when interacting with it and + 
//on the wire protocol to initiate the funding request. + PendingChanId []byte `protobuf:"bytes,1,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` + //* + //An optional base PSBT the new channel output will be added to. If this is + //non-empty, it must be a binary serialized PSBT. + BasePsbt []byte `protobuf:"bytes,2,opt,name=base_psbt,json=basePsbt,proto3" json:"base_psbt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PsbtShim) Reset() { *m = PsbtShim{} } +func (m *PsbtShim) String() string { return proto.CompactTextString(m) } +func (*PsbtShim) ProtoMessage() {} +func (*PsbtShim) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{67} +} + +func (m *PsbtShim) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PsbtShim.Unmarshal(m, b) +} +func (m *PsbtShim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PsbtShim.Marshal(b, m, deterministic) +} +func (m *PsbtShim) XXX_Merge(src proto.Message) { + xxx_messageInfo_PsbtShim.Merge(m, src) +} +func (m *PsbtShim) XXX_Size() int { + return xxx_messageInfo_PsbtShim.Size(m) +} +func (m *PsbtShim) XXX_DiscardUnknown() { + xxx_messageInfo_PsbtShim.DiscardUnknown(m) +} + +var xxx_messageInfo_PsbtShim proto.InternalMessageInfo + +func (m *PsbtShim) GetPendingChanId() []byte { + if m != nil { + return m.PendingChanId + } + return nil +} + +func (m *PsbtShim) GetBasePsbt() []byte { + if m != nil { + return m.BasePsbt + } + return nil +} + +type FundingShim struct { + // Types that are valid to be assigned to Shim: + // *FundingShim_ChanPointShim + // *FundingShim_PsbtShim + Shim isFundingShim_Shim `protobuf_oneof:"shim"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FundingShim) Reset() { *m = FundingShim{} } +func (m *FundingShim) String() string { 
return proto.CompactTextString(m) } +func (*FundingShim) ProtoMessage() {} +func (*FundingShim) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{68} +} + +func (m *FundingShim) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FundingShim.Unmarshal(m, b) +} +func (m *FundingShim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FundingShim.Marshal(b, m, deterministic) +} +func (m *FundingShim) XXX_Merge(src proto.Message) { + xxx_messageInfo_FundingShim.Merge(m, src) +} +func (m *FundingShim) XXX_Size() int { + return xxx_messageInfo_FundingShim.Size(m) +} +func (m *FundingShim) XXX_DiscardUnknown() { + xxx_messageInfo_FundingShim.DiscardUnknown(m) +} + +var xxx_messageInfo_FundingShim proto.InternalMessageInfo + +type isFundingShim_Shim interface { + isFundingShim_Shim() +} + +type FundingShim_ChanPointShim struct { + ChanPointShim *ChanPointShim `protobuf:"bytes,1,opt,name=chan_point_shim,json=chanPointShim,proto3,oneof"` +} + +type FundingShim_PsbtShim struct { + PsbtShim *PsbtShim `protobuf:"bytes,2,opt,name=psbt_shim,json=psbtShim,proto3,oneof"` +} + +func (*FundingShim_ChanPointShim) isFundingShim_Shim() {} + +func (*FundingShim_PsbtShim) isFundingShim_Shim() {} + +func (m *FundingShim) GetShim() isFundingShim_Shim { + if m != nil { + return m.Shim + } + return nil +} + +func (m *FundingShim) GetChanPointShim() *ChanPointShim { + if x, ok := m.GetShim().(*FundingShim_ChanPointShim); ok { + return x.ChanPointShim + } + return nil +} + +func (m *FundingShim) GetPsbtShim() *PsbtShim { + if x, ok := m.GetShim().(*FundingShim_PsbtShim); ok { + return x.PsbtShim + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*FundingShim) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*FundingShim_ChanPointShim)(nil), + (*FundingShim_PsbtShim)(nil), + } +} + +type FundingShimCancel struct { + /// The pending channel ID of the channel to cancel the funding shim for. + PendingChanId []byte `protobuf:"bytes,1,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FundingShimCancel) Reset() { *m = FundingShimCancel{} } +func (m *FundingShimCancel) String() string { return proto.CompactTextString(m) } +func (*FundingShimCancel) ProtoMessage() {} +func (*FundingShimCancel) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{69} +} + +func (m *FundingShimCancel) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FundingShimCancel.Unmarshal(m, b) +} +func (m *FundingShimCancel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FundingShimCancel.Marshal(b, m, deterministic) +} +func (m *FundingShimCancel) XXX_Merge(src proto.Message) { + xxx_messageInfo_FundingShimCancel.Merge(m, src) +} +func (m *FundingShimCancel) XXX_Size() int { + return xxx_messageInfo_FundingShimCancel.Size(m) +} +func (m *FundingShimCancel) XXX_DiscardUnknown() { + xxx_messageInfo_FundingShimCancel.DiscardUnknown(m) +} + +var xxx_messageInfo_FundingShimCancel proto.InternalMessageInfo + +func (m *FundingShimCancel) GetPendingChanId() []byte { + if m != nil { + return m.PendingChanId } + return nil +} + +type FundingPsbtVerify struct { + //* + //The funded but not yet signed PSBT that sends the exact channel capacity + //amount to the PK script returned in the open channel message in a previous + //step. 
+ FundedPsbt []byte `protobuf:"bytes,1,opt,name=funded_psbt,json=fundedPsbt,proto3" json:"funded_psbt,omitempty"` + /// The pending channel ID of the channel to get the PSBT for. + PendingChanId []byte `protobuf:"bytes,2,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FundingPsbtVerify) Reset() { *m = FundingPsbtVerify{} } +func (m *FundingPsbtVerify) String() string { return proto.CompactTextString(m) } +func (*FundingPsbtVerify) ProtoMessage() {} +func (*FundingPsbtVerify) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{70} +} + +func (m *FundingPsbtVerify) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FundingPsbtVerify.Unmarshal(m, b) +} +func (m *FundingPsbtVerify) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FundingPsbtVerify.Marshal(b, m, deterministic) +} +func (m *FundingPsbtVerify) XXX_Merge(src proto.Message) { + xxx_messageInfo_FundingPsbtVerify.Merge(m, src) +} +func (m *FundingPsbtVerify) XXX_Size() int { + return xxx_messageInfo_FundingPsbtVerify.Size(m) +} +func (m *FundingPsbtVerify) XXX_DiscardUnknown() { + xxx_messageInfo_FundingPsbtVerify.DiscardUnknown(m) +} + +var xxx_messageInfo_FundingPsbtVerify proto.InternalMessageInfo + +func (m *FundingPsbtVerify) GetFundedPsbt() []byte { + if m != nil { + return m.FundedPsbt + } + return nil +} + +func (m *FundingPsbtVerify) GetPendingChanId() []byte { + if m != nil { + return m.PendingChanId + } + return nil +} + +type FundingPsbtFinalize struct { + //* + //The funded PSBT that contains all witness data to send the exact channel + //capacity amount to the PK script returned in the open channel message in a + //previous step. 
+ SignedPsbt []byte `protobuf:"bytes,1,opt,name=signed_psbt,json=signedPsbt,proto3" json:"signed_psbt,omitempty"` + /// The pending channel ID of the channel to get the PSBT for. + PendingChanId []byte `protobuf:"bytes,2,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FundingPsbtFinalize) Reset() { *m = FundingPsbtFinalize{} } +func (m *FundingPsbtFinalize) String() string { return proto.CompactTextString(m) } +func (*FundingPsbtFinalize) ProtoMessage() {} +func (*FundingPsbtFinalize) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{71} +} + +func (m *FundingPsbtFinalize) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FundingPsbtFinalize.Unmarshal(m, b) +} +func (m *FundingPsbtFinalize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FundingPsbtFinalize.Marshal(b, m, deterministic) +} +func (m *FundingPsbtFinalize) XXX_Merge(src proto.Message) { + xxx_messageInfo_FundingPsbtFinalize.Merge(m, src) +} +func (m *FundingPsbtFinalize) XXX_Size() int { + return xxx_messageInfo_FundingPsbtFinalize.Size(m) +} +func (m *FundingPsbtFinalize) XXX_DiscardUnknown() { + xxx_messageInfo_FundingPsbtFinalize.DiscardUnknown(m) +} + +var xxx_messageInfo_FundingPsbtFinalize proto.InternalMessageInfo + +func (m *FundingPsbtFinalize) GetSignedPsbt() []byte { + if m != nil { + return m.SignedPsbt + } + return nil +} + +func (m *FundingPsbtFinalize) GetPendingChanId() []byte { + if m != nil { + return m.PendingChanId + } + return nil +} + +type FundingTransitionMsg struct { + // Types that are valid to be assigned to Trigger: + // *FundingTransitionMsg_ShimRegister + // *FundingTransitionMsg_ShimCancel + // *FundingTransitionMsg_PsbtVerify + // *FundingTransitionMsg_PsbtFinalize + Trigger isFundingTransitionMsg_Trigger `protobuf_oneof:"trigger"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FundingTransitionMsg) Reset() { *m = FundingTransitionMsg{} } +func (m *FundingTransitionMsg) String() string { return proto.CompactTextString(m) } +func (*FundingTransitionMsg) ProtoMessage() {} +func (*FundingTransitionMsg) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{72} +} + +func (m *FundingTransitionMsg) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FundingTransitionMsg.Unmarshal(m, b) +} +func (m *FundingTransitionMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FundingTransitionMsg.Marshal(b, m, deterministic) +} +func (m *FundingTransitionMsg) XXX_Merge(src proto.Message) { + xxx_messageInfo_FundingTransitionMsg.Merge(m, src) +} +func (m *FundingTransitionMsg) XXX_Size() int { + return xxx_messageInfo_FundingTransitionMsg.Size(m) +} +func (m *FundingTransitionMsg) XXX_DiscardUnknown() { + xxx_messageInfo_FundingTransitionMsg.DiscardUnknown(m) +} + +var xxx_messageInfo_FundingTransitionMsg proto.InternalMessageInfo + +type isFundingTransitionMsg_Trigger interface { + isFundingTransitionMsg_Trigger() +} + +type FundingTransitionMsg_ShimRegister struct { + ShimRegister *FundingShim `protobuf:"bytes,1,opt,name=shim_register,json=shimRegister,proto3,oneof"` +} + +type FundingTransitionMsg_ShimCancel struct { + ShimCancel *FundingShimCancel `protobuf:"bytes,2,opt,name=shim_cancel,json=shimCancel,proto3,oneof"` +} + +type FundingTransitionMsg_PsbtVerify struct { + PsbtVerify *FundingPsbtVerify `protobuf:"bytes,3,opt,name=psbt_verify,json=psbtVerify,proto3,oneof"` +} + +type FundingTransitionMsg_PsbtFinalize struct { + PsbtFinalize *FundingPsbtFinalize `protobuf:"bytes,4,opt,name=psbt_finalize,json=psbtFinalize,proto3,oneof"` } +func (*FundingTransitionMsg_ShimRegister) isFundingTransitionMsg_Trigger() {} + +func (*FundingTransitionMsg_ShimCancel) 
isFundingTransitionMsg_Trigger() {} + +func (*FundingTransitionMsg_PsbtVerify) isFundingTransitionMsg_Trigger() {} + +func (*FundingTransitionMsg_PsbtFinalize) isFundingTransitionMsg_Trigger() {} + +func (m *FundingTransitionMsg) GetTrigger() isFundingTransitionMsg_Trigger { + if m != nil { + return m.Trigger + } + return nil +} + +func (m *FundingTransitionMsg) GetShimRegister() *FundingShim { + if x, ok := m.GetTrigger().(*FundingTransitionMsg_ShimRegister); ok { + return x.ShimRegister + } + return nil +} + +func (m *FundingTransitionMsg) GetShimCancel() *FundingShimCancel { + if x, ok := m.GetTrigger().(*FundingTransitionMsg_ShimCancel); ok { + return x.ShimCancel + } + return nil +} + +func (m *FundingTransitionMsg) GetPsbtVerify() *FundingPsbtVerify { + if x, ok := m.GetTrigger().(*FundingTransitionMsg_PsbtVerify); ok { + return x.PsbtVerify + } + return nil +} + +func (m *FundingTransitionMsg) GetPsbtFinalize() *FundingPsbtFinalize { + if x, ok := m.GetTrigger().(*FundingTransitionMsg_PsbtFinalize); ok { + return x.PsbtFinalize + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*FundingTransitionMsg) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*FundingTransitionMsg_ShimRegister)(nil), + (*FundingTransitionMsg_ShimCancel)(nil), + (*FundingTransitionMsg_PsbtVerify)(nil), + (*FundingTransitionMsg_PsbtFinalize)(nil), + } +} + +type FundingStateStepResp struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FundingStateStepResp) Reset() { *m = FundingStateStepResp{} } +func (m *FundingStateStepResp) String() string { return proto.CompactTextString(m) } +func (*FundingStateStepResp) ProtoMessage() {} +func (*FundingStateStepResp) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{73} +} + +func (m *FundingStateStepResp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FundingStateStepResp.Unmarshal(m, b) +} +func (m *FundingStateStepResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FundingStateStepResp.Marshal(b, m, deterministic) +} +func (m *FundingStateStepResp) XXX_Merge(src proto.Message) { + xxx_messageInfo_FundingStateStepResp.Merge(m, src) +} +func (m *FundingStateStepResp) XXX_Size() int { + return xxx_messageInfo_FundingStateStepResp.Size(m) +} +func (m *FundingStateStepResp) XXX_DiscardUnknown() { + xxx_messageInfo_FundingStateStepResp.DiscardUnknown(m) +} + +var xxx_messageInfo_FundingStateStepResp proto.InternalMessageInfo + type PendingHTLC struct { /// The direction within the channel that the htlc was sent Incoming bool `protobuf:"varint,1,opt,name=incoming,proto3" json:"incoming,omitempty"` @@ -4153,12 +5727,12 @@ type PendingHTLC struct { /// The final output to be swept back to the user's wallet Outpoint string `protobuf:"bytes,3,opt,name=outpoint,proto3" json:"outpoint,omitempty"` /// The next block height at which we can spend the current stage - MaturityHeight uint32 `protobuf:"varint,4,opt,name=maturity_height,proto3" 
json:"maturity_height,omitempty"` + MaturityHeight uint32 `protobuf:"varint,4,opt,name=maturity_height,json=maturityHeight,proto3" json:"maturity_height,omitempty"` //* //The number of blocks remaining until the current stage can be swept. //Negative values indicate how many blocks have passed since becoming //mature. - BlocksTilMaturity int32 `protobuf:"varint,5,opt,name=blocks_til_maturity,proto3" json:"blocks_til_maturity,omitempty"` + BlocksTilMaturity int32 `protobuf:"varint,5,opt,name=blocks_til_maturity,json=blocksTilMaturity,proto3" json:"blocks_til_maturity,omitempty"` /// Indicates whether the htlc is in its first or second stage of recovery Stage uint32 `protobuf:"varint,6,opt,name=stage,proto3" json:"stage,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -4170,7 +5744,7 @@ func (m *PendingHTLC) Reset() { *m = PendingHTLC{} } func (m *PendingHTLC) String() string { return proto.CompactTextString(m) } func (*PendingHTLC) ProtoMessage() {} func (*PendingHTLC) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{60} + return fileDescriptor_77a6da22d6a3feb1, []int{74} } func (m *PendingHTLC) XXX_Unmarshal(b []byte) error { @@ -4243,7 +5817,7 @@ func (m *PendingChannelsRequest) Reset() { *m = PendingChannelsRequest{} func (m *PendingChannelsRequest) String() string { return proto.CompactTextString(m) } func (*PendingChannelsRequest) ProtoMessage() {} func (*PendingChannelsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{61} + return fileDescriptor_77a6da22d6a3feb1, []int{75} } func (m *PendingChannelsRequest) XXX_Unmarshal(b []byte) error { @@ -4266,15 +5840,18 @@ var xxx_messageInfo_PendingChannelsRequest proto.InternalMessageInfo type PendingChannelsResponse struct { /// The balance in satoshis encumbered in pending channels - TotalLimboBalance int64 `protobuf:"varint,1,opt,name=total_limbo_balance,proto3" json:"total_limbo_balance,omitempty"` + TotalLimboBalance int64 
`protobuf:"varint,1,opt,name=total_limbo_balance,json=totalLimboBalance,proto3" json:"total_limbo_balance,omitempty"` /// Channels pending opening - PendingOpenChannels []*PendingChannelsResponse_PendingOpenChannel `protobuf:"bytes,2,rep,name=pending_open_channels,proto3" json:"pending_open_channels,omitempty"` - /// Channels pending closing - PendingClosingChannels []*PendingChannelsResponse_ClosedChannel `protobuf:"bytes,3,rep,name=pending_closing_channels,proto3" json:"pending_closing_channels,omitempty"` + PendingOpenChannels []*PendingChannelsResponse_PendingOpenChannel `protobuf:"bytes,2,rep,name=pending_open_channels,json=pendingOpenChannels,proto3" json:"pending_open_channels,omitempty"` + // + //Deprecated: Channels pending closing previously contained cooperatively + //closed channels with a single confirmation. These channels are now + //considered closed from the time we see them on chain. + PendingClosingChannels []*PendingChannelsResponse_ClosedChannel `protobuf:"bytes,3,rep,name=pending_closing_channels,json=pendingClosingChannels,proto3" json:"pending_closing_channels,omitempty"` // Deprecated: Do not use. 
/// Channels pending force closing - PendingForceClosingChannels []*PendingChannelsResponse_ForceClosedChannel `protobuf:"bytes,4,rep,name=pending_force_closing_channels,proto3" json:"pending_force_closing_channels,omitempty"` + PendingForceClosingChannels []*PendingChannelsResponse_ForceClosedChannel `protobuf:"bytes,4,rep,name=pending_force_closing_channels,json=pendingForceClosingChannels,proto3" json:"pending_force_closing_channels,omitempty"` /// Channels waiting for closing tx to confirm - WaitingCloseChannels []*PendingChannelsResponse_WaitingCloseChannel `protobuf:"bytes,5,rep,name=waiting_close_channels,proto3" json:"waiting_close_channels,omitempty"` + WaitingCloseChannels []*PendingChannelsResponse_WaitingCloseChannel `protobuf:"bytes,5,rep,name=waiting_close_channels,json=waitingCloseChannels,proto3" json:"waiting_close_channels,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -4284,7 +5861,7 @@ func (m *PendingChannelsResponse) Reset() { *m = PendingChannelsResponse func (m *PendingChannelsResponse) String() string { return proto.CompactTextString(m) } func (*PendingChannelsResponse) ProtoMessage() {} func (*PendingChannelsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62} + return fileDescriptor_77a6da22d6a3feb1, []int{76} } func (m *PendingChannelsResponse) XXX_Unmarshal(b []byte) error { @@ -4319,6 +5896,7 @@ func (m *PendingChannelsResponse) GetPendingOpenChannels() []*PendingChannelsRes return nil } +// Deprecated: Do not use. 
func (m *PendingChannelsResponse) GetPendingClosingChannels() []*PendingChannelsResponse_ClosedChannel { if m != nil { return m.PendingClosingChannels @@ -4341,20 +5919,25 @@ func (m *PendingChannelsResponse) GetWaitingCloseChannels() []*PendingChannelsRe } type PendingChannelsResponse_PendingChannel struct { - RemoteNodePub string `protobuf:"bytes,1,opt,name=remote_node_pub,proto3" json:"remote_node_pub,omitempty"` - ChannelPoint string `protobuf:"bytes,2,opt,name=channel_point,proto3" json:"channel_point,omitempty"` + RemoteNodePub string `protobuf:"bytes,1,opt,name=remote_node_pub,json=remoteNodePub,proto3" json:"remote_node_pub,omitempty"` + ChannelPoint string `protobuf:"bytes,2,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` Capacity int64 `protobuf:"varint,3,opt,name=capacity,proto3" json:"capacity,omitempty"` - LocalBalance int64 `protobuf:"varint,4,opt,name=local_balance,proto3" json:"local_balance,omitempty"` - RemoteBalance int64 `protobuf:"varint,5,opt,name=remote_balance,proto3" json:"remote_balance,omitempty"` - /// The minimum satoshis this node is required to reserve in its balance. - LocalChanReserveSat int64 `protobuf:"varint,6,opt,name=local_chan_reserve_sat,proto3" json:"local_chan_reserve_sat,omitempty"` + LocalBalance int64 `protobuf:"varint,4,opt,name=local_balance,json=localBalance,proto3" json:"local_balance,omitempty"` + RemoteBalance int64 `protobuf:"varint,5,opt,name=remote_balance,json=remoteBalance,proto3" json:"remote_balance,omitempty"` + /// The minimum satoshis this node is required to reserve in its + /// balance. + LocalChanReserveSat int64 `protobuf:"varint,6,opt,name=local_chan_reserve_sat,json=localChanReserveSat,proto3" json:"local_chan_reserve_sat,omitempty"` //* //The minimum satoshis the other node is required to reserve in its //balance. 
- RemoteChanReserveSat int64 `protobuf:"varint,7,opt,name=remote_chan_reserve_sat,proto3" json:"remote_chan_reserve_sat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RemoteChanReserveSat int64 `protobuf:"varint,7,opt,name=remote_chan_reserve_sat,json=remoteChanReserveSat,proto3" json:"remote_chan_reserve_sat,omitempty"` + // The party that initiated opening the channel. + Initiator Initiator `protobuf:"varint,8,opt,name=initiator,proto3,enum=lnrpc.Initiator" json:"initiator,omitempty"` + /// The commitment type used by this channel. + CommitmentType CommitmentType `protobuf:"varint,9,opt,name=commitment_type,json=commitmentType,proto3,enum=lnrpc.CommitmentType" json:"commitment_type,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *PendingChannelsResponse_PendingChannel) Reset() { @@ -4363,7 +5946,7 @@ func (m *PendingChannelsResponse_PendingChannel) Reset() { func (m *PendingChannelsResponse_PendingChannel) String() string { return proto.CompactTextString(m) } func (*PendingChannelsResponse_PendingChannel) ProtoMessage() {} func (*PendingChannelsResponse_PendingChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62, 0} + return fileDescriptor_77a6da22d6a3feb1, []int{76, 0} } func (m *PendingChannelsResponse_PendingChannel) XXX_Unmarshal(b []byte) error { @@ -4433,25 +6016,39 @@ func (m *PendingChannelsResponse_PendingChannel) GetRemoteChanReserveSat() int64 return 0 } +func (m *PendingChannelsResponse_PendingChannel) GetInitiator() Initiator { + if m != nil { + return m.Initiator + } + return Initiator_INITIATOR_UNKNOWN +} + +func (m *PendingChannelsResponse_PendingChannel) GetCommitmentType() CommitmentType { + if m != nil { + return m.CommitmentType + } + return CommitmentType_LEGACY +} + type PendingChannelsResponse_PendingOpenChannel struct { /// The pending 
channel Channel *PendingChannelsResponse_PendingChannel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` /// The height at which this channel will be confirmed - ConfirmationHeight uint32 `protobuf:"varint,2,opt,name=confirmation_height,proto3" json:"confirmation_height,omitempty"` + ConfirmationHeight uint32 `protobuf:"varint,2,opt,name=confirmation_height,json=confirmationHeight,proto3" json:"confirmation_height,omitempty"` //* //The amount calculated to be paid in fees for the current set of //commitment transactions. The fee amount is persisted with the channel //in order to allow the fee amount to be removed and recalculated with //each channel state update, including updates that happen after a system //restart. - CommitFee int64 `protobuf:"varint,4,opt,name=commit_fee,proto3" json:"commit_fee,omitempty"` + CommitFee int64 `protobuf:"varint,4,opt,name=commit_fee,json=commitFee,proto3" json:"commit_fee,omitempty"` /// The weight of the commitment transaction - CommitWeight int64 `protobuf:"varint,5,opt,name=commit_weight,proto3" json:"commit_weight,omitempty"` + CommitWeight int64 `protobuf:"varint,5,opt,name=commit_weight,json=commitWeight,proto3" json:"commit_weight,omitempty"` //* //The required number of satoshis per kilo-weight that the requester will //pay at all times, for both the funding transaction and commitment //transaction. This value can later be updated once the channel is open. 
- FeePerKw int64 `protobuf:"varint,6,opt,name=fee_per_kw,proto3" json:"fee_per_kw,omitempty"` + FeePerKw int64 `protobuf:"varint,6,opt,name=fee_per_kw,json=feePerKw,proto3" json:"fee_per_kw,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -4465,7 +6062,7 @@ func (m *PendingChannelsResponse_PendingOpenChannel) String() string { } func (*PendingChannelsResponse_PendingOpenChannel) ProtoMessage() {} func (*PendingChannelsResponse_PendingOpenChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62, 1} + return fileDescriptor_77a6da22d6a3feb1, []int{76, 1} } func (m *PendingChannelsResponse_PendingOpenChannel) XXX_Unmarshal(b []byte) error { @@ -4525,10 +6122,14 @@ type PendingChannelsResponse_WaitingCloseChannel struct { /// The pending channel waiting for closing tx to confirm Channel *PendingChannelsResponse_PendingChannel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` /// The balance in satoshis encumbered in this channel - LimboBalance int64 `protobuf:"varint,2,opt,name=limbo_balance,proto3" json:"limbo_balance,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + LimboBalance int64 `protobuf:"varint,2,opt,name=limbo_balance,json=limboBalance,proto3" json:"limbo_balance,omitempty"` + //* + //A list of valid commitment transactions. Any of these can confirm at + //this point. 
+ Commitments *PendingChannelsResponse_Commitments `protobuf:"bytes,3,opt,name=commitments,proto3" json:"commitments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *PendingChannelsResponse_WaitingCloseChannel) Reset() { @@ -4539,7 +6140,7 @@ func (m *PendingChannelsResponse_WaitingCloseChannel) String() string { } func (*PendingChannelsResponse_WaitingCloseChannel) ProtoMessage() {} func (*PendingChannelsResponse_WaitingCloseChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62, 2} + return fileDescriptor_77a6da22d6a3feb1, []int{76, 2} } func (m *PendingChannelsResponse_WaitingCloseChannel) XXX_Unmarshal(b []byte) error { @@ -4558,18 +6159,116 @@ func (m *PendingChannelsResponse_WaitingCloseChannel) XXX_DiscardUnknown() { xxx_messageInfo_PendingChannelsResponse_WaitingCloseChannel.DiscardUnknown(m) } -var xxx_messageInfo_PendingChannelsResponse_WaitingCloseChannel proto.InternalMessageInfo +var xxx_messageInfo_PendingChannelsResponse_WaitingCloseChannel proto.InternalMessageInfo + +func (m *PendingChannelsResponse_WaitingCloseChannel) GetChannel() *PendingChannelsResponse_PendingChannel { + if m != nil { + return m.Channel + } + return nil +} + +func (m *PendingChannelsResponse_WaitingCloseChannel) GetLimboBalance() int64 { + if m != nil { + return m.LimboBalance + } + return 0 +} + +func (m *PendingChannelsResponse_WaitingCloseChannel) GetCommitments() *PendingChannelsResponse_Commitments { + if m != nil { + return m.Commitments + } + return nil +} + +type PendingChannelsResponse_Commitments struct { + /// Hash of the local version of the commitment tx. + LocalTxid string `protobuf:"bytes,1,opt,name=local_txid,json=localTxid,proto3" json:"local_txid,omitempty"` + /// Hash of the remote version of the commitment tx. 
+ RemoteTxid string `protobuf:"bytes,2,opt,name=remote_txid,json=remoteTxid,proto3" json:"remote_txid,omitempty"` + /// Hash of the remote pending version of the commitment tx. + RemotePendingTxid string `protobuf:"bytes,3,opt,name=remote_pending_txid,json=remotePendingTxid,proto3" json:"remote_pending_txid,omitempty"` + // + //The amount in satoshis calculated to be paid in fees for the local + //commitment. + LocalCommitFeeSat uint64 `protobuf:"varint,4,opt,name=local_commit_fee_sat,json=localCommitFeeSat,proto3" json:"local_commit_fee_sat,omitempty"` + // + //The amount in satoshis calculated to be paid in fees for the remote + //commitment. + RemoteCommitFeeSat uint64 `protobuf:"varint,5,opt,name=remote_commit_fee_sat,json=remoteCommitFeeSat,proto3" json:"remote_commit_fee_sat,omitempty"` + // + //The amount in satoshis calculated to be paid in fees for the remote + //pending commitment. + RemotePendingCommitFeeSat uint64 `protobuf:"varint,6,opt,name=remote_pending_commit_fee_sat,json=remotePendingCommitFeeSat,proto3" json:"remote_pending_commit_fee_sat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PendingChannelsResponse_Commitments) Reset() { *m = PendingChannelsResponse_Commitments{} } +func (m *PendingChannelsResponse_Commitments) String() string { return proto.CompactTextString(m) } +func (*PendingChannelsResponse_Commitments) ProtoMessage() {} +func (*PendingChannelsResponse_Commitments) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{76, 3} +} + +func (m *PendingChannelsResponse_Commitments) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PendingChannelsResponse_Commitments.Unmarshal(m, b) +} +func (m *PendingChannelsResponse_Commitments) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PendingChannelsResponse_Commitments.Marshal(b, m, deterministic) +} +func (m 
*PendingChannelsResponse_Commitments) XXX_Merge(src proto.Message) { + xxx_messageInfo_PendingChannelsResponse_Commitments.Merge(m, src) +} +func (m *PendingChannelsResponse_Commitments) XXX_Size() int { + return xxx_messageInfo_PendingChannelsResponse_Commitments.Size(m) +} +func (m *PendingChannelsResponse_Commitments) XXX_DiscardUnknown() { + xxx_messageInfo_PendingChannelsResponse_Commitments.DiscardUnknown(m) +} + +var xxx_messageInfo_PendingChannelsResponse_Commitments proto.InternalMessageInfo + +func (m *PendingChannelsResponse_Commitments) GetLocalTxid() string { + if m != nil { + return m.LocalTxid + } + return "" +} + +func (m *PendingChannelsResponse_Commitments) GetRemoteTxid() string { + if m != nil { + return m.RemoteTxid + } + return "" +} + +func (m *PendingChannelsResponse_Commitments) GetRemotePendingTxid() string { + if m != nil { + return m.RemotePendingTxid + } + return "" +} + +func (m *PendingChannelsResponse_Commitments) GetLocalCommitFeeSat() uint64 { + if m != nil { + return m.LocalCommitFeeSat + } + return 0 +} -func (m *PendingChannelsResponse_WaitingCloseChannel) GetChannel() *PendingChannelsResponse_PendingChannel { +func (m *PendingChannelsResponse_Commitments) GetRemoteCommitFeeSat() uint64 { if m != nil { - return m.Channel + return m.RemoteCommitFeeSat } - return nil + return 0 } -func (m *PendingChannelsResponse_WaitingCloseChannel) GetLimboBalance() int64 { +func (m *PendingChannelsResponse_Commitments) GetRemotePendingCommitFeeSat() uint64 { if m != nil { - return m.LimboBalance + return m.RemotePendingCommitFeeSat } return 0 } @@ -4578,7 +6277,7 @@ type PendingChannelsResponse_ClosedChannel struct { /// The pending channel to be closed Channel *PendingChannelsResponse_PendingChannel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` /// The transaction id of the closing transaction - ClosingTxid string `protobuf:"bytes,2,opt,name=closing_txid,proto3" json:"closing_txid,omitempty"` + ClosingTxid string 
`protobuf:"bytes,2,opt,name=closing_txid,json=closingTxid,proto3" json:"closing_txid,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -4588,7 +6287,7 @@ func (m *PendingChannelsResponse_ClosedChannel) Reset() { *m = PendingCh func (m *PendingChannelsResponse_ClosedChannel) String() string { return proto.CompactTextString(m) } func (*PendingChannelsResponse_ClosedChannel) ProtoMessage() {} func (*PendingChannelsResponse_ClosedChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62, 3} + return fileDescriptor_77a6da22d6a3feb1, []int{76, 4} } func (m *PendingChannelsResponse_ClosedChannel) XXX_Unmarshal(b []byte) error { @@ -4627,22 +6326,23 @@ type PendingChannelsResponse_ForceClosedChannel struct { /// The pending channel to be force closed Channel *PendingChannelsResponse_PendingChannel `protobuf:"bytes,1,opt,name=channel,proto3" json:"channel,omitempty"` /// The transaction id of the closing transaction - ClosingTxid string `protobuf:"bytes,2,opt,name=closing_txid,proto3" json:"closing_txid,omitempty"` + ClosingTxid string `protobuf:"bytes,2,opt,name=closing_txid,json=closingTxid,proto3" json:"closing_txid,omitempty"` /// The balance in satoshis encumbered in this pending channel - LimboBalance int64 `protobuf:"varint,3,opt,name=limbo_balance,proto3" json:"limbo_balance,omitempty"` + LimboBalance int64 `protobuf:"varint,3,opt,name=limbo_balance,json=limboBalance,proto3" json:"limbo_balance,omitempty"` /// The height at which funds can be swept into the wallet - MaturityHeight uint32 `protobuf:"varint,4,opt,name=maturity_height,proto3" json:"maturity_height,omitempty"` + MaturityHeight uint32 `protobuf:"varint,4,opt,name=maturity_height,json=maturityHeight,proto3" json:"maturity_height,omitempty"` // //Remaining # of blocks until the commitment output can be swept. //Negative values indicate how many blocks have passed since becoming //mature. 
- BlocksTilMaturity int32 `protobuf:"varint,5,opt,name=blocks_til_maturity,proto3" json:"blocks_til_maturity,omitempty"` + BlocksTilMaturity int32 `protobuf:"varint,5,opt,name=blocks_til_maturity,json=blocksTilMaturity,proto3" json:"blocks_til_maturity,omitempty"` /// The total value of funds successfully recovered from this channel - RecoveredBalance int64 `protobuf:"varint,6,opt,name=recovered_balance,proto3" json:"recovered_balance,omitempty"` - PendingHtlcs []*PendingHTLC `protobuf:"bytes,8,rep,name=pending_htlcs,proto3" json:"pending_htlcs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RecoveredBalance int64 `protobuf:"varint,6,opt,name=recovered_balance,json=recoveredBalance,proto3" json:"recovered_balance,omitempty"` + PendingHtlcs []*PendingHTLC `protobuf:"bytes,8,rep,name=pending_htlcs,json=pendingHtlcs,proto3" json:"pending_htlcs,omitempty"` + Anchor PendingChannelsResponse_ForceClosedChannel_AnchorState `protobuf:"varint,9,opt,name=anchor,proto3,enum=lnrpc.PendingChannelsResponse_ForceClosedChannel_AnchorState" json:"anchor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *PendingChannelsResponse_ForceClosedChannel) Reset() { @@ -4653,7 +6353,7 @@ func (m *PendingChannelsResponse_ForceClosedChannel) String() string { } func (*PendingChannelsResponse_ForceClosedChannel) ProtoMessage() {} func (*PendingChannelsResponse_ForceClosedChannel) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{62, 4} + return fileDescriptor_77a6da22d6a3feb1, []int{76, 5} } func (m *PendingChannelsResponse_ForceClosedChannel) XXX_Unmarshal(b []byte) error { @@ -4723,6 +6423,13 @@ func (m *PendingChannelsResponse_ForceClosedChannel) GetPendingHtlcs() []*Pendin return nil } +func (m *PendingChannelsResponse_ForceClosedChannel) GetAnchor() 
PendingChannelsResponse_ForceClosedChannel_AnchorState { + if m != nil { + return m.Anchor + } + return PendingChannelsResponse_ForceClosedChannel_LIMBO +} + type ChannelEventSubscription struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -4733,7 +6440,7 @@ func (m *ChannelEventSubscription) Reset() { *m = ChannelEventSubscripti func (m *ChannelEventSubscription) String() string { return proto.CompactTextString(m) } func (*ChannelEventSubscription) ProtoMessage() {} func (*ChannelEventSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{63} + return fileDescriptor_77a6da22d6a3feb1, []int{77} } func (m *ChannelEventSubscription) XXX_Unmarshal(b []byte) error { @@ -4760,6 +6467,7 @@ type ChannelEventUpdate struct { // *ChannelEventUpdate_ClosedChannel // *ChannelEventUpdate_ActiveChannel // *ChannelEventUpdate_InactiveChannel + // *ChannelEventUpdate_PendingOpenChannel Channel isChannelEventUpdate_Channel `protobuf_oneof:"channel"` Type ChannelEventUpdate_UpdateType `protobuf:"varint,5,opt,name=type,proto3,enum=lnrpc.ChannelEventUpdate_UpdateType" json:"type,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -4771,7 +6479,7 @@ func (m *ChannelEventUpdate) Reset() { *m = ChannelEventUpdate{} } func (m *ChannelEventUpdate) String() string { return proto.CompactTextString(m) } func (*ChannelEventUpdate) ProtoMessage() {} func (*ChannelEventUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{64} + return fileDescriptor_77a6da22d6a3feb1, []int{78} } func (m *ChannelEventUpdate) XXX_Unmarshal(b []byte) error { @@ -4797,19 +6505,23 @@ type isChannelEventUpdate_Channel interface { } type ChannelEventUpdate_OpenChannel struct { - OpenChannel *Channel `protobuf:"bytes,1,opt,name=open_channel,proto3,oneof"` + OpenChannel *Channel `protobuf:"bytes,1,opt,name=open_channel,json=openChannel,proto3,oneof"` } type ChannelEventUpdate_ClosedChannel struct { - 
ClosedChannel *ChannelCloseSummary `protobuf:"bytes,2,opt,name=closed_channel,proto3,oneof"` + ClosedChannel *ChannelCloseSummary `protobuf:"bytes,2,opt,name=closed_channel,json=closedChannel,proto3,oneof"` } type ChannelEventUpdate_ActiveChannel struct { - ActiveChannel *ChannelPoint `protobuf:"bytes,3,opt,name=active_channel,proto3,oneof"` + ActiveChannel *ChannelPoint `protobuf:"bytes,3,opt,name=active_channel,json=activeChannel,proto3,oneof"` } type ChannelEventUpdate_InactiveChannel struct { - InactiveChannel *ChannelPoint `protobuf:"bytes,4,opt,name=inactive_channel,proto3,oneof"` + InactiveChannel *ChannelPoint `protobuf:"bytes,4,opt,name=inactive_channel,json=inactiveChannel,proto3,oneof"` +} + +type ChannelEventUpdate_PendingOpenChannel struct { + PendingOpenChannel *PendingUpdate `protobuf:"bytes,6,opt,name=pending_open_channel,json=pendingOpenChannel,proto3,oneof"` } func (*ChannelEventUpdate_OpenChannel) isChannelEventUpdate_Channel() {} @@ -4820,6 +6532,8 @@ func (*ChannelEventUpdate_ActiveChannel) isChannelEventUpdate_Channel() {} func (*ChannelEventUpdate_InactiveChannel) isChannelEventUpdate_Channel() {} +func (*ChannelEventUpdate_PendingOpenChannel) isChannelEventUpdate_Channel() {} + func (m *ChannelEventUpdate) GetChannel() isChannelEventUpdate_Channel { if m != nil { return m.Channel @@ -4855,6 +6569,13 @@ func (m *ChannelEventUpdate) GetInactiveChannel() *ChannelPoint { return nil } +func (m *ChannelEventUpdate) GetPendingOpenChannel() *PendingUpdate { + if x, ok := m.GetChannel().(*ChannelEventUpdate_PendingOpenChannel); ok { + return x.PendingOpenChannel + } + return nil +} + func (m *ChannelEventUpdate) GetType() ChannelEventUpdate_UpdateType { if m != nil { return m.Type @@ -4869,6 +6590,7 @@ func (*ChannelEventUpdate) XXX_OneofWrappers() []interface{} { (*ChannelEventUpdate_ClosedChannel)(nil), (*ChannelEventUpdate_ActiveChannel)(nil), (*ChannelEventUpdate_InactiveChannel)(nil), + (*ChannelEventUpdate_PendingOpenChannel)(nil), } } @@ 
-4882,7 +6604,7 @@ func (m *WalletBalanceRequest) Reset() { *m = WalletBalanceRequest{} } func (m *WalletBalanceRequest) String() string { return proto.CompactTextString(m) } func (*WalletBalanceRequest) ProtoMessage() {} func (*WalletBalanceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{65} + return fileDescriptor_77a6da22d6a3feb1, []int{79} } func (m *WalletBalanceRequest) XXX_Unmarshal(b []byte) error { @@ -4905,11 +6627,11 @@ var xxx_messageInfo_WalletBalanceRequest proto.InternalMessageInfo type WalletBalanceResponse struct { /// The balance of the wallet - TotalBalance int64 `protobuf:"varint,1,opt,name=total_balance,proto3" json:"total_balance,omitempty"` + TotalBalance int64 `protobuf:"varint,1,opt,name=total_balance,json=totalBalance,proto3" json:"total_balance,omitempty"` /// The confirmed balance of a wallet(with >= 1 confirmations) - ConfirmedBalance int64 `protobuf:"varint,2,opt,name=confirmed_balance,proto3" json:"confirmed_balance,omitempty"` + ConfirmedBalance int64 `protobuf:"varint,2,opt,name=confirmed_balance,json=confirmedBalance,proto3" json:"confirmed_balance,omitempty"` /// The unconfirmed balance of a wallet(with 0 confirmations) - UnconfirmedBalance int64 `protobuf:"varint,3,opt,name=unconfirmed_balance,proto3" json:"unconfirmed_balance,omitempty"` + UnconfirmedBalance int64 `protobuf:"varint,3,opt,name=unconfirmed_balance,json=unconfirmedBalance,proto3" json:"unconfirmed_balance,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -4919,7 +6641,7 @@ func (m *WalletBalanceResponse) Reset() { *m = WalletBalanceResponse{} } func (m *WalletBalanceResponse) String() string { return proto.CompactTextString(m) } func (*WalletBalanceResponse) ProtoMessage() {} func (*WalletBalanceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{66} + return fileDescriptor_77a6da22d6a3feb1, []int{80} } func (m 
*WalletBalanceResponse) XXX_Unmarshal(b []byte) error { @@ -4971,7 +6693,7 @@ func (m *ChannelBalanceRequest) Reset() { *m = ChannelBalanceRequest{} } func (m *ChannelBalanceRequest) String() string { return proto.CompactTextString(m) } func (*ChannelBalanceRequest) ProtoMessage() {} func (*ChannelBalanceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{67} + return fileDescriptor_77a6da22d6a3feb1, []int{81} } func (m *ChannelBalanceRequest) XXX_Unmarshal(b []byte) error { @@ -4996,7 +6718,7 @@ type ChannelBalanceResponse struct { /// Sum of channels balances denominated in satoshis Balance int64 `protobuf:"varint,1,opt,name=balance,proto3" json:"balance,omitempty"` /// Sum of channels pending balances denominated in satoshis - PendingOpenBalance int64 `protobuf:"varint,2,opt,name=pending_open_balance,proto3" json:"pending_open_balance,omitempty"` + PendingOpenBalance int64 `protobuf:"varint,2,opt,name=pending_open_balance,json=pendingOpenBalance,proto3" json:"pending_open_balance,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -5006,7 +6728,7 @@ func (m *ChannelBalanceResponse) Reset() { *m = ChannelBalanceResponse{} func (m *ChannelBalanceResponse) String() string { return proto.CompactTextString(m) } func (*ChannelBalanceResponse) ProtoMessage() {} func (*ChannelBalanceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{68} + return fileDescriptor_77a6da22d6a3feb1, []int{82} } func (m *ChannelBalanceResponse) XXX_Unmarshal(b []byte) error { @@ -5044,9 +6766,22 @@ func (m *ChannelBalanceResponse) GetPendingOpenBalance() int64 { type QueryRoutesRequest struct { /// The 33-byte hex-encoded public key for the payment destination PubKey string `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - /// The amount to send expressed in satoshis + //* + //The amount to send expressed in satoshis. 
+ // + //The fields amt and amt_msat are mutually exclusive. Amt int64 `protobuf:"varint,2,opt,name=amt,proto3" json:"amt,omitempty"` - /// An optional CLTV delta from the current height that should be used for the timelock of the final hop + //* + //The amount to send expressed in millisatoshis. + // + //The fields amt and amt_msat are mutually exclusive. + AmtMsat int64 `protobuf:"varint,12,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` + //* + //An optional CLTV delta from the current height that should be used for the + //timelock of the final hop. Note that unlike SendPayment, QueryRoutes does + //not add any additional block padding on top of final_ctlv_delta. This + //padding of a few blocks needs to be added manually or otherwise failures may + //happen when a block comes in while the payment is in flight. FinalCltvDelta int32 `protobuf:"varint,4,opt,name=final_cltv_delta,json=finalCltvDelta,proto3" json:"final_cltv_delta,omitempty"` //* //The maximum number of satoshis that will be paid as a fee of the payment. @@ -5055,7 +6790,8 @@ type QueryRoutesRequest struct { //send the payment. FeeLimit *FeeLimit `protobuf:"bytes,5,opt,name=fee_limit,json=feeLimit,proto3" json:"fee_limit,omitempty"` //* - //A list of nodes to ignore during path finding. + //A list of nodes to ignore during path finding. When using REST, these fields + //must be encoded as base64. IgnoredNodes [][]byte `protobuf:"bytes,6,rep,name=ignored_nodes,json=ignoredNodes,proto3" json:"ignored_nodes,omitempty"` //* //Deprecated. A list of edges to ignore during path finding. @@ -5075,17 +6811,42 @@ type QueryRoutesRequest struct { //An optional maximum total time lock for the route. If the source is empty or //ourselves, this should not exceed lnd's `--max-cltv-expiry` setting. If //zero, then the value of `--max-cltv-expiry` is used as the limit. 
- CltvLimit uint32 `protobuf:"varint,11,opt,name=cltv_limit,json=cltvLimit,proto3" json:"cltv_limit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CltvLimit uint32 `protobuf:"varint,11,opt,name=cltv_limit,json=cltvLimit,proto3" json:"cltv_limit,omitempty"` + //* + //An optional field that can be used to pass an arbitrary set of TLV records + //to a peer which understands the new records. This can be used to pass + //application specific data during the payment attempt. If the destination + //does not support the specified recrods, and error will be returned. + //Record types are required to be in the custom range >= 65536. When using + //REST, the values must be encoded as base64. + DestCustomRecords map[uint64][]byte `protobuf:"bytes,13,rep,name=dest_custom_records,json=destCustomRecords,proto3" json:"dest_custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + //* + //The channel id of the channel that must be taken to the first hop. If zero, + //any channel may be used. + OutgoingChanId uint64 `protobuf:"varint,14,opt,name=outgoing_chan_id,json=outgoingChanId,proto3" json:"outgoing_chan_id,omitempty"` + //* + //The pubkey of the last hop of the route. If empty, any hop may be used. + LastHopPubkey []byte `protobuf:"bytes,15,opt,name=last_hop_pubkey,json=lastHopPubkey,proto3" json:"last_hop_pubkey,omitempty"` + //* + //Optional route hints to reach the destination through private channels. + RouteHints []*RouteHint `protobuf:"bytes,16,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` + //* + //Features assumed to be supported by the final node. All transitive feature + //dependencies must also be set properly. For a given feature bit pair, either + //optional or remote may be set, but not both. 
If this field is nil or empty, + //the router will try to load destination features from the graph as a + //fallback. + DestFeatures []FeatureBit `protobuf:"varint,17,rep,packed,name=dest_features,json=destFeatures,proto3,enum=lnrpc.FeatureBit" json:"dest_features,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *QueryRoutesRequest) Reset() { *m = QueryRoutesRequest{} } func (m *QueryRoutesRequest) String() string { return proto.CompactTextString(m) } func (*QueryRoutesRequest) ProtoMessage() {} func (*QueryRoutesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{69} + return fileDescriptor_77a6da22d6a3feb1, []int{83} } func (m *QueryRoutesRequest) XXX_Unmarshal(b []byte) error { @@ -5120,6 +6881,13 @@ func (m *QueryRoutesRequest) GetAmt() int64 { return 0 } +func (m *QueryRoutesRequest) GetAmtMsat() int64 { + if m != nil { + return m.AmtMsat + } + return 0 +} + func (m *QueryRoutesRequest) GetFinalCltvDelta() int32 { if m != nil { return m.FinalCltvDelta @@ -5177,10 +6945,49 @@ func (m *QueryRoutesRequest) GetCltvLimit() uint32 { return 0 } +func (m *QueryRoutesRequest) GetDestCustomRecords() map[uint64][]byte { + if m != nil { + return m.DestCustomRecords + } + return nil +} + +func (m *QueryRoutesRequest) GetOutgoingChanId() uint64 { + if m != nil { + return m.OutgoingChanId + } + return 0 +} + +func (m *QueryRoutesRequest) GetLastHopPubkey() []byte { + if m != nil { + return m.LastHopPubkey + } + return nil +} + +func (m *QueryRoutesRequest) GetRouteHints() []*RouteHint { + if m != nil { + return m.RouteHints + } + return nil +} + +func (m *QueryRoutesRequest) GetDestFeatures() []FeatureBit { + if m != nil { + return m.DestFeatures + } + return nil +} + type NodePair struct { - /// The sending node of the pair. + //* + //The sending node of the pair. When using REST, this field must be encoded as + //base64. 
From []byte `protobuf:"bytes,1,opt,name=from,proto3" json:"from,omitempty"` - /// The receiving node of the pair. + //* + //The receiving node of the pair. When using REST, this field must be encoded + //as base64. To []byte `protobuf:"bytes,2,opt,name=to,proto3" json:"to,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -5191,7 +6998,7 @@ func (m *NodePair) Reset() { *m = NodePair{} } func (m *NodePair) String() string { return proto.CompactTextString(m) } func (*NodePair) ProtoMessage() {} func (*NodePair) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{70} + return fileDescriptor_77a6da22d6a3feb1, []int{84} } func (m *NodePair) XXX_Unmarshal(b []byte) error { @@ -5244,7 +7051,7 @@ func (m *EdgeLocator) Reset() { *m = EdgeLocator{} } func (m *EdgeLocator) String() string { return proto.CompactTextString(m) } func (*EdgeLocator) ProtoMessage() {} func (*EdgeLocator) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{71} + return fileDescriptor_77a6da22d6a3feb1, []int{85} } func (m *EdgeLocator) XXX_Unmarshal(b []byte) error { @@ -5287,7 +7094,7 @@ type QueryRoutesResponse struct { //* //The success probability of the returned route based on the current mission //control state. 
[EXPERIMENTAL] - SuccessProb float64 `protobuf:"fixed64,2,opt,name=success_prob,proto3" json:"success_prob,omitempty"` + SuccessProb float64 `protobuf:"fixed64,2,opt,name=success_prob,json=successProb,proto3" json:"success_prob,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -5297,7 +7104,7 @@ func (m *QueryRoutesResponse) Reset() { *m = QueryRoutesResponse{} } func (m *QueryRoutesResponse) String() string { return proto.CompactTextString(m) } func (*QueryRoutesResponse) ProtoMessage() {} func (*QueryRoutesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{72} + return fileDescriptor_77a6da22d6a3feb1, []int{86} } func (m *QueryRoutesResponse) XXX_Unmarshal(b []byte) error { @@ -5337,31 +7144,43 @@ type Hop struct { //The unique channel ID for the channel. The first 3 bytes are the block //height, the next 3 the index within the block, and the last 2 bytes are the //output index for the channel. - ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,proto3" json:"chan_id,omitempty"` - ChanCapacity int64 `protobuf:"varint,2,opt,name=chan_capacity,proto3" json:"chan_capacity,omitempty"` - AmtToForward int64 `protobuf:"varint,3,opt,name=amt_to_forward,proto3" json:"amt_to_forward,omitempty"` // Deprecated: Do not use. - Fee int64 `protobuf:"varint,4,opt,name=fee,proto3" json:"fee,omitempty"` // Deprecated: Do not use. + ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` + ChanCapacity int64 `protobuf:"varint,2,opt,name=chan_capacity,json=chanCapacity,proto3" json:"chan_capacity,omitempty"` + AmtToForward int64 `protobuf:"varint,3,opt,name=amt_to_forward,json=amtToForward,proto3" json:"amt_to_forward,omitempty"` // Deprecated: Do not use. + Fee int64 `protobuf:"varint,4,opt,name=fee,proto3" json:"fee,omitempty"` // Deprecated: Do not use. 
Expiry uint32 `protobuf:"varint,5,opt,name=expiry,proto3" json:"expiry,omitempty"` - AmtToForwardMsat int64 `protobuf:"varint,6,opt,name=amt_to_forward_msat,proto3" json:"amt_to_forward_msat,omitempty"` - FeeMsat int64 `protobuf:"varint,7,opt,name=fee_msat,proto3" json:"fee_msat,omitempty"` + AmtToForwardMsat int64 `protobuf:"varint,6,opt,name=amt_to_forward_msat,json=amtToForwardMsat,proto3" json:"amt_to_forward_msat,omitempty"` + FeeMsat int64 `protobuf:"varint,7,opt,name=fee_msat,json=feeMsat,proto3" json:"fee_msat,omitempty"` //* //An optional public key of the hop. If the public key is given, the payment //can be executed without relying on a copy of the channel graph. - PubKey string `protobuf:"bytes,8,opt,name=pub_key,proto3" json:"pub_key,omitempty"` + PubKey string `protobuf:"bytes,8,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` //* //If set to true, then this hop will be encoded using the new variable length - //TLV format. - TlvPayload bool `protobuf:"varint,9,opt,name=tlv_payload,proto3" json:"tlv_payload,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + //TLV format. Note that if any custom tlv_records below are specified, then + //this field MUST be set to true for them to be encoded properly. + TlvPayload bool `protobuf:"varint,9,opt,name=tlv_payload,json=tlvPayload,proto3" json:"tlv_payload,omitempty"` + //* + //An optional TLV record that signals the use of an MPP payment. If present, + //the receiver will enforce that that the same mpp_record is included in the + //final hop payload of all non-zero payments in the HTLC set. If empty, a + //regular single-shot payment is or was attempted. + MppRecord *MPPRecord `protobuf:"bytes,10,opt,name=mpp_record,json=mppRecord,proto3" json:"mpp_record,omitempty"` + //* + //An optional set of key-value TLV records. 
This is useful within the context + //of the SendToRoute call as it allows callers to specify arbitrary K-V pairs + //to drop off at each hop within the onion. + CustomRecords map[uint64][]byte `protobuf:"bytes,11,rep,name=custom_records,json=customRecords,proto3" json:"custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Hop) Reset() { *m = Hop{} } func (m *Hop) String() string { return proto.CompactTextString(m) } func (*Hop) ProtoMessage() {} func (*Hop) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{73} + return fileDescriptor_77a6da22d6a3feb1, []int{87} } func (m *Hop) XXX_Unmarshal(b []byte) error { @@ -5447,6 +7266,77 @@ func (m *Hop) GetTlvPayload() bool { return false } +func (m *Hop) GetMppRecord() *MPPRecord { + if m != nil { + return m.MppRecord + } + return nil +} + +func (m *Hop) GetCustomRecords() map[uint64][]byte { + if m != nil { + return m.CustomRecords + } + return nil +} + +type MPPRecord struct { + //* + //A unique, random identifier used to authenticate the sender as the intended + //payer of a multi-path payment. The payment_addr must be the same for all + //subpayments, and match the payment_addr provided in the receiver's invoice. + //The same payment_addr must be used on all subpayments. + PaymentAddr []byte `protobuf:"bytes,11,opt,name=payment_addr,json=paymentAddr,proto3" json:"payment_addr,omitempty"` + //* + //The total amount in milli-satoshis being sent as part of a larger multi-path + //payment. The caller is responsible for ensuring subpayments to the same node + //and payment_hash sum exactly to total_amt_msat. The same + //total_amt_msat must be used on all subpayments. 
+ TotalAmtMsat int64 `protobuf:"varint,10,opt,name=total_amt_msat,json=totalAmtMsat,proto3" json:"total_amt_msat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MPPRecord) Reset() { *m = MPPRecord{} } +func (m *MPPRecord) String() string { return proto.CompactTextString(m) } +func (*MPPRecord) ProtoMessage() {} +func (*MPPRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{88} +} + +func (m *MPPRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MPPRecord.Unmarshal(m, b) +} +func (m *MPPRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MPPRecord.Marshal(b, m, deterministic) +} +func (m *MPPRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_MPPRecord.Merge(m, src) +} +func (m *MPPRecord) XXX_Size() int { + return xxx_messageInfo_MPPRecord.Size(m) +} +func (m *MPPRecord) XXX_DiscardUnknown() { + xxx_messageInfo_MPPRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_MPPRecord proto.InternalMessageInfo + +func (m *MPPRecord) GetPaymentAddr() []byte { + if m != nil { + return m.PaymentAddr + } + return nil +} + +func (m *MPPRecord) GetTotalAmtMsat() int64 { + if m != nil { + return m.TotalAmtMsat + } + return 0 +} + //* //A path through the channel graph which runs over one or more channels in //succession. This struct carries all the information required to craft the @@ -5455,32 +7345,32 @@ func (m *Hop) GetTlvPayload() bool { //carry the initial payment amount after fees are accounted for. type Route struct { //* - //The cumulative (final) time lock across the entire route. This is the CLTV + //The cumulative (final) time lock across the entire route. This is the CLTV //value that should be extended to the first hop in the route. 
All other hops //will decrement the time-lock as advertised, leaving enough time for all //hops to wait for or present the payment preimage to complete the payment. - TotalTimeLock uint32 `protobuf:"varint,1,opt,name=total_time_lock,proto3" json:"total_time_lock,omitempty"` + TotalTimeLock uint32 `protobuf:"varint,1,opt,name=total_time_lock,json=totalTimeLock,proto3" json:"total_time_lock,omitempty"` //* - //The sum of the fees paid at each hop within the final route. In the case + //The sum of the fees paid at each hop within the final route. In the case //of a one-hop payment, this value will be zero as we don't need to pay a fee //to ourselves. - TotalFees int64 `protobuf:"varint,2,opt,name=total_fees,proto3" json:"total_fees,omitempty"` // Deprecated: Do not use. + TotalFees int64 `protobuf:"varint,2,opt,name=total_fees,json=totalFees,proto3" json:"total_fees,omitempty"` // Deprecated: Do not use. //* //The total amount of funds required to complete a payment over this route. //This value includes the cumulative fees at each hop. As a result, the HTLC //extended to the first-hop in the route will need to have at least this many //satoshis, otherwise the route will fail at an intermediate node due to an //insufficient amount of fees. - TotalAmt int64 `protobuf:"varint,3,opt,name=total_amt,proto3" json:"total_amt,omitempty"` // Deprecated: Do not use. + TotalAmt int64 `protobuf:"varint,3,opt,name=total_amt,json=totalAmt,proto3" json:"total_amt,omitempty"` // Deprecated: Do not use. //* //Contains details concerning the specific forwarding details at each hop. Hops []*Hop `protobuf:"bytes,4,rep,name=hops,proto3" json:"hops,omitempty"` //* //The total fees in millisatoshis. - TotalFeesMsat int64 `protobuf:"varint,5,opt,name=total_fees_msat,proto3" json:"total_fees_msat,omitempty"` + TotalFeesMsat int64 `protobuf:"varint,5,opt,name=total_fees_msat,json=totalFeesMsat,proto3" json:"total_fees_msat,omitempty"` //* //The total amount in millisatoshis. 
- TotalAmtMsat int64 `protobuf:"varint,6,opt,name=total_amt_msat,proto3" json:"total_amt_msat,omitempty"` + TotalAmtMsat int64 `protobuf:"varint,6,opt,name=total_amt_msat,json=totalAmtMsat,proto3" json:"total_amt_msat,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -5490,7 +7380,7 @@ func (m *Route) Reset() { *m = Route{} } func (m *Route) String() string { return proto.CompactTextString(m) } func (*Route) ProtoMessage() {} func (*Route) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{74} + return fileDescriptor_77a6da22d6a3feb1, []int{89} } func (m *Route) XXX_Unmarshal(b []byte) error { @@ -5569,7 +7459,7 @@ func (m *NodeInfoRequest) Reset() { *m = NodeInfoRequest{} } func (m *NodeInfoRequest) String() string { return proto.CompactTextString(m) } func (*NodeInfoRequest) ProtoMessage() {} func (*NodeInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{75} + return fileDescriptor_77a6da22d6a3feb1, []int{90} } func (m *NodeInfoRequest) XXX_Unmarshal(b []byte) error { @@ -5612,9 +7502,9 @@ type NodeInfo struct { //it for each outgoing edge. Node *LightningNode `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` /// The total number of channels for the node. - NumChannels uint32 `protobuf:"varint,2,opt,name=num_channels,proto3" json:"num_channels,omitempty"` + NumChannels uint32 `protobuf:"varint,2,opt,name=num_channels,json=numChannels,proto3" json:"num_channels,omitempty"` /// The sum of all channels capacity for the node, denominated in satoshis. - TotalCapacity int64 `protobuf:"varint,3,opt,name=total_capacity,proto3" json:"total_capacity,omitempty"` + TotalCapacity int64 `protobuf:"varint,3,opt,name=total_capacity,json=totalCapacity,proto3" json:"total_capacity,omitempty"` /// A list of all public channels for the node. 
Channels []*ChannelEdge `protobuf:"bytes,4,rep,name=channels,proto3" json:"channels,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -5626,7 +7516,7 @@ func (m *NodeInfo) Reset() { *m = NodeInfo{} } func (m *NodeInfo) String() string { return proto.CompactTextString(m) } func (*NodeInfo) ProtoMessage() {} func (*NodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{76} + return fileDescriptor_77a6da22d6a3feb1, []int{91} } func (m *NodeInfo) XXX_Unmarshal(b []byte) error { @@ -5681,21 +7571,22 @@ func (m *NodeInfo) GetChannels() []*ChannelEdge { //graph is directed, a node will also have an incoming edge attached to it for //each outgoing edge. type LightningNode struct { - LastUpdate uint32 `protobuf:"varint,1,opt,name=last_update,proto3" json:"last_update,omitempty"` - PubKey string `protobuf:"bytes,2,opt,name=pub_key,proto3" json:"pub_key,omitempty"` - Alias string `protobuf:"bytes,3,opt,name=alias,proto3" json:"alias,omitempty"` - Addresses []*NodeAddress `protobuf:"bytes,4,rep,name=addresses,proto3" json:"addresses,omitempty"` - Color string `protobuf:"bytes,5,opt,name=color,proto3" json:"color,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + LastUpdate uint32 `protobuf:"varint,1,opt,name=last_update,json=lastUpdate,proto3" json:"last_update,omitempty"` + PubKey string `protobuf:"bytes,2,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` + Alias string `protobuf:"bytes,3,opt,name=alias,proto3" json:"alias,omitempty"` + Addresses []*NodeAddress `protobuf:"bytes,4,rep,name=addresses,proto3" json:"addresses,omitempty"` + Color string `protobuf:"bytes,5,opt,name=color,proto3" json:"color,omitempty"` + Features map[uint32]*Feature `protobuf:"bytes,6,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + 
XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *LightningNode) Reset() { *m = LightningNode{} } func (m *LightningNode) String() string { return proto.CompactTextString(m) } func (*LightningNode) ProtoMessage() {} func (*LightningNode) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{77} + return fileDescriptor_77a6da22d6a3feb1, []int{92} } func (m *LightningNode) XXX_Unmarshal(b []byte) error { @@ -5751,6 +7642,13 @@ func (m *LightningNode) GetColor() string { return "" } +func (m *LightningNode) GetFeatures() map[uint32]*Feature { + if m != nil { + return m.Features + } + return nil +} + type NodeAddress struct { Network string `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"` Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` @@ -5763,7 +7661,7 @@ func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (m *NodeAddress) String() string { return proto.CompactTextString(m) } func (*NodeAddress) ProtoMessage() {} func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{78} + return fileDescriptor_77a6da22d6a3feb1, []int{93} } func (m *NodeAddress) XXX_Unmarshal(b []byte) error { @@ -5799,13 +7697,13 @@ func (m *NodeAddress) GetAddr() string { } type RoutingPolicy struct { - TimeLockDelta uint32 `protobuf:"varint,1,opt,name=time_lock_delta,proto3" json:"time_lock_delta,omitempty"` - MinHtlc int64 `protobuf:"varint,2,opt,name=min_htlc,proto3" json:"min_htlc,omitempty"` - FeeBaseMsat int64 `protobuf:"varint,3,opt,name=fee_base_msat,proto3" json:"fee_base_msat,omitempty"` - FeeRateMilliMsat int64 `protobuf:"varint,4,opt,name=fee_rate_milli_msat,proto3" json:"fee_rate_milli_msat,omitempty"` + TimeLockDelta uint32 `protobuf:"varint,1,opt,name=time_lock_delta,json=timeLockDelta,proto3" json:"time_lock_delta,omitempty"` + MinHtlc int64 `protobuf:"varint,2,opt,name=min_htlc,json=minHtlc,proto3" json:"min_htlc,omitempty"` + 
FeeBaseMsat int64 `protobuf:"varint,3,opt,name=fee_base_msat,json=feeBaseMsat,proto3" json:"fee_base_msat,omitempty"` + FeeRateMilliMsat int64 `protobuf:"varint,4,opt,name=fee_rate_milli_msat,json=feeRateMilliMsat,proto3" json:"fee_rate_milli_msat,omitempty"` Disabled bool `protobuf:"varint,5,opt,name=disabled,proto3" json:"disabled,omitempty"` - MaxHtlcMsat uint64 `protobuf:"varint,6,opt,name=max_htlc_msat,proto3" json:"max_htlc_msat,omitempty"` - LastUpdate uint32 `protobuf:"varint,7,opt,name=last_update,proto3" json:"last_update,omitempty"` + MaxHtlcMsat uint64 `protobuf:"varint,6,opt,name=max_htlc_msat,json=maxHtlcMsat,proto3" json:"max_htlc_msat,omitempty"` + LastUpdate uint32 `protobuf:"varint,7,opt,name=last_update,json=lastUpdate,proto3" json:"last_update,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -5815,7 +7713,7 @@ func (m *RoutingPolicy) Reset() { *m = RoutingPolicy{} } func (m *RoutingPolicy) String() string { return proto.CompactTextString(m) } func (*RoutingPolicy) ProtoMessage() {} func (*RoutingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{79} + return fileDescriptor_77a6da22d6a3feb1, []int{94} } func (m *RoutingPolicy) XXX_Unmarshal(b []byte) error { @@ -5896,14 +7794,14 @@ type ChannelEdge struct { //The unique channel ID for the channel. The first 3 bytes are the block //height, the next 3 the index within the block, and the last 2 bytes are the //output index for the channel. - ChannelId uint64 `protobuf:"varint,1,opt,name=channel_id,proto3" json:"channel_id,omitempty"` - ChanPoint string `protobuf:"bytes,2,opt,name=chan_point,proto3" json:"chan_point,omitempty"` - LastUpdate uint32 `protobuf:"varint,3,opt,name=last_update,proto3" json:"last_update,omitempty"` // Deprecated: Do not use. 
- Node1Pub string `protobuf:"bytes,4,opt,name=node1_pub,proto3" json:"node1_pub,omitempty"` - Node2Pub string `protobuf:"bytes,5,opt,name=node2_pub,proto3" json:"node2_pub,omitempty"` + ChannelId uint64 `protobuf:"varint,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + ChanPoint string `protobuf:"bytes,2,opt,name=chan_point,json=chanPoint,proto3" json:"chan_point,omitempty"` + LastUpdate uint32 `protobuf:"varint,3,opt,name=last_update,json=lastUpdate,proto3" json:"last_update,omitempty"` // Deprecated: Do not use. + Node1Pub string `protobuf:"bytes,4,opt,name=node1_pub,json=node1Pub,proto3" json:"node1_pub,omitempty"` + Node2Pub string `protobuf:"bytes,5,opt,name=node2_pub,json=node2Pub,proto3" json:"node2_pub,omitempty"` Capacity int64 `protobuf:"varint,6,opt,name=capacity,proto3" json:"capacity,omitempty"` - Node1Policy *RoutingPolicy `protobuf:"bytes,7,opt,name=node1_policy,proto3" json:"node1_policy,omitempty"` - Node2Policy *RoutingPolicy `protobuf:"bytes,8,opt,name=node2_policy,proto3" json:"node2_policy,omitempty"` + Node1Policy *RoutingPolicy `protobuf:"bytes,7,opt,name=node1_policy,json=node1Policy,proto3" json:"node1_policy,omitempty"` + Node2Policy *RoutingPolicy `protobuf:"bytes,8,opt,name=node2_policy,json=node2Policy,proto3" json:"node2_policy,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -5913,7 +7811,7 @@ func (m *ChannelEdge) Reset() { *m = ChannelEdge{} } func (m *ChannelEdge) String() string { return proto.CompactTextString(m) } func (*ChannelEdge) ProtoMessage() {} func (*ChannelEdge) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{80} + return fileDescriptor_77a6da22d6a3feb1, []int{95} } func (m *ChannelEdge) XXX_Unmarshal(b []byte) error { @@ -5996,7 +7894,7 @@ type ChannelGraphRequest struct { //Whether unannounced channels are included in the response or not. If set, //unannounced channels are included. 
Unannounced channels are both private //channels, and public channels that are not yet announced to the network. - IncludeUnannounced bool `protobuf:"varint,1,opt,name=include_unannounced,proto3" json:"include_unannounced,omitempty"` + IncludeUnannounced bool `protobuf:"varint,1,opt,name=include_unannounced,json=includeUnannounced,proto3" json:"include_unannounced,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -6006,7 +7904,7 @@ func (m *ChannelGraphRequest) Reset() { *m = ChannelGraphRequest{} } func (m *ChannelGraphRequest) String() string { return proto.CompactTextString(m) } func (*ChannelGraphRequest) ProtoMessage() {} func (*ChannelGraphRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{81} + return fileDescriptor_77a6da22d6a3feb1, []int{96} } func (m *ChannelGraphRequest) XXX_Unmarshal(b []byte) error { @@ -6049,7 +7947,7 @@ func (m *ChannelGraph) Reset() { *m = ChannelGraph{} } func (m *ChannelGraph) String() string { return proto.CompactTextString(m) } func (*ChannelGraph) ProtoMessage() {} func (*ChannelGraph) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{82} + return fileDescriptor_77a6da22d6a3feb1, []int{97} } func (m *ChannelGraph) XXX_Unmarshal(b []byte) error { @@ -6084,6 +7982,140 @@ func (m *ChannelGraph) GetEdges() []*ChannelEdge { return nil } +type NodeMetricsRequest struct { + /// The requested node metrics. 
+ Types []NodeMetricType `protobuf:"varint,1,rep,packed,name=types,proto3,enum=lnrpc.NodeMetricType" json:"types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeMetricsRequest) Reset() { *m = NodeMetricsRequest{} } +func (m *NodeMetricsRequest) String() string { return proto.CompactTextString(m) } +func (*NodeMetricsRequest) ProtoMessage() {} +func (*NodeMetricsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{98} +} + +func (m *NodeMetricsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeMetricsRequest.Unmarshal(m, b) +} +func (m *NodeMetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeMetricsRequest.Marshal(b, m, deterministic) +} +func (m *NodeMetricsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetricsRequest.Merge(m, src) +} +func (m *NodeMetricsRequest) XXX_Size() int { + return xxx_messageInfo_NodeMetricsRequest.Size(m) +} +func (m *NodeMetricsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetricsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetricsRequest proto.InternalMessageInfo + +func (m *NodeMetricsRequest) GetTypes() []NodeMetricType { + if m != nil { + return m.Types + } + return nil +} + +type NodeMetricsResponse struct { + //* + //Betweenness centrality is the sum of the ratio of shortest paths that pass + //through the node for each pair of nodes in the graph (not counting paths + //starting or ending at this node). + //Map of node pubkey to betweenness centrality of the node. Normalized + //values are in the [0,1] closed interval. 
+ BetweennessCentrality map[string]*FloatMetric `protobuf:"bytes,1,rep,name=betweenness_centrality,json=betweennessCentrality,proto3" json:"betweenness_centrality,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeMetricsResponse) Reset() { *m = NodeMetricsResponse{} } +func (m *NodeMetricsResponse) String() string { return proto.CompactTextString(m) } +func (*NodeMetricsResponse) ProtoMessage() {} +func (*NodeMetricsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{99} +} + +func (m *NodeMetricsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NodeMetricsResponse.Unmarshal(m, b) +} +func (m *NodeMetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NodeMetricsResponse.Marshal(b, m, deterministic) +} +func (m *NodeMetricsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetricsResponse.Merge(m, src) +} +func (m *NodeMetricsResponse) XXX_Size() int { + return xxx_messageInfo_NodeMetricsResponse.Size(m) +} +func (m *NodeMetricsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetricsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetricsResponse proto.InternalMessageInfo + +func (m *NodeMetricsResponse) GetBetweennessCentrality() map[string]*FloatMetric { + if m != nil { + return m.BetweennessCentrality + } + return nil +} + +type FloatMetric struct { + /// Arbitrary float value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + /// The value normalized to [0,1] or [-1,1]. 
+ NormalizedValue float64 `protobuf:"fixed64,2,opt,name=normalized_value,json=normalizedValue,proto3" json:"normalized_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatMetric) Reset() { *m = FloatMetric{} } +func (m *FloatMetric) String() string { return proto.CompactTextString(m) } +func (*FloatMetric) ProtoMessage() {} +func (*FloatMetric) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{100} +} + +func (m *FloatMetric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatMetric.Unmarshal(m, b) +} +func (m *FloatMetric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatMetric.Marshal(b, m, deterministic) +} +func (m *FloatMetric) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatMetric.Merge(m, src) +} +func (m *FloatMetric) XXX_Size() int { + return xxx_messageInfo_FloatMetric.Size(m) +} +func (m *FloatMetric) XXX_DiscardUnknown() { + xxx_messageInfo_FloatMetric.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatMetric proto.InternalMessageInfo + +func (m *FloatMetric) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *FloatMetric) GetNormalizedValue() float64 { + if m != nil { + return m.NormalizedValue + } + return 0 +} + type ChanInfoRequest struct { //* //The unique channel ID for the channel. 
The first 3 bytes are the block @@ -6099,7 +8131,7 @@ func (m *ChanInfoRequest) Reset() { *m = ChanInfoRequest{} } func (m *ChanInfoRequest) String() string { return proto.CompactTextString(m) } func (*ChanInfoRequest) ProtoMessage() {} func (*ChanInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{83} + return fileDescriptor_77a6da22d6a3feb1, []int{101} } func (m *ChanInfoRequest) XXX_Unmarshal(b []byte) error { @@ -6137,7 +8169,7 @@ func (m *NetworkInfoRequest) Reset() { *m = NetworkInfoRequest{} } func (m *NetworkInfoRequest) String() string { return proto.CompactTextString(m) } func (*NetworkInfoRequest) ProtoMessage() {} func (*NetworkInfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{84} + return fileDescriptor_77a6da22d6a3feb1, []int{102} } func (m *NetworkInfoRequest) XXX_Unmarshal(b []byte) error { @@ -6159,18 +8191,18 @@ func (m *NetworkInfoRequest) XXX_DiscardUnknown() { var xxx_messageInfo_NetworkInfoRequest proto.InternalMessageInfo type NetworkInfo struct { - GraphDiameter uint32 `protobuf:"varint,1,opt,name=graph_diameter,proto3" json:"graph_diameter,omitempty"` - AvgOutDegree float64 `protobuf:"fixed64,2,opt,name=avg_out_degree,proto3" json:"avg_out_degree,omitempty"` - MaxOutDegree uint32 `protobuf:"varint,3,opt,name=max_out_degree,proto3" json:"max_out_degree,omitempty"` - NumNodes uint32 `protobuf:"varint,4,opt,name=num_nodes,proto3" json:"num_nodes,omitempty"` - NumChannels uint32 `protobuf:"varint,5,opt,name=num_channels,proto3" json:"num_channels,omitempty"` - TotalNetworkCapacity int64 `protobuf:"varint,6,opt,name=total_network_capacity,proto3" json:"total_network_capacity,omitempty"` - AvgChannelSize float64 `protobuf:"fixed64,7,opt,name=avg_channel_size,proto3" json:"avg_channel_size,omitempty"` - MinChannelSize int64 `protobuf:"varint,8,opt,name=min_channel_size,proto3" json:"min_channel_size,omitempty"` - MaxChannelSize int64 
`protobuf:"varint,9,opt,name=max_channel_size,proto3" json:"max_channel_size,omitempty"` - MedianChannelSizeSat int64 `protobuf:"varint,10,opt,name=median_channel_size_sat,proto3" json:"median_channel_size_sat,omitempty"` + GraphDiameter uint32 `protobuf:"varint,1,opt,name=graph_diameter,json=graphDiameter,proto3" json:"graph_diameter,omitempty"` + AvgOutDegree float64 `protobuf:"fixed64,2,opt,name=avg_out_degree,json=avgOutDegree,proto3" json:"avg_out_degree,omitempty"` + MaxOutDegree uint32 `protobuf:"varint,3,opt,name=max_out_degree,json=maxOutDegree,proto3" json:"max_out_degree,omitempty"` + NumNodes uint32 `protobuf:"varint,4,opt,name=num_nodes,json=numNodes,proto3" json:"num_nodes,omitempty"` + NumChannels uint32 `protobuf:"varint,5,opt,name=num_channels,json=numChannels,proto3" json:"num_channels,omitempty"` + TotalNetworkCapacity int64 `protobuf:"varint,6,opt,name=total_network_capacity,json=totalNetworkCapacity,proto3" json:"total_network_capacity,omitempty"` + AvgChannelSize float64 `protobuf:"fixed64,7,opt,name=avg_channel_size,json=avgChannelSize,proto3" json:"avg_channel_size,omitempty"` + MinChannelSize int64 `protobuf:"varint,8,opt,name=min_channel_size,json=minChannelSize,proto3" json:"min_channel_size,omitempty"` + MaxChannelSize int64 `protobuf:"varint,9,opt,name=max_channel_size,json=maxChannelSize,proto3" json:"max_channel_size,omitempty"` + MedianChannelSizeSat int64 `protobuf:"varint,10,opt,name=median_channel_size_sat,json=medianChannelSizeSat,proto3" json:"median_channel_size_sat,omitempty"` // The number of edges marked as zombies. 
- NumZombieChans uint64 `protobuf:"varint,11,opt,name=num_zombie_chans,proto3" json:"num_zombie_chans,omitempty"` + NumZombieChans uint64 `protobuf:"varint,11,opt,name=num_zombie_chans,json=numZombieChans,proto3" json:"num_zombie_chans,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -6180,7 +8212,7 @@ func (m *NetworkInfo) Reset() { *m = NetworkInfo{} } func (m *NetworkInfo) String() string { return proto.CompactTextString(m) } func (*NetworkInfo) ProtoMessage() {} func (*NetworkInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{85} + return fileDescriptor_77a6da22d6a3feb1, []int{103} } func (m *NetworkInfo) XXX_Unmarshal(b []byte) error { @@ -6288,7 +8320,7 @@ func (m *StopRequest) Reset() { *m = StopRequest{} } func (m *StopRequest) String() string { return proto.CompactTextString(m) } func (*StopRequest) ProtoMessage() {} func (*StopRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{86} + return fileDescriptor_77a6da22d6a3feb1, []int{104} } func (m *StopRequest) XXX_Unmarshal(b []byte) error { @@ -6319,7 +8351,7 @@ func (m *StopResponse) Reset() { *m = StopResponse{} } func (m *StopResponse) String() string { return proto.CompactTextString(m) } func (*StopResponse) ProtoMessage() {} func (*StopResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{87} + return fileDescriptor_77a6da22d6a3feb1, []int{105} } func (m *StopResponse) XXX_Unmarshal(b []byte) error { @@ -6350,7 +8382,7 @@ func (m *GraphTopologySubscription) Reset() { *m = GraphTopologySubscrip func (m *GraphTopologySubscription) String() string { return proto.CompactTextString(m) } func (*GraphTopologySubscription) ProtoMessage() {} func (*GraphTopologySubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{88} + return fileDescriptor_77a6da22d6a3feb1, []int{106} } func (m 
*GraphTopologySubscription) XXX_Unmarshal(b []byte) error { @@ -6384,7 +8416,7 @@ func (m *GraphTopologyUpdate) Reset() { *m = GraphTopologyUpdate{} } func (m *GraphTopologyUpdate) String() string { return proto.CompactTextString(m) } func (*GraphTopologyUpdate) ProtoMessage() {} func (*GraphTopologyUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{89} + return fileDescriptor_77a6da22d6a3feb1, []int{107} } func (m *GraphTopologyUpdate) XXX_Unmarshal(b []byte) error { @@ -6441,7 +8473,7 @@ func (m *NodeUpdate) Reset() { *m = NodeUpdate{} } func (m *NodeUpdate) String() string { return proto.CompactTextString(m) } func (*NodeUpdate) ProtoMessage() {} func (*NodeUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{90} + return fileDescriptor_77a6da22d6a3feb1, []int{108} } func (m *NodeUpdate) XXX_Unmarshal(b []byte) error { @@ -6517,7 +8549,7 @@ func (m *ChannelEdgeUpdate) Reset() { *m = ChannelEdgeUpdate{} } func (m *ChannelEdgeUpdate) String() string { return proto.CompactTextString(m) } func (*ChannelEdgeUpdate) ProtoMessage() {} func (*ChannelEdgeUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{91} + return fileDescriptor_77a6da22d6a3feb1, []int{109} } func (m *ChannelEdgeUpdate) XXX_Unmarshal(b []byte) error { @@ -6598,7 +8630,7 @@ func (m *ClosedChannelUpdate) Reset() { *m = ClosedChannelUpdate{} } func (m *ClosedChannelUpdate) String() string { return proto.CompactTextString(m) } func (*ClosedChannelUpdate) ProtoMessage() {} func (*ClosedChannelUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{92} + return fileDescriptor_77a6da22d6a3feb1, []int{110} } func (m *ClosedChannelUpdate) XXX_Unmarshal(b []byte) error { @@ -6649,17 +8681,17 @@ func (m *ClosedChannelUpdate) GetChanPoint() *ChannelPoint { type HopHint struct { /// The public key of the node at the start of the channel. 
- NodeId string `protobuf:"bytes,1,opt,name=node_id,proto3" json:"node_id,omitempty"` + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` /// The unique identifier of the channel. - ChanId uint64 `protobuf:"varint,2,opt,name=chan_id,proto3" json:"chan_id,omitempty"` + ChanId uint64 `protobuf:"varint,2,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` /// The base fee of the channel denominated in millisatoshis. - FeeBaseMsat uint32 `protobuf:"varint,3,opt,name=fee_base_msat,proto3" json:"fee_base_msat,omitempty"` + FeeBaseMsat uint32 `protobuf:"varint,3,opt,name=fee_base_msat,json=feeBaseMsat,proto3" json:"fee_base_msat,omitempty"` //* //The fee rate of the channel for sending one satoshi across it denominated in //millionths of a satoshi. - FeeProportionalMillionths uint32 `protobuf:"varint,4,opt,name=fee_proportional_millionths,proto3" json:"fee_proportional_millionths,omitempty"` + FeeProportionalMillionths uint32 `protobuf:"varint,4,opt,name=fee_proportional_millionths,json=feeProportionalMillionths,proto3" json:"fee_proportional_millionths,omitempty"` /// The time-lock delta of the channel. 
- CltvExpiryDelta uint32 `protobuf:"varint,5,opt,name=cltv_expiry_delta,proto3" json:"cltv_expiry_delta,omitempty"` + CltvExpiryDelta uint32 `protobuf:"varint,5,opt,name=cltv_expiry_delta,json=cltvExpiryDelta,proto3" json:"cltv_expiry_delta,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -6669,7 +8701,7 @@ func (m *HopHint) Reset() { *m = HopHint{} } func (m *HopHint) String() string { return proto.CompactTextString(m) } func (*HopHint) ProtoMessage() {} func (*HopHint) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{93} + return fileDescriptor_77a6da22d6a3feb1, []int{111} } func (m *HopHint) XXX_Unmarshal(b []byte) error { @@ -6729,7 +8761,7 @@ type RouteHint struct { //* //A list of hop hints that when chained together can assist in reaching a //specific destination. - HopHints []*HopHint `protobuf:"bytes,1,rep,name=hop_hints,proto3" json:"hop_hints,omitempty"` + HopHints []*HopHint `protobuf:"bytes,1,rep,name=hop_hints,json=hopHints,proto3" json:"hop_hints,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -6739,7 +8771,7 @@ func (m *RouteHint) Reset() { *m = RouteHint{} } func (m *RouteHint) String() string { return proto.CompactTextString(m) } func (*RouteHint) ProtoMessage() {} func (*RouteHint) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{94} + return fileDescriptor_77a6da22d6a3feb1, []int{112} } func (m *RouteHint) XXX_Unmarshal(b []byte) error { @@ -6774,43 +8806,52 @@ type Invoice struct { //field of the encoded payment request if the description_hash field is not //being used. Memo string `protobuf:"bytes,1,opt,name=memo,proto3" json:"memo,omitempty"` - //* Deprecated. An optional cryptographic receipt of payment which is not - //implemented. 
- Receipt []byte `protobuf:"bytes,2,opt,name=receipt,proto3" json:"receipt,omitempty"` // Deprecated: Do not use. //* //The hex-encoded preimage (32 byte) which will allow settling an incoming - //HTLC payable to this preimage - RPreimage []byte `protobuf:"bytes,3,opt,name=r_preimage,proto3" json:"r_preimage,omitempty"` - /// The hash of the preimage - RHash []byte `protobuf:"bytes,4,opt,name=r_hash,proto3" json:"r_hash,omitempty"` - /// The value of this invoice in satoshis + //HTLC payable to this preimage. When using REST, this field must be encoded + //as base64. + RPreimage []byte `protobuf:"bytes,3,opt,name=r_preimage,json=rPreimage,proto3" json:"r_preimage,omitempty"` + //* + //The hash of the preimage. When using REST, this field must be encoded as + //base64. + RHash []byte `protobuf:"bytes,4,opt,name=r_hash,json=rHash,proto3" json:"r_hash,omitempty"` + //* + //The value of this invoice in satoshis + // + //The fields value and value_msat are mutually exclusive. Value int64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` + //* + //The value of this invoice in millisatoshis + // + //The fields value and value_msat are mutually exclusive. + ValueMsat int64 `protobuf:"varint,23,opt,name=value_msat,json=valueMsat,proto3" json:"value_msat,omitempty"` /// Whether this invoice has been fulfilled Settled bool `protobuf:"varint,6,opt,name=settled,proto3" json:"settled,omitempty"` // Deprecated: Do not use. 
/// When this invoice was created - CreationDate int64 `protobuf:"varint,7,opt,name=creation_date,proto3" json:"creation_date,omitempty"` + CreationDate int64 `protobuf:"varint,7,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` /// When this invoice was settled - SettleDate int64 `protobuf:"varint,8,opt,name=settle_date,proto3" json:"settle_date,omitempty"` + SettleDate int64 `protobuf:"varint,8,opt,name=settle_date,json=settleDate,proto3" json:"settle_date,omitempty"` //* - //A bare-bones invoice for a payment within the Lightning Network. With the + //A bare-bones invoice for a payment within the Lightning Network. With the //details of the invoice, the sender has all the data necessary to send a //payment to the recipient. - PaymentRequest string `protobuf:"bytes,9,opt,name=payment_request,proto3" json:"payment_request,omitempty"` + PaymentRequest string `protobuf:"bytes,9,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` //* //Hash (SHA-256) of a description of the payment. Used if the description of //payment (memo) is too long to naturally fit within the description field - //of an encoded payment request. - DescriptionHash []byte `protobuf:"bytes,10,opt,name=description_hash,proto3" json:"description_hash,omitempty"` + //of an encoded payment request. When using REST, this field must be encoded + //as base64. + DescriptionHash []byte `protobuf:"bytes,10,opt,name=description_hash,json=descriptionHash,proto3" json:"description_hash,omitempty"` /// Payment request expiry time in seconds. Default is 3600 (1 hour). Expiry int64 `protobuf:"varint,11,opt,name=expiry,proto3" json:"expiry,omitempty"` /// Fallback on-chain address. 
- FallbackAddr string `protobuf:"bytes,12,opt,name=fallback_addr,proto3" json:"fallback_addr,omitempty"` + FallbackAddr string `protobuf:"bytes,12,opt,name=fallback_addr,json=fallbackAddr,proto3" json:"fallback_addr,omitempty"` /// Delta to use for the time-lock of the CLTV extended to the final hop. - CltvExpiry uint64 `protobuf:"varint,13,opt,name=cltv_expiry,proto3" json:"cltv_expiry,omitempty"` + CltvExpiry uint64 `protobuf:"varint,13,opt,name=cltv_expiry,json=cltvExpiry,proto3" json:"cltv_expiry,omitempty"` //* //Route hints that can each be individually used to assist in reaching the //invoice's destination. - RouteHints []*RouteHint `protobuf:"bytes,14,rep,name=route_hints,proto3" json:"route_hints,omitempty"` + RouteHints []*RouteHint `protobuf:"bytes,14,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` /// Whether this invoice should include routing hints for private channels. Private bool `protobuf:"varint,15,opt,name=private,proto3" json:"private,omitempty"` //* @@ -6818,15 +8859,15 @@ type Invoice struct { //this index making it monotonically increasing. Callers to the //SubscribeInvoices call can use this to instantly get notified of all added //invoices with an add_index greater than this one. - AddIndex uint64 `protobuf:"varint,16,opt,name=add_index,proto3" json:"add_index,omitempty"` + AddIndex uint64 `protobuf:"varint,16,opt,name=add_index,json=addIndex,proto3" json:"add_index,omitempty"` //* //The "settle" index of this invoice. Each newly settled invoice will //increment this index making it monotonically increasing. Callers to the //SubscribeInvoices call can use this to instantly get notified of all //settled invoices with an settle_index greater than this one. 
- SettleIndex uint64 `protobuf:"varint,17,opt,name=settle_index,proto3" json:"settle_index,omitempty"` + SettleIndex uint64 `protobuf:"varint,17,opt,name=settle_index,json=settleIndex,proto3" json:"settle_index,omitempty"` /// Deprecated, use amt_paid_sat or amt_paid_msat. - AmtPaid int64 `protobuf:"varint,18,opt,name=amt_paid,proto3" json:"amt_paid,omitempty"` // Deprecated: Do not use. + AmtPaid int64 `protobuf:"varint,18,opt,name=amt_paid,json=amtPaid,proto3" json:"amt_paid,omitempty"` // Deprecated: Do not use. //* //The amount that was accepted for this invoice, in satoshis. This will ONLY //be set if this invoice has been settled. We provide this field as if the @@ -6834,7 +8875,7 @@ type Invoice struct { //was ultimately accepted. Additionally, it's possible that the sender paid //MORE that was specified in the original invoice. So we'll record that here //as well. - AmtPaidSat int64 `protobuf:"varint,19,opt,name=amt_paid_sat,proto3" json:"amt_paid_sat,omitempty"` + AmtPaidSat int64 `protobuf:"varint,19,opt,name=amt_paid_sat,json=amtPaidSat,proto3" json:"amt_paid_sat,omitempty"` //* //The amount that was accepted for this invoice, in millisatoshis. This will //ONLY be set if this invoice has been settled. We provide this field as if @@ -6842,22 +8883,28 @@ type Invoice struct { //amount was ultimately accepted. Additionally, it's possible that the sender //paid MORE that was specified in the original invoice. So we'll record that //here as well. - AmtPaidMsat int64 `protobuf:"varint,20,opt,name=amt_paid_msat,proto3" json:"amt_paid_msat,omitempty"` + AmtPaidMsat int64 `protobuf:"varint,20,opt,name=amt_paid_msat,json=amtPaidMsat,proto3" json:"amt_paid_msat,omitempty"` //* //The state the invoice is in. State Invoice_InvoiceState `protobuf:"varint,21,opt,name=state,proto3,enum=lnrpc.Invoice_InvoiceState" json:"state,omitempty"` /// List of HTLCs paying to this invoice [EXPERIMENTAL]. 
- Htlcs []*InvoiceHTLC `protobuf:"bytes,22,rep,name=htlcs,proto3" json:"htlcs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Htlcs []*InvoiceHTLC `protobuf:"bytes,22,rep,name=htlcs,proto3" json:"htlcs,omitempty"` + /// List of features advertised on the invoice. + Features map[uint32]*Feature `protobuf:"bytes,24,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + //* + //Indicates if this invoice was a spontaneous payment that arrived via keysend + //[EXPERIMENTAL]. + IsKeysend bool `protobuf:"varint,25,opt,name=is_keysend,json=isKeysend,proto3" json:"is_keysend,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Invoice) Reset() { *m = Invoice{} } func (m *Invoice) String() string { return proto.CompactTextString(m) } func (*Invoice) ProtoMessage() {} func (*Invoice) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{95} + return fileDescriptor_77a6da22d6a3feb1, []int{113} } func (m *Invoice) XXX_Unmarshal(b []byte) error { @@ -6885,14 +8932,6 @@ func (m *Invoice) GetMemo() string { return "" } -// Deprecated: Do not use. -func (m *Invoice) GetReceipt() []byte { - if m != nil { - return m.Receipt - } - return nil -} - func (m *Invoice) GetRPreimage() []byte { if m != nil { return m.RPreimage @@ -6914,6 +8953,13 @@ func (m *Invoice) GetValue() int64 { return 0 } +func (m *Invoice) GetValueMsat() int64 { + if m != nil { + return m.ValueMsat + } + return 0 +} + // Deprecated: Do not use. 
func (m *Invoice) GetSettled() bool { if m != nil { @@ -7035,34 +9081,52 @@ func (m *Invoice) GetHtlcs() []*InvoiceHTLC { return nil } +func (m *Invoice) GetFeatures() map[uint32]*Feature { + if m != nil { + return m.Features + } + return nil +} + +func (m *Invoice) GetIsKeysend() bool { + if m != nil { + return m.IsKeysend + } + return false +} + /// Details of an HTLC that paid to an invoice type InvoiceHTLC struct { /// Short channel id over which the htlc was received. - ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,proto3" json:"chan_id,omitempty"` + ChanId uint64 `protobuf:"varint,1,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` /// Index identifying the htlc on the channel. - HtlcIndex uint64 `protobuf:"varint,2,opt,name=htlc_index,proto3" json:"htlc_index,omitempty"` + HtlcIndex uint64 `protobuf:"varint,2,opt,name=htlc_index,json=htlcIndex,proto3" json:"htlc_index,omitempty"` /// The amount of the htlc in msat. - AmtMsat uint64 `protobuf:"varint,3,opt,name=amt_msat,proto3" json:"amt_msat,omitempty"` + AmtMsat uint64 `protobuf:"varint,3,opt,name=amt_msat,json=amtMsat,proto3" json:"amt_msat,omitempty"` /// Block height at which this htlc was accepted. - AcceptHeight int32 `protobuf:"varint,4,opt,name=accept_height,proto3" json:"accept_height,omitempty"` + AcceptHeight int32 `protobuf:"varint,4,opt,name=accept_height,json=acceptHeight,proto3" json:"accept_height,omitempty"` /// Time at which this htlc was accepted. - AcceptTime int64 `protobuf:"varint,5,opt,name=accept_time,proto3" json:"accept_time,omitempty"` + AcceptTime int64 `protobuf:"varint,5,opt,name=accept_time,json=acceptTime,proto3" json:"accept_time,omitempty"` /// Time at which this htlc was settled or canceled. - ResolveTime int64 `protobuf:"varint,6,opt,name=resolve_time,proto3" json:"resolve_time,omitempty"` + ResolveTime int64 `protobuf:"varint,6,opt,name=resolve_time,json=resolveTime,proto3" json:"resolve_time,omitempty"` /// Block height at which this htlc expires. 
- ExpiryHeight int32 `protobuf:"varint,7,opt,name=expiry_height,proto3" json:"expiry_height,omitempty"` + ExpiryHeight int32 `protobuf:"varint,7,opt,name=expiry_height,json=expiryHeight,proto3" json:"expiry_height,omitempty"` /// Current state the htlc is in. - State InvoiceHTLCState `protobuf:"varint,8,opt,name=state,proto3,enum=lnrpc.InvoiceHTLCState" json:"state,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + State InvoiceHTLCState `protobuf:"varint,8,opt,name=state,proto3,enum=lnrpc.InvoiceHTLCState" json:"state,omitempty"` + /// Custom tlv records. + CustomRecords map[uint64][]byte `protobuf:"bytes,9,rep,name=custom_records,json=customRecords,proto3" json:"custom_records,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + /// The total amount of the mpp payment in msat. + MppTotalAmtMsat uint64 `protobuf:"varint,10,opt,name=mpp_total_amt_msat,json=mppTotalAmtMsat,proto3" json:"mpp_total_amt_msat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *InvoiceHTLC) Reset() { *m = InvoiceHTLC{} } func (m *InvoiceHTLC) String() string { return proto.CompactTextString(m) } func (*InvoiceHTLC) ProtoMessage() {} func (*InvoiceHTLC) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{96} + return fileDescriptor_77a6da22d6a3feb1, []int{114} } func (m *InvoiceHTLC) XXX_Unmarshal(b []byte) error { @@ -7139,19 +9203,33 @@ func (m *InvoiceHTLC) GetState() InvoiceHTLCState { return InvoiceHTLCState_ACCEPTED } +func (m *InvoiceHTLC) GetCustomRecords() map[uint64][]byte { + if m != nil { + return m.CustomRecords + } + return nil +} + +func (m *InvoiceHTLC) GetMppTotalAmtMsat() uint64 { + if m != nil { + return m.MppTotalAmtMsat + } + return 0 +} + type AddInvoiceResponse struct { - RHash []byte `protobuf:"bytes,1,opt,name=r_hash,proto3" 
json:"r_hash,omitempty"` + RHash []byte `protobuf:"bytes,1,opt,name=r_hash,json=rHash,proto3" json:"r_hash,omitempty"` //* - //A bare-bones invoice for a payment within the Lightning Network. With the + //A bare-bones invoice for a payment within the Lightning Network. With the //details of the invoice, the sender has all the data necessary to send a //payment to the recipient. - PaymentRequest string `protobuf:"bytes,2,opt,name=payment_request,proto3" json:"payment_request,omitempty"` + PaymentRequest string `protobuf:"bytes,2,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` //* //The "add" index of this invoice. Each newly created invoice will increment //this index making it monotonically increasing. Callers to the //SubscribeInvoices call can use this to instantly get notified of all added //invoices with an add_index greater than this one. - AddIndex uint64 `protobuf:"varint,16,opt,name=add_index,proto3" json:"add_index,omitempty"` + AddIndex uint64 `protobuf:"varint,16,opt,name=add_index,json=addIndex,proto3" json:"add_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -7161,7 +9239,7 @@ func (m *AddInvoiceResponse) Reset() { *m = AddInvoiceResponse{} } func (m *AddInvoiceResponse) String() string { return proto.CompactTextString(m) } func (*AddInvoiceResponse) ProtoMessage() {} func (*AddInvoiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{97} + return fileDescriptor_77a6da22d6a3feb1, []int{115} } func (m *AddInvoiceResponse) XXX_Unmarshal(b []byte) error { @@ -7207,9 +9285,13 @@ type PaymentHash struct { //* //The hex-encoded payment hash of the invoice to be looked up. The passed //payment hash must be exactly 32 bytes, otherwise an error is returned. - RHashStr string `protobuf:"bytes,1,opt,name=r_hash_str,proto3" json:"r_hash_str,omitempty"` - /// The payment hash of the invoice to be looked up. 
- RHash []byte `protobuf:"bytes,2,opt,name=r_hash,proto3" json:"r_hash,omitempty"` + //Deprecated now that the REST gateway supports base64 encoding of bytes + //fields. + RHashStr string `protobuf:"bytes,1,opt,name=r_hash_str,json=rHashStr,proto3" json:"r_hash_str,omitempty"` // Deprecated: Do not use. + //* + //The payment hash of the invoice to be looked up. When using REST, this field + //must be encoded as base64. + RHash []byte `protobuf:"bytes,2,opt,name=r_hash,json=rHash,proto3" json:"r_hash,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -7219,7 +9301,7 @@ func (m *PaymentHash) Reset() { *m = PaymentHash{} } func (m *PaymentHash) String() string { return proto.CompactTextString(m) } func (*PaymentHash) ProtoMessage() {} func (*PaymentHash) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{98} + return fileDescriptor_77a6da22d6a3feb1, []int{116} } func (m *PaymentHash) XXX_Unmarshal(b []byte) error { @@ -7240,6 +9322,7 @@ func (m *PaymentHash) XXX_DiscardUnknown() { var xxx_messageInfo_PaymentHash proto.InternalMessageInfo +// Deprecated: Do not use. func (m *PaymentHash) GetRHashStr() string { if m != nil { return m.RHashStr @@ -7255,14 +9338,16 @@ func (m *PaymentHash) GetRHash() []byte { } type ListInvoiceRequest struct { - /// If set, only unsettled invoices will be returned in the response. - PendingOnly bool `protobuf:"varint,1,opt,name=pending_only,proto3" json:"pending_only,omitempty"` + //* + //If set, only invoices that are not settled and not canceled will be returned + //in the response. + PendingOnly bool `protobuf:"varint,1,opt,name=pending_only,json=pendingOnly,proto3" json:"pending_only,omitempty"` //* //The index of an invoice that will be used as either the start or end of a //query to determine which invoices should be returned in the response. 
- IndexOffset uint64 `protobuf:"varint,4,opt,name=index_offset,proto3" json:"index_offset,omitempty"` + IndexOffset uint64 `protobuf:"varint,4,opt,name=index_offset,json=indexOffset,proto3" json:"index_offset,omitempty"` /// The max number of invoices to return in the response to this query. - NumMaxInvoices uint64 `protobuf:"varint,5,opt,name=num_max_invoices,proto3" json:"num_max_invoices,omitempty"` + NumMaxInvoices uint64 `protobuf:"varint,5,opt,name=num_max_invoices,json=numMaxInvoices,proto3" json:"num_max_invoices,omitempty"` //* //If set, the invoices returned will result from seeking backwards from the //specified index offset. This can be used to paginate backwards. @@ -7276,7 +9361,7 @@ func (m *ListInvoiceRequest) Reset() { *m = ListInvoiceRequest{} } func (m *ListInvoiceRequest) String() string { return proto.CompactTextString(m) } func (*ListInvoiceRequest) ProtoMessage() {} func (*ListInvoiceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{99} + return fileDescriptor_77a6da22d6a3feb1, []int{117} } func (m *ListInvoiceRequest) XXX_Unmarshal(b []byte) error { @@ -7333,11 +9418,11 @@ type ListInvoiceResponse struct { //* //The index of the last item in the set of returned invoices. This can be used //to seek further, pagination style. - LastIndexOffset uint64 `protobuf:"varint,2,opt,name=last_index_offset,proto3" json:"last_index_offset,omitempty"` + LastIndexOffset uint64 `protobuf:"varint,2,opt,name=last_index_offset,json=lastIndexOffset,proto3" json:"last_index_offset,omitempty"` //* //The index of the last item in the set of returned invoices. This can be used //to seek backwards, pagination style. 
- FirstIndexOffset uint64 `protobuf:"varint,3,opt,name=first_index_offset,proto3" json:"first_index_offset,omitempty"` + FirstIndexOffset uint64 `protobuf:"varint,3,opt,name=first_index_offset,json=firstIndexOffset,proto3" json:"first_index_offset,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -7347,7 +9432,7 @@ func (m *ListInvoiceResponse) Reset() { *m = ListInvoiceResponse{} } func (m *ListInvoiceResponse) String() string { return proto.CompactTextString(m) } func (*ListInvoiceResponse) ProtoMessage() {} func (*ListInvoiceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{100} + return fileDescriptor_77a6da22d6a3feb1, []int{118} } func (m *ListInvoiceResponse) XXX_Unmarshal(b []byte) error { @@ -7395,13 +9480,13 @@ type InvoiceSubscription struct { //notifications for all added indexes with an add_index greater than this //value. This allows callers to catch up on any events they missed while they //weren't connected to the streaming RPC. - AddIndex uint64 `protobuf:"varint,1,opt,name=add_index,proto3" json:"add_index,omitempty"` + AddIndex uint64 `protobuf:"varint,1,opt,name=add_index,json=addIndex,proto3" json:"add_index,omitempty"` //* //If specified (non-zero), then we'll first start by sending out //notifications for all settled indexes with an settle_index greater than //this value. This allows callers to catch up on any events they missed while //they weren't connected to the streaming RPC. 
- SettleIndex uint64 `protobuf:"varint,2,opt,name=settle_index,proto3" json:"settle_index,omitempty"` + SettleIndex uint64 `protobuf:"varint,2,opt,name=settle_index,json=settleIndex,proto3" json:"settle_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -7411,7 +9496,7 @@ func (m *InvoiceSubscription) Reset() { *m = InvoiceSubscription{} } func (m *InvoiceSubscription) String() string { return proto.CompactTextString(m) } func (*InvoiceSubscription) ProtoMessage() {} func (*InvoiceSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{101} + return fileDescriptor_77a6da22d6a3feb1, []int{119} } func (m *InvoiceSubscription) XXX_Unmarshal(b []byte) error { @@ -7448,39 +9533,47 @@ func (m *InvoiceSubscription) GetSettleIndex() uint64 { type Payment struct { /// The payment hash - PaymentHash string `protobuf:"bytes,1,opt,name=payment_hash,proto3" json:"payment_hash,omitempty"` + PaymentHash string `protobuf:"bytes,1,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` /// Deprecated, use value_sat or value_msat. Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` // Deprecated: Do not use. - /// The date of this payment - CreationDate int64 `protobuf:"varint,3,opt,name=creation_date,proto3" json:"creation_date,omitempty"` - /// The path this payment took - Path []string `protobuf:"bytes,4,rep,name=path,proto3" json:"path,omitempty"` + /// Deprecated, use creation_time_ns + CreationDate int64 `protobuf:"varint,3,opt,name=creation_date,json=creationDate,proto3" json:"creation_date,omitempty"` // Deprecated: Do not use. /// Deprecated, use fee_sat or fee_msat. Fee int64 `protobuf:"varint,5,opt,name=fee,proto3" json:"fee,omitempty"` // Deprecated: Do not use. 
/// The payment preimage - PaymentPreimage string `protobuf:"bytes,6,opt,name=payment_preimage,proto3" json:"payment_preimage,omitempty"` + PaymentPreimage string `protobuf:"bytes,6,opt,name=payment_preimage,json=paymentPreimage,proto3" json:"payment_preimage,omitempty"` /// The value of the payment in satoshis - ValueSat int64 `protobuf:"varint,7,opt,name=value_sat,proto3" json:"value_sat,omitempty"` + ValueSat int64 `protobuf:"varint,7,opt,name=value_sat,json=valueSat,proto3" json:"value_sat,omitempty"` /// The value of the payment in milli-satoshis - ValueMsat int64 `protobuf:"varint,8,opt,name=value_msat,proto3" json:"value_msat,omitempty"` + ValueMsat int64 `protobuf:"varint,8,opt,name=value_msat,json=valueMsat,proto3" json:"value_msat,omitempty"` /// The optional payment request being fulfilled. - PaymentRequest string `protobuf:"bytes,9,opt,name=payment_request,proto3" json:"payment_request,omitempty"` + PaymentRequest string `protobuf:"bytes,9,opt,name=payment_request,json=paymentRequest,proto3" json:"payment_request,omitempty"` // The status of the payment. Status Payment_PaymentStatus `protobuf:"varint,10,opt,name=status,proto3,enum=lnrpc.Payment_PaymentStatus" json:"status,omitempty"` /// The fee paid for this payment in satoshis - FeeSat int64 `protobuf:"varint,11,opt,name=fee_sat,proto3" json:"fee_sat,omitempty"` + FeeSat int64 `protobuf:"varint,11,opt,name=fee_sat,json=feeSat,proto3" json:"fee_sat,omitempty"` /// The fee paid for this payment in milli-satoshis - FeeMsat int64 `protobuf:"varint,12,opt,name=fee_msat,proto3" json:"fee_msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + FeeMsat int64 `protobuf:"varint,12,opt,name=fee_msat,json=feeMsat,proto3" json:"fee_msat,omitempty"` + /// The time in UNIX nanoseconds at which the payment was created. 
+ CreationTimeNs int64 `protobuf:"varint,13,opt,name=creation_time_ns,json=creationTimeNs,proto3" json:"creation_time_ns,omitempty"` + /// The HTLCs made in attempt to settle the payment. + Htlcs []*HTLCAttempt `protobuf:"bytes,14,rep,name=htlcs,proto3" json:"htlcs,omitempty"` + //* + //The creation index of this payment. Each payment can be uniquely identified + //by this index, which may not strictly increment by 1 for payments made in + //older versions of lnd. + PaymentIndex uint64 `protobuf:"varint,15,opt,name=payment_index,json=paymentIndex,proto3" json:"payment_index,omitempty"` + FailureReason PaymentFailureReason `protobuf:"varint,16,opt,name=failure_reason,json=failureReason,proto3,enum=lnrpc.PaymentFailureReason" json:"failure_reason,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Payment) Reset() { *m = Payment{} } func (m *Payment) String() string { return proto.CompactTextString(m) } func (*Payment) ProtoMessage() {} func (*Payment) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{102} + return fileDescriptor_77a6da22d6a3feb1, []int{120} } func (m *Payment) XXX_Unmarshal(b []byte) error { @@ -7516,6 +9609,7 @@ func (m *Payment) GetValue() int64 { return 0 } +// Deprecated: Do not use. func (m *Payment) GetCreationDate() int64 { if m != nil { return m.CreationDate @@ -7523,13 +9617,6 @@ func (m *Payment) GetCreationDate() int64 { return 0 } -func (m *Payment) GetPath() []string { - if m != nil { - return m.Path - } - return nil -} - // Deprecated: Do not use. 
func (m *Payment) GetFee() int64 { if m != nil { @@ -7587,12 +9674,133 @@ func (m *Payment) GetFeeMsat() int64 { return 0 } +func (m *Payment) GetCreationTimeNs() int64 { + if m != nil { + return m.CreationTimeNs + } + return 0 +} + +func (m *Payment) GetHtlcs() []*HTLCAttempt { + if m != nil { + return m.Htlcs + } + return nil +} + +func (m *Payment) GetPaymentIndex() uint64 { + if m != nil { + return m.PaymentIndex + } + return 0 +} + +func (m *Payment) GetFailureReason() PaymentFailureReason { + if m != nil { + return m.FailureReason + } + return PaymentFailureReason_FAILURE_REASON_NONE +} + +type HTLCAttempt struct { + /// The status of the HTLC. + Status HTLCAttempt_HTLCStatus `protobuf:"varint,1,opt,name=status,proto3,enum=lnrpc.HTLCAttempt_HTLCStatus" json:"status,omitempty"` + /// The route taken by this HTLC. + Route *Route `protobuf:"bytes,2,opt,name=route,proto3" json:"route,omitempty"` + /// The time in UNIX nanoseconds at which this HTLC was sent. + AttemptTimeNs int64 `protobuf:"varint,3,opt,name=attempt_time_ns,json=attemptTimeNs,proto3" json:"attempt_time_ns,omitempty"` + //* + //The time in UNIX nanoseconds at which this HTLC was settled or failed. + //This value will not be set if the HTLC is still IN_FLIGHT. + ResolveTimeNs int64 `protobuf:"varint,4,opt,name=resolve_time_ns,json=resolveTimeNs,proto3" json:"resolve_time_ns,omitempty"` + // Detailed htlc failure info. 
+ Failure *Failure `protobuf:"bytes,5,opt,name=failure,proto3" json:"failure,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTLCAttempt) Reset() { *m = HTLCAttempt{} } +func (m *HTLCAttempt) String() string { return proto.CompactTextString(m) } +func (*HTLCAttempt) ProtoMessage() {} +func (*HTLCAttempt) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{121} +} + +func (m *HTLCAttempt) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HTLCAttempt.Unmarshal(m, b) +} +func (m *HTLCAttempt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HTLCAttempt.Marshal(b, m, deterministic) +} +func (m *HTLCAttempt) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTLCAttempt.Merge(m, src) +} +func (m *HTLCAttempt) XXX_Size() int { + return xxx_messageInfo_HTLCAttempt.Size(m) +} +func (m *HTLCAttempt) XXX_DiscardUnknown() { + xxx_messageInfo_HTLCAttempt.DiscardUnknown(m) +} + +var xxx_messageInfo_HTLCAttempt proto.InternalMessageInfo + +func (m *HTLCAttempt) GetStatus() HTLCAttempt_HTLCStatus { + if m != nil { + return m.Status + } + return HTLCAttempt_IN_FLIGHT +} + +func (m *HTLCAttempt) GetRoute() *Route { + if m != nil { + return m.Route + } + return nil +} + +func (m *HTLCAttempt) GetAttemptTimeNs() int64 { + if m != nil { + return m.AttemptTimeNs + } + return 0 +} + +func (m *HTLCAttempt) GetResolveTimeNs() int64 { + if m != nil { + return m.ResolveTimeNs + } + return 0 +} + +func (m *HTLCAttempt) GetFailure() *Failure { + if m != nil { + return m.Failure + } + return nil +} + type ListPaymentsRequest struct { //* //If true, then return payments that have not yet fully completed. This means //that pending payments, as well as failed payments will show up if this - //field is set to True. 
- IncludeIncomplete bool `protobuf:"varint,1,opt,name=include_incomplete,json=includeIncomplete,proto3" json:"include_incomplete,omitempty"` + //field is set to true. This flag doesn't change the meaning of the indices, + //which are tied to individual payments. + IncludeIncomplete bool `protobuf:"varint,1,opt,name=include_incomplete,json=includeIncomplete,proto3" json:"include_incomplete,omitempty"` + //* + //The index of a payment that will be used as either the start or end of a + //query to determine which payments should be returned in the response. The + //index_offset is exclusive. In the case of a zero index_offset, the query + //will start with the oldest payment when paginating forwards, or will end + //with the most recent payment when paginating backwards. + IndexOffset uint64 `protobuf:"varint,2,opt,name=index_offset,json=indexOffset,proto3" json:"index_offset,omitempty"` + /// The maximal number of payments returned in the response to this query. + MaxPayments uint64 `protobuf:"varint,3,opt,name=max_payments,json=maxPayments,proto3" json:"max_payments,omitempty"` + //* + //If set, the payments returned will result from seeking backwards from the + //specified index offset. This can be used to paginate backwards. The order + //of the returned payments is always oldest first (ascending index order). 
+ Reversed bool `protobuf:"varint,4,opt,name=reversed,proto3" json:"reversed,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -7602,7 +9810,7 @@ func (m *ListPaymentsRequest) Reset() { *m = ListPaymentsRequest{} } func (m *ListPaymentsRequest) String() string { return proto.CompactTextString(m) } func (*ListPaymentsRequest) ProtoMessage() {} func (*ListPaymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{103} + return fileDescriptor_77a6da22d6a3feb1, []int{122} } func (m *ListPaymentsRequest) XXX_Unmarshal(b []byte) error { @@ -7630,19 +9838,48 @@ func (m *ListPaymentsRequest) GetIncludeIncomplete() bool { return false } +func (m *ListPaymentsRequest) GetIndexOffset() uint64 { + if m != nil { + return m.IndexOffset + } + return 0 +} + +func (m *ListPaymentsRequest) GetMaxPayments() uint64 { + if m != nil { + return m.MaxPayments + } + return 0 +} + +func (m *ListPaymentsRequest) GetReversed() bool { + if m != nil { + return m.Reversed + } + return false +} + type ListPaymentsResponse struct { /// The list of payments - Payments []*Payment `protobuf:"bytes,1,rep,name=payments,proto3" json:"payments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Payments []*Payment `protobuf:"bytes,1,rep,name=payments,proto3" json:"payments,omitempty"` + //* + //The index of the first item in the set of returned payments. This can be + //used as the index_offset to continue seeking backwards in the next request. + FirstIndexOffset uint64 `protobuf:"varint,2,opt,name=first_index_offset,json=firstIndexOffset,proto3" json:"first_index_offset,omitempty"` + //* + //The index of the last item in the set of returned payments. This can be used + //as the index_offset to continue seeking forwards in the next request. 
+ LastIndexOffset uint64 `protobuf:"varint,3,opt,name=last_index_offset,json=lastIndexOffset,proto3" json:"last_index_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ListPaymentsResponse) Reset() { *m = ListPaymentsResponse{} } func (m *ListPaymentsResponse) String() string { return proto.CompactTextString(m) } func (*ListPaymentsResponse) ProtoMessage() {} func (*ListPaymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{104} + return fileDescriptor_77a6da22d6a3feb1, []int{123} } func (m *ListPaymentsResponse) XXX_Unmarshal(b []byte) error { @@ -7670,6 +9907,20 @@ func (m *ListPaymentsResponse) GetPayments() []*Payment { return nil } +func (m *ListPaymentsResponse) GetFirstIndexOffset() uint64 { + if m != nil { + return m.FirstIndexOffset + } + return 0 +} + +func (m *ListPaymentsResponse) GetLastIndexOffset() uint64 { + if m != nil { + return m.LastIndexOffset + } + return 0 +} + type DeleteAllPaymentsRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -7680,7 +9931,7 @@ func (m *DeleteAllPaymentsRequest) Reset() { *m = DeleteAllPaymentsReque func (m *DeleteAllPaymentsRequest) String() string { return proto.CompactTextString(m) } func (*DeleteAllPaymentsRequest) ProtoMessage() {} func (*DeleteAllPaymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{105} + return fileDescriptor_77a6da22d6a3feb1, []int{124} } func (m *DeleteAllPaymentsRequest) XXX_Unmarshal(b []byte) error { @@ -7711,7 +9962,7 @@ func (m *DeleteAllPaymentsResponse) Reset() { *m = DeleteAllPaymentsResp func (m *DeleteAllPaymentsResponse) String() string { return proto.CompactTextString(m) } func (*DeleteAllPaymentsResponse) ProtoMessage() {} func (*DeleteAllPaymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{106} + return 
fileDescriptor_77a6da22d6a3feb1, []int{125} } func (m *DeleteAllPaymentsResponse) XXX_Unmarshal(b []byte) error { @@ -7743,7 +9994,7 @@ func (m *AbandonChannelRequest) Reset() { *m = AbandonChannelRequest{} } func (m *AbandonChannelRequest) String() string { return proto.CompactTextString(m) } func (*AbandonChannelRequest) ProtoMessage() {} func (*AbandonChannelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{107} + return fileDescriptor_77a6da22d6a3feb1, []int{126} } func (m *AbandonChannelRequest) XXX_Unmarshal(b []byte) error { @@ -7781,7 +10032,7 @@ func (m *AbandonChannelResponse) Reset() { *m = AbandonChannelResponse{} func (m *AbandonChannelResponse) String() string { return proto.CompactTextString(m) } func (*AbandonChannelResponse) ProtoMessage() {} func (*AbandonChannelResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{108} + return fileDescriptor_77a6da22d6a3feb1, []int{127} } func (m *AbandonChannelResponse) XXX_Unmarshal(b []byte) error { @@ -7814,7 +10065,7 @@ func (m *DebugLevelRequest) Reset() { *m = DebugLevelRequest{} } func (m *DebugLevelRequest) String() string { return proto.CompactTextString(m) } func (*DebugLevelRequest) ProtoMessage() {} func (*DebugLevelRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{109} + return fileDescriptor_77a6da22d6a3feb1, []int{128} } func (m *DebugLevelRequest) XXX_Unmarshal(b []byte) error { @@ -7850,7 +10101,7 @@ func (m *DebugLevelRequest) GetLevelSpec() string { } type DebugLevelResponse struct { - SubSystems string `protobuf:"bytes,1,opt,name=sub_systems,proto3" json:"sub_systems,omitempty"` + SubSystems string `protobuf:"bytes,1,opt,name=sub_systems,json=subSystems,proto3" json:"sub_systems,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -7860,7 +10111,7 @@ func (m *DebugLevelResponse) Reset() { *m = DebugLevelResponse{} 
} func (m *DebugLevelResponse) String() string { return proto.CompactTextString(m) } func (*DebugLevelResponse) ProtoMessage() {} func (*DebugLevelResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{110} + return fileDescriptor_77a6da22d6a3feb1, []int{129} } func (m *DebugLevelResponse) XXX_Unmarshal(b []byte) error { @@ -7900,7 +10151,7 @@ func (m *PayReqString) Reset() { *m = PayReqString{} } func (m *PayReqString) String() string { return proto.CompactTextString(m) } func (*PayReqString) ProtoMessage() {} func (*PayReqString) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{111} + return fileDescriptor_77a6da22d6a3feb1, []int{130} } func (m *PayReqString) XXX_Unmarshal(b []byte) error { @@ -7929,26 +10180,29 @@ func (m *PayReqString) GetPayReq() string { } type PayReq struct { - Destination string `protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` - PaymentHash string `protobuf:"bytes,2,opt,name=payment_hash,proto3" json:"payment_hash,omitempty"` - NumSatoshis int64 `protobuf:"varint,3,opt,name=num_satoshis,proto3" json:"num_satoshis,omitempty"` - Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Expiry int64 `protobuf:"varint,5,opt,name=expiry,proto3" json:"expiry,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - DescriptionHash string `protobuf:"bytes,7,opt,name=description_hash,proto3" json:"description_hash,omitempty"` - FallbackAddr string `protobuf:"bytes,8,opt,name=fallback_addr,proto3" json:"fallback_addr,omitempty"` - CltvExpiry int64 `protobuf:"varint,9,opt,name=cltv_expiry,proto3" json:"cltv_expiry,omitempty"` - RouteHints []*RouteHint `protobuf:"bytes,10,rep,name=route_hints,proto3" json:"route_hints,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Destination string 
`protobuf:"bytes,1,opt,name=destination,proto3" json:"destination,omitempty"` + PaymentHash string `protobuf:"bytes,2,opt,name=payment_hash,json=paymentHash,proto3" json:"payment_hash,omitempty"` + NumSatoshis int64 `protobuf:"varint,3,opt,name=num_satoshis,json=numSatoshis,proto3" json:"num_satoshis,omitempty"` + Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Expiry int64 `protobuf:"varint,5,opt,name=expiry,proto3" json:"expiry,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + DescriptionHash string `protobuf:"bytes,7,opt,name=description_hash,json=descriptionHash,proto3" json:"description_hash,omitempty"` + FallbackAddr string `protobuf:"bytes,8,opt,name=fallback_addr,json=fallbackAddr,proto3" json:"fallback_addr,omitempty"` + CltvExpiry int64 `protobuf:"varint,9,opt,name=cltv_expiry,json=cltvExpiry,proto3" json:"cltv_expiry,omitempty"` + RouteHints []*RouteHint `protobuf:"bytes,10,rep,name=route_hints,json=routeHints,proto3" json:"route_hints,omitempty"` + PaymentAddr []byte `protobuf:"bytes,11,opt,name=payment_addr,json=paymentAddr,proto3" json:"payment_addr,omitempty"` + NumMsat int64 `protobuf:"varint,12,opt,name=num_msat,json=numMsat,proto3" json:"num_msat,omitempty"` + Features map[uint32]*Feature `protobuf:"bytes,13,rep,name=features,proto3" json:"features,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *PayReq) Reset() { *m = PayReq{} } func (m *PayReq) String() string { return proto.CompactTextString(m) } func (*PayReq) ProtoMessage() {} func (*PayReq) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{112} + return fileDescriptor_77a6da22d6a3feb1, []int{131} } func (m *PayReq) XXX_Unmarshal(b []byte) error { @@ -8039,6 +10293,82 @@ func (m 
*PayReq) GetRouteHints() []*RouteHint { return nil } +func (m *PayReq) GetPaymentAddr() []byte { + if m != nil { + return m.PaymentAddr + } + return nil +} + +func (m *PayReq) GetNumMsat() int64 { + if m != nil { + return m.NumMsat + } + return 0 +} + +func (m *PayReq) GetFeatures() map[uint32]*Feature { + if m != nil { + return m.Features + } + return nil +} + +type Feature struct { + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + IsRequired bool `protobuf:"varint,3,opt,name=is_required,json=isRequired,proto3" json:"is_required,omitempty"` + IsKnown bool `protobuf:"varint,4,opt,name=is_known,json=isKnown,proto3" json:"is_known,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Feature) Reset() { *m = Feature{} } +func (m *Feature) String() string { return proto.CompactTextString(m) } +func (*Feature) ProtoMessage() {} +func (*Feature) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{132} +} + +func (m *Feature) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Feature.Unmarshal(m, b) +} +func (m *Feature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Feature.Marshal(b, m, deterministic) +} +func (m *Feature) XXX_Merge(src proto.Message) { + xxx_messageInfo_Feature.Merge(m, src) +} +func (m *Feature) XXX_Size() int { + return xxx_messageInfo_Feature.Size(m) +} +func (m *Feature) XXX_DiscardUnknown() { + xxx_messageInfo_Feature.DiscardUnknown(m) +} + +var xxx_messageInfo_Feature proto.InternalMessageInfo + +func (m *Feature) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Feature) GetIsRequired() bool { + if m != nil { + return m.IsRequired + } + return false +} + +func (m *Feature) GetIsKnown() bool { + if m != nil { + return m.IsKnown + } + return false +} + type FeeReportRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` 
XXX_unrecognized []byte `json:"-"` @@ -8049,7 +10379,7 @@ func (m *FeeReportRequest) Reset() { *m = FeeReportRequest{} } func (m *FeeReportRequest) String() string { return proto.CompactTextString(m) } func (*FeeReportRequest) ProtoMessage() {} func (*FeeReportRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{113} + return fileDescriptor_77a6da22d6a3feb1, []int{133} } func (m *FeeReportRequest) XXX_Unmarshal(b []byte) error { @@ -8071,14 +10401,18 @@ func (m *FeeReportRequest) XXX_DiscardUnknown() { var xxx_messageInfo_FeeReportRequest proto.InternalMessageInfo type ChannelFeeReport struct { + /// The short channel id that this fee report belongs to. + ChanId uint64 `protobuf:"varint,5,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` /// The channel that this fee report belongs to. - ChanPoint string `protobuf:"bytes,1,opt,name=chan_point,json=channel_point,proto3" json:"chan_point,omitempty"` + ChannelPoint string `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` /// The base fee charged regardless of the number of milli-satoshis sent. - BaseFeeMsat int64 `protobuf:"varint,2,opt,name=base_fee_msat,proto3" json:"base_fee_msat,omitempty"` - /// The amount charged per milli-satoshis transferred expressed in millionths of a satoshi. - FeePerMil int64 `protobuf:"varint,3,opt,name=fee_per_mil,proto3" json:"fee_per_mil,omitempty"` - /// The effective fee rate in milli-satoshis. Computed by dividing the fee_per_mil value by 1 million. - FeeRate float64 `protobuf:"fixed64,4,opt,name=fee_rate,proto3" json:"fee_rate,omitempty"` + BaseFeeMsat int64 `protobuf:"varint,2,opt,name=base_fee_msat,json=baseFeeMsat,proto3" json:"base_fee_msat,omitempty"` + /// The amount charged per milli-satoshis transferred expressed in + /// millionths of a satoshi. 
+ FeePerMil int64 `protobuf:"varint,3,opt,name=fee_per_mil,json=feePerMil,proto3" json:"fee_per_mil,omitempty"` + /// The effective fee rate in milli-satoshis. Computed by dividing the + /// fee_per_mil value by 1 million. + FeeRate float64 `protobuf:"fixed64,4,opt,name=fee_rate,json=feeRate,proto3" json:"fee_rate,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8088,7 +10422,7 @@ func (m *ChannelFeeReport) Reset() { *m = ChannelFeeReport{} } func (m *ChannelFeeReport) String() string { return proto.CompactTextString(m) } func (*ChannelFeeReport) ProtoMessage() {} func (*ChannelFeeReport) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{114} + return fileDescriptor_77a6da22d6a3feb1, []int{134} } func (m *ChannelFeeReport) XXX_Unmarshal(b []byte) error { @@ -8109,9 +10443,16 @@ func (m *ChannelFeeReport) XXX_DiscardUnknown() { var xxx_messageInfo_ChannelFeeReport proto.InternalMessageInfo -func (m *ChannelFeeReport) GetChanPoint() string { +func (m *ChannelFeeReport) GetChanId() uint64 { if m != nil { - return m.ChanPoint + return m.ChanId + } + return 0 +} + +func (m *ChannelFeeReport) GetChannelPoint() string { + if m != nil { + return m.ChannelPoint } return "" } @@ -8138,14 +10479,18 @@ func (m *ChannelFeeReport) GetFeeRate() float64 { } type FeeReportResponse struct { - /// An array of channel fee reports which describes the current fee schedule for each channel. - ChannelFees []*ChannelFeeReport `protobuf:"bytes,1,rep,name=channel_fees,proto3" json:"channel_fees,omitempty"` - /// The total amount of fee revenue (in satoshis) the switch has collected over the past 24 hrs. - DayFeeSum uint64 `protobuf:"varint,2,opt,name=day_fee_sum,proto3" json:"day_fee_sum,omitempty"` - /// The total amount of fee revenue (in satoshis) the switch has collected over the past 1 week. 
- WeekFeeSum uint64 `protobuf:"varint,3,opt,name=week_fee_sum,proto3" json:"week_fee_sum,omitempty"` - /// The total amount of fee revenue (in satoshis) the switch has collected over the past 1 month. - MonthFeeSum uint64 `protobuf:"varint,4,opt,name=month_fee_sum,proto3" json:"month_fee_sum,omitempty"` + /// An array of channel fee reports which describes the current fee schedule + /// for each channel. + ChannelFees []*ChannelFeeReport `protobuf:"bytes,1,rep,name=channel_fees,json=channelFees,proto3" json:"channel_fees,omitempty"` + /// The total amount of fee revenue (in satoshis) the switch has collected + /// over the past 24 hrs. + DayFeeSum uint64 `protobuf:"varint,2,opt,name=day_fee_sum,json=dayFeeSum,proto3" json:"day_fee_sum,omitempty"` + /// The total amount of fee revenue (in satoshis) the switch has collected + /// over the past 1 week. + WeekFeeSum uint64 `protobuf:"varint,3,opt,name=week_fee_sum,json=weekFeeSum,proto3" json:"week_fee_sum,omitempty"` + /// The total amount of fee revenue (in satoshis) the switch has collected + /// over the past 1 month. + MonthFeeSum uint64 `protobuf:"varint,4,opt,name=month_fee_sum,json=monthFeeSum,proto3" json:"month_fee_sum,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8155,7 +10500,7 @@ func (m *FeeReportResponse) Reset() { *m = FeeReportResponse{} } func (m *FeeReportResponse) String() string { return proto.CompactTextString(m) } func (*FeeReportResponse) ProtoMessage() {} func (*FeeReportResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{115} + return fileDescriptor_77a6da22d6a3feb1, []int{135} } func (m *FeeReportResponse) XXX_Unmarshal(b []byte) error { @@ -8210,13 +10555,20 @@ type PolicyUpdateRequest struct { // *PolicyUpdateRequest_ChanPoint Scope isPolicyUpdateRequest_Scope `protobuf_oneof:"scope"` /// The base fee charged regardless of the number of milli-satoshis sent. 
- BaseFeeMsat int64 `protobuf:"varint,3,opt,name=base_fee_msat,proto3" json:"base_fee_msat,omitempty"` - /// The effective fee rate in milli-satoshis. The precision of this value goes up to 6 decimal places, so 1e-6. - FeeRate float64 `protobuf:"fixed64,4,opt,name=fee_rate,proto3" json:"fee_rate,omitempty"` + BaseFeeMsat int64 `protobuf:"varint,3,opt,name=base_fee_msat,json=baseFeeMsat,proto3" json:"base_fee_msat,omitempty"` + /// The effective fee rate in milli-satoshis. The precision of this value + /// goes up to 6 decimal places, so 1e-6. + FeeRate float64 `protobuf:"fixed64,4,opt,name=fee_rate,json=feeRate,proto3" json:"fee_rate,omitempty"` /// The required timelock delta for HTLCs forwarded over the channel. - TimeLockDelta uint32 `protobuf:"varint,5,opt,name=time_lock_delta,proto3" json:"time_lock_delta,omitempty"` - /// If set, the maximum HTLC size in milli-satoshis. If unset, the maximum HTLC will be unchanged. - MaxHtlcMsat uint64 `protobuf:"varint,6,opt,name=max_htlc_msat,proto3" json:"max_htlc_msat,omitempty"` + TimeLockDelta uint32 `protobuf:"varint,5,opt,name=time_lock_delta,json=timeLockDelta,proto3" json:"time_lock_delta,omitempty"` + /// If set, the maximum HTLC size in milli-satoshis. If unset, the maximum + /// HTLC will be unchanged. + MaxHtlcMsat uint64 `protobuf:"varint,6,opt,name=max_htlc_msat,json=maxHtlcMsat,proto3" json:"max_htlc_msat,omitempty"` + /// The minimum HTLC size in milli-satoshis. Only applied if + /// min_htlc_msat_specified is true. + MinHtlcMsat uint64 `protobuf:"varint,7,opt,name=min_htlc_msat,json=minHtlcMsat,proto3" json:"min_htlc_msat,omitempty"` + /// If true, min_htlc_msat is applied. 
+ MinHtlcMsatSpecified bool `protobuf:"varint,8,opt,name=min_htlc_msat_specified,json=minHtlcMsatSpecified,proto3" json:"min_htlc_msat_specified,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8226,7 +10578,7 @@ func (m *PolicyUpdateRequest) Reset() { *m = PolicyUpdateRequest{} } func (m *PolicyUpdateRequest) String() string { return proto.CompactTextString(m) } func (*PolicyUpdateRequest) ProtoMessage() {} func (*PolicyUpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{116} + return fileDescriptor_77a6da22d6a3feb1, []int{136} } func (m *PolicyUpdateRequest) XXX_Unmarshal(b []byte) error { @@ -8256,7 +10608,7 @@ type PolicyUpdateRequest_Global struct { } type PolicyUpdateRequest_ChanPoint struct { - ChanPoint *ChannelPoint `protobuf:"bytes,2,opt,name=chan_point,proto3,oneof"` + ChanPoint *ChannelPoint `protobuf:"bytes,2,opt,name=chan_point,json=chanPoint,proto3,oneof"` } func (*PolicyUpdateRequest_Global) isPolicyUpdateRequest_Scope() {} @@ -8312,6 +10664,20 @@ func (m *PolicyUpdateRequest) GetMaxHtlcMsat() uint64 { return 0 } +func (m *PolicyUpdateRequest) GetMinHtlcMsat() uint64 { + if m != nil { + return m.MinHtlcMsat + } + return 0 +} + +func (m *PolicyUpdateRequest) GetMinHtlcMsatSpecified() bool { + if m != nil { + return m.MinHtlcMsatSpecified + } + return false +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*PolicyUpdateRequest) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -8330,7 +10696,7 @@ func (m *PolicyUpdateResponse) Reset() { *m = PolicyUpdateResponse{} } func (m *PolicyUpdateResponse) String() string { return proto.CompactTextString(m) } func (*PolicyUpdateResponse) ProtoMessage() {} func (*PolicyUpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{117} + return fileDescriptor_77a6da22d6a3feb1, []int{137} } func (m *PolicyUpdateResponse) XXX_Unmarshal(b []byte) error { @@ -8352,14 +10718,20 @@ func (m *PolicyUpdateResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PolicyUpdateResponse proto.InternalMessageInfo type ForwardingHistoryRequest struct { - /// Start time is the starting point of the forwarding history request. All records beyond this point will be included, respecting the end time, and the index offset. - StartTime uint64 `protobuf:"varint,1,opt,name=start_time,proto3" json:"start_time,omitempty"` - /// End time is the end point of the forwarding history request. The response will carry at most 50k records between the start time and the end time. The index offset can be used to implement pagination. - EndTime uint64 `protobuf:"varint,2,opt,name=end_time,proto3" json:"end_time,omitempty"` - /// Index offset is the offset in the time series to start at. As each response can only contain 50k records, callers can use this to skip around within a packed time series. - IndexOffset uint32 `protobuf:"varint,3,opt,name=index_offset,proto3" json:"index_offset,omitempty"` + /// Start time is the starting point of the forwarding history request. All + /// records beyond this point will be included, respecting the end time, and + /// the index offset. + StartTime uint64 `protobuf:"varint,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + /// End time is the end point of the forwarding history request. 
The + /// response will carry at most 50k records between the start time and the + /// end time. The index offset can be used to implement pagination. + EndTime uint64 `protobuf:"varint,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + /// Index offset is the offset in the time series to start at. As each + /// response can only contain 50k records, callers can use this to skip + /// around within a packed time series. + IndexOffset uint32 `protobuf:"varint,3,opt,name=index_offset,json=indexOffset,proto3" json:"index_offset,omitempty"` /// The max number of events to return in the response to this query. - NumMaxEvents uint32 `protobuf:"varint,4,opt,name=num_max_events,proto3" json:"num_max_events,omitempty"` + NumMaxEvents uint32 `protobuf:"varint,4,opt,name=num_max_events,json=numMaxEvents,proto3" json:"num_max_events,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8369,7 +10741,7 @@ func (m *ForwardingHistoryRequest) Reset() { *m = ForwardingHistoryReque func (m *ForwardingHistoryRequest) String() string { return proto.CompactTextString(m) } func (*ForwardingHistoryRequest) ProtoMessage() {} func (*ForwardingHistoryRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{118} + return fileDescriptor_77a6da22d6a3feb1, []int{138} } func (m *ForwardingHistoryRequest) XXX_Unmarshal(b []byte) error { @@ -8419,20 +10791,30 @@ func (m *ForwardingHistoryRequest) GetNumMaxEvents() uint32 { } type ForwardingEvent struct { - /// Timestamp is the time (unix epoch offset) that this circuit was completed. + /// Timestamp is the time (unix epoch offset) that this circuit was + /// completed. Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` /// The incoming channel ID that carried the HTLC that created the circuit. 
- ChanIdIn uint64 `protobuf:"varint,2,opt,name=chan_id_in,proto3" json:"chan_id_in,omitempty"` - /// The outgoing channel ID that carried the preimage that completed the circuit. - ChanIdOut uint64 `protobuf:"varint,4,opt,name=chan_id_out,proto3" json:"chan_id_out,omitempty"` - /// The total amount (in satoshis) of the incoming HTLC that created half the circuit. - AmtIn uint64 `protobuf:"varint,5,opt,name=amt_in,proto3" json:"amt_in,omitempty"` - /// The total amount (in satoshis) of the outgoing HTLC that created the second half of the circuit. - AmtOut uint64 `protobuf:"varint,6,opt,name=amt_out,proto3" json:"amt_out,omitempty"` + ChanIdIn uint64 `protobuf:"varint,2,opt,name=chan_id_in,json=chanIdIn,proto3" json:"chan_id_in,omitempty"` + /// The outgoing channel ID that carried the preimage that completed the + /// circuit. + ChanIdOut uint64 `protobuf:"varint,4,opt,name=chan_id_out,json=chanIdOut,proto3" json:"chan_id_out,omitempty"` + /// The total amount (in satoshis) of the incoming HTLC that created half + /// the circuit. + AmtIn uint64 `protobuf:"varint,5,opt,name=amt_in,json=amtIn,proto3" json:"amt_in,omitempty"` + /// The total amount (in satoshis) of the outgoing HTLC that created the + /// second half of the circuit. + AmtOut uint64 `protobuf:"varint,6,opt,name=amt_out,json=amtOut,proto3" json:"amt_out,omitempty"` /// The total fee (in satoshis) that this payment circuit carried. Fee uint64 `protobuf:"varint,7,opt,name=fee,proto3" json:"fee,omitempty"` /// The total fee (in milli-satoshis) that this payment circuit carried. - FeeMsat uint64 `protobuf:"varint,8,opt,name=fee_msat,proto3" json:"fee_msat,omitempty"` + FeeMsat uint64 `protobuf:"varint,8,opt,name=fee_msat,json=feeMsat,proto3" json:"fee_msat,omitempty"` + /// The total amount (in milli-satoshis) of the incoming HTLC that created + /// half the circuit. 
+ AmtInMsat uint64 `protobuf:"varint,9,opt,name=amt_in_msat,json=amtInMsat,proto3" json:"amt_in_msat,omitempty"` + /// The total amount (in milli-satoshis) of the outgoing HTLC that created + /// the second half of the circuit. + AmtOutMsat uint64 `protobuf:"varint,10,opt,name=amt_out_msat,json=amtOutMsat,proto3" json:"amt_out_msat,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8442,7 +10824,7 @@ func (m *ForwardingEvent) Reset() { *m = ForwardingEvent{} } func (m *ForwardingEvent) String() string { return proto.CompactTextString(m) } func (*ForwardingEvent) ProtoMessage() {} func (*ForwardingEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{119} + return fileDescriptor_77a6da22d6a3feb1, []int{139} } func (m *ForwardingEvent) XXX_Unmarshal(b []byte) error { @@ -8512,11 +10894,27 @@ func (m *ForwardingEvent) GetFeeMsat() uint64 { return 0 } +func (m *ForwardingEvent) GetAmtInMsat() uint64 { + if m != nil { + return m.AmtInMsat + } + return 0 +} + +func (m *ForwardingEvent) GetAmtOutMsat() uint64 { + if m != nil { + return m.AmtOutMsat + } + return 0 +} + type ForwardingHistoryResponse struct { - /// A list of forwarding events from the time slice of the time series specified in the request. - ForwardingEvents []*ForwardingEvent `protobuf:"bytes,1,rep,name=forwarding_events,proto3" json:"forwarding_events,omitempty"` - /// The index of the last time in the set of returned forwarding events. Can be used to seek further, pagination style. - LastOffsetIndex uint32 `protobuf:"varint,2,opt,name=last_offset_index,proto3" json:"last_offset_index,omitempty"` + /// A list of forwarding events from the time slice of the time series + /// specified in the request. 
+ ForwardingEvents []*ForwardingEvent `protobuf:"bytes,1,rep,name=forwarding_events,json=forwardingEvents,proto3" json:"forwarding_events,omitempty"` + /// The index of the last time in the set of returned forwarding events. Can + /// be used to seek further, pagination style. + LastOffsetIndex uint32 `protobuf:"varint,2,opt,name=last_offset_index,json=lastOffsetIndex,proto3" json:"last_offset_index,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8526,7 +10924,7 @@ func (m *ForwardingHistoryResponse) Reset() { *m = ForwardingHistoryResp func (m *ForwardingHistoryResponse) String() string { return proto.CompactTextString(m) } func (*ForwardingHistoryResponse) ProtoMessage() {} func (*ForwardingHistoryResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{120} + return fileDescriptor_77a6da22d6a3feb1, []int{140} } func (m *ForwardingHistoryResponse) XXX_Unmarshal(b []byte) error { @@ -8573,7 +10971,7 @@ func (m *ExportChannelBackupRequest) Reset() { *m = ExportChannelBackupR func (m *ExportChannelBackupRequest) String() string { return proto.CompactTextString(m) } func (*ExportChannelBackupRequest) ProtoMessage() {} func (*ExportChannelBackupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{121} + return fileDescriptor_77a6da22d6a3feb1, []int{141} } func (m *ExportChannelBackupRequest) XXX_Unmarshal(b []byte) error { @@ -8604,12 +11002,13 @@ func (m *ExportChannelBackupRequest) GetChanPoint() *ChannelPoint { type ChannelBackup struct { //* //Identifies the channel that this backup belongs to. - ChanPoint *ChannelPoint `protobuf:"bytes,1,opt,name=chan_point,proto3" json:"chan_point,omitempty"` + ChanPoint *ChannelPoint `protobuf:"bytes,1,opt,name=chan_point,json=chanPoint,proto3" json:"chan_point,omitempty"` //* //Is an encrypted single-chan backup. 
this can be passed to //RestoreChannelBackups, or the WalletUnlocker Init and Unlock methods in - //order to trigger the recovery protocol. - ChanBackup []byte `protobuf:"bytes,2,opt,name=chan_backup,proto3" json:"chan_backup,omitempty"` + //order to trigger the recovery protocol. When using REST, this field must be + //encoded as base64. + ChanBackup []byte `protobuf:"bytes,2,opt,name=chan_backup,json=chanBackup,proto3" json:"chan_backup,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8619,7 +11018,7 @@ func (m *ChannelBackup) Reset() { *m = ChannelBackup{} } func (m *ChannelBackup) String() string { return proto.CompactTextString(m) } func (*ChannelBackup) ProtoMessage() {} func (*ChannelBackup) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{122} + return fileDescriptor_77a6da22d6a3feb1, []int{142} } func (m *ChannelBackup) XXX_Unmarshal(b []byte) error { @@ -8657,12 +11056,13 @@ func (m *ChannelBackup) GetChanBackup() []byte { type MultiChanBackup struct { //* //Is the set of all channels that are included in this multi-channel backup. - ChanPoints []*ChannelPoint `protobuf:"bytes,1,rep,name=chan_points,proto3" json:"chan_points,omitempty"` + ChanPoints []*ChannelPoint `protobuf:"bytes,1,rep,name=chan_points,json=chanPoints,proto3" json:"chan_points,omitempty"` //* //A single encrypted blob containing all the static channel backups of the //channel listed above. This can be stored as a single file or blob, and - //safely be replaced with any prior/future versions. - MultiChanBackup []byte `protobuf:"bytes,2,opt,name=multi_chan_backup,proto3" json:"multi_chan_backup,omitempty"` + //safely be replaced with any prior/future versions. When using REST, this + //field must be encoded as base64. 
+ MultiChanBackup []byte `protobuf:"bytes,2,opt,name=multi_chan_backup,json=multiChanBackup,proto3" json:"multi_chan_backup,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8672,7 +11072,7 @@ func (m *MultiChanBackup) Reset() { *m = MultiChanBackup{} } func (m *MultiChanBackup) String() string { return proto.CompactTextString(m) } func (*MultiChanBackup) ProtoMessage() {} func (*MultiChanBackup) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{123} + return fileDescriptor_77a6da22d6a3feb1, []int{143} } func (m *MultiChanBackup) XXX_Unmarshal(b []byte) error { @@ -8717,7 +11117,7 @@ func (m *ChanBackupExportRequest) Reset() { *m = ChanBackupExportRequest func (m *ChanBackupExportRequest) String() string { return proto.CompactTextString(m) } func (*ChanBackupExportRequest) ProtoMessage() {} func (*ChanBackupExportRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{124} + return fileDescriptor_77a6da22d6a3feb1, []int{144} } func (m *ChanBackupExportRequest) XXX_Unmarshal(b []byte) error { @@ -8742,11 +11142,11 @@ type ChanBackupSnapshot struct { //* //The set of new channels that have been added since the last channel backup //snapshot was requested. - SingleChanBackups *ChannelBackups `protobuf:"bytes,1,opt,name=single_chan_backups,proto3" json:"single_chan_backups,omitempty"` + SingleChanBackups *ChannelBackups `protobuf:"bytes,1,opt,name=single_chan_backups,json=singleChanBackups,proto3" json:"single_chan_backups,omitempty"` //* //A multi-channel backup that covers all open channels currently known to //lnd. 
- MultiChanBackup *MultiChanBackup `protobuf:"bytes,2,opt,name=multi_chan_backup,proto3" json:"multi_chan_backup,omitempty"` + MultiChanBackup *MultiChanBackup `protobuf:"bytes,2,opt,name=multi_chan_backup,json=multiChanBackup,proto3" json:"multi_chan_backup,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8756,7 +11156,7 @@ func (m *ChanBackupSnapshot) Reset() { *m = ChanBackupSnapshot{} } func (m *ChanBackupSnapshot) String() string { return proto.CompactTextString(m) } func (*ChanBackupSnapshot) ProtoMessage() {} func (*ChanBackupSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{125} + return fileDescriptor_77a6da22d6a3feb1, []int{145} } func (m *ChanBackupSnapshot) XXX_Unmarshal(b []byte) error { @@ -8794,7 +11194,7 @@ func (m *ChanBackupSnapshot) GetMultiChanBackup() *MultiChanBackup { type ChannelBackups struct { //* //A set of single-chan static channel backups. - ChanBackups []*ChannelBackup `protobuf:"bytes,1,rep,name=chan_backups,proto3" json:"chan_backups,omitempty"` + ChanBackups []*ChannelBackup `protobuf:"bytes,1,rep,name=chan_backups,json=chanBackups,proto3" json:"chan_backups,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -8804,7 +11204,7 @@ func (m *ChannelBackups) Reset() { *m = ChannelBackups{} } func (m *ChannelBackups) String() string { return proto.CompactTextString(m) } func (*ChannelBackups) ProtoMessage() {} func (*ChannelBackups) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{126} + return fileDescriptor_77a6da22d6a3feb1, []int{146} } func (m *ChannelBackups) XXX_Unmarshal(b []byte) error { @@ -8846,7 +11246,7 @@ func (m *RestoreChanBackupRequest) Reset() { *m = RestoreChanBackupReque func (m *RestoreChanBackupRequest) String() string { return proto.CompactTextString(m) } func (*RestoreChanBackupRequest) ProtoMessage() {} 
func (*RestoreChanBackupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{127} + return fileDescriptor_77a6da22d6a3feb1, []int{147} } func (m *RestoreChanBackupRequest) XXX_Unmarshal(b []byte) error { @@ -8872,11 +11272,11 @@ type isRestoreChanBackupRequest_Backup interface { } type RestoreChanBackupRequest_ChanBackups struct { - ChanBackups *ChannelBackups `protobuf:"bytes,1,opt,name=chan_backups,proto3,oneof"` + ChanBackups *ChannelBackups `protobuf:"bytes,1,opt,name=chan_backups,json=chanBackups,proto3,oneof"` } type RestoreChanBackupRequest_MultiChanBackup struct { - MultiChanBackup []byte `protobuf:"bytes,2,opt,name=multi_chan_backup,proto3,oneof"` + MultiChanBackup []byte `protobuf:"bytes,2,opt,name=multi_chan_backup,json=multiChanBackup,proto3,oneof"` } func (*RestoreChanBackupRequest_ChanBackups) isRestoreChanBackupRequest_Backup() {} @@ -8885,134 +11285,549 @@ func (*RestoreChanBackupRequest_MultiChanBackup) isRestoreChanBackupRequest_Back func (m *RestoreChanBackupRequest) GetBackup() isRestoreChanBackupRequest_Backup { if m != nil { - return m.Backup + return m.Backup + } + return nil +} + +func (m *RestoreChanBackupRequest) GetChanBackups() *ChannelBackups { + if x, ok := m.GetBackup().(*RestoreChanBackupRequest_ChanBackups); ok { + return x.ChanBackups + } + return nil +} + +func (m *RestoreChanBackupRequest) GetMultiChanBackup() []byte { + if x, ok := m.GetBackup().(*RestoreChanBackupRequest_MultiChanBackup); ok { + return x.MultiChanBackup + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*RestoreChanBackupRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*RestoreChanBackupRequest_ChanBackups)(nil), + (*RestoreChanBackupRequest_MultiChanBackup)(nil), + } +} + +type RestoreBackupResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RestoreBackupResponse) Reset() { *m = RestoreBackupResponse{} } +func (m *RestoreBackupResponse) String() string { return proto.CompactTextString(m) } +func (*RestoreBackupResponse) ProtoMessage() {} +func (*RestoreBackupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{148} +} + +func (m *RestoreBackupResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RestoreBackupResponse.Unmarshal(m, b) +} +func (m *RestoreBackupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RestoreBackupResponse.Marshal(b, m, deterministic) +} +func (m *RestoreBackupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RestoreBackupResponse.Merge(m, src) +} +func (m *RestoreBackupResponse) XXX_Size() int { + return xxx_messageInfo_RestoreBackupResponse.Size(m) +} +func (m *RestoreBackupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RestoreBackupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RestoreBackupResponse proto.InternalMessageInfo + +type ChannelBackupSubscription struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ChannelBackupSubscription) Reset() { *m = ChannelBackupSubscription{} } +func (m *ChannelBackupSubscription) String() string { return proto.CompactTextString(m) } +func (*ChannelBackupSubscription) ProtoMessage() {} +func (*ChannelBackupSubscription) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{149} +} + +func (m *ChannelBackupSubscription) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_ChannelBackupSubscription.Unmarshal(m, b) +} +func (m *ChannelBackupSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ChannelBackupSubscription.Marshal(b, m, deterministic) +} +func (m *ChannelBackupSubscription) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChannelBackupSubscription.Merge(m, src) +} +func (m *ChannelBackupSubscription) XXX_Size() int { + return xxx_messageInfo_ChannelBackupSubscription.Size(m) +} +func (m *ChannelBackupSubscription) XXX_DiscardUnknown() { + xxx_messageInfo_ChannelBackupSubscription.DiscardUnknown(m) +} + +var xxx_messageInfo_ChannelBackupSubscription proto.InternalMessageInfo + +type VerifyChanBackupResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VerifyChanBackupResponse) Reset() { *m = VerifyChanBackupResponse{} } +func (m *VerifyChanBackupResponse) String() string { return proto.CompactTextString(m) } +func (*VerifyChanBackupResponse) ProtoMessage() {} +func (*VerifyChanBackupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{150} +} + +func (m *VerifyChanBackupResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VerifyChanBackupResponse.Unmarshal(m, b) +} +func (m *VerifyChanBackupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VerifyChanBackupResponse.Marshal(b, m, deterministic) +} +func (m *VerifyChanBackupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyChanBackupResponse.Merge(m, src) +} +func (m *VerifyChanBackupResponse) XXX_Size() int { + return xxx_messageInfo_VerifyChanBackupResponse.Size(m) +} +func (m *VerifyChanBackupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VerifyChanBackupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VerifyChanBackupResponse proto.InternalMessageInfo + +type MacaroonPermission struct { + /// The entity a 
permission grants access to. + Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` + /// The action that is granted. + Action string `protobuf:"bytes,2,opt,name=action,proto3" json:"action,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MacaroonPermission) Reset() { *m = MacaroonPermission{} } +func (m *MacaroonPermission) String() string { return proto.CompactTextString(m) } +func (*MacaroonPermission) ProtoMessage() {} +func (*MacaroonPermission) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{151} +} + +func (m *MacaroonPermission) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MacaroonPermission.Unmarshal(m, b) +} +func (m *MacaroonPermission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MacaroonPermission.Marshal(b, m, deterministic) +} +func (m *MacaroonPermission) XXX_Merge(src proto.Message) { + xxx_messageInfo_MacaroonPermission.Merge(m, src) +} +func (m *MacaroonPermission) XXX_Size() int { + return xxx_messageInfo_MacaroonPermission.Size(m) +} +func (m *MacaroonPermission) XXX_DiscardUnknown() { + xxx_messageInfo_MacaroonPermission.DiscardUnknown(m) +} + +var xxx_messageInfo_MacaroonPermission proto.InternalMessageInfo + +func (m *MacaroonPermission) GetEntity() string { + if m != nil { + return m.Entity + } + return "" +} + +func (m *MacaroonPermission) GetAction() string { + if m != nil { + return m.Action + } + return "" +} + +type BakeMacaroonRequest struct { + /// The list of permissions the new macaroon should grant. 
+ Permissions []*MacaroonPermission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BakeMacaroonRequest) Reset() { *m = BakeMacaroonRequest{} } +func (m *BakeMacaroonRequest) String() string { return proto.CompactTextString(m) } +func (*BakeMacaroonRequest) ProtoMessage() {} +func (*BakeMacaroonRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{152} +} + +func (m *BakeMacaroonRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BakeMacaroonRequest.Unmarshal(m, b) +} +func (m *BakeMacaroonRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BakeMacaroonRequest.Marshal(b, m, deterministic) +} +func (m *BakeMacaroonRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BakeMacaroonRequest.Merge(m, src) +} +func (m *BakeMacaroonRequest) XXX_Size() int { + return xxx_messageInfo_BakeMacaroonRequest.Size(m) +} +func (m *BakeMacaroonRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BakeMacaroonRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BakeMacaroonRequest proto.InternalMessageInfo + +func (m *BakeMacaroonRequest) GetPermissions() []*MacaroonPermission { + if m != nil { + return m.Permissions + } + return nil +} + +type BakeMacaroonResponse struct { + /// The hex encoded macaroon, serialized in binary format. 
+ Macaroon string `protobuf:"bytes,1,opt,name=macaroon,proto3" json:"macaroon,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BakeMacaroonResponse) Reset() { *m = BakeMacaroonResponse{} } +func (m *BakeMacaroonResponse) String() string { return proto.CompactTextString(m) } +func (*BakeMacaroonResponse) ProtoMessage() {} +func (*BakeMacaroonResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{153} +} + +func (m *BakeMacaroonResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BakeMacaroonResponse.Unmarshal(m, b) +} +func (m *BakeMacaroonResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BakeMacaroonResponse.Marshal(b, m, deterministic) +} +func (m *BakeMacaroonResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BakeMacaroonResponse.Merge(m, src) +} +func (m *BakeMacaroonResponse) XXX_Size() int { + return xxx_messageInfo_BakeMacaroonResponse.Size(m) +} +func (m *BakeMacaroonResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BakeMacaroonResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BakeMacaroonResponse proto.InternalMessageInfo + +func (m *BakeMacaroonResponse) GetMacaroon() string { + if m != nil { + return m.Macaroon + } + return "" +} + +type Failure struct { + /// Failure code as defined in the Lightning spec + Code Failure_FailureCode `protobuf:"varint,1,opt,name=code,proto3,enum=lnrpc.Failure_FailureCode" json:"code,omitempty"` + /// An optional channel update message. + ChannelUpdate *ChannelUpdate `protobuf:"bytes,3,opt,name=channel_update,json=channelUpdate,proto3" json:"channel_update,omitempty"` + /// A failure type-dependent htlc value. + HtlcMsat uint64 `protobuf:"varint,4,opt,name=htlc_msat,json=htlcMsat,proto3" json:"htlc_msat,omitempty"` + /// The sha256 sum of the onion payload. 
+ OnionSha_256 []byte `protobuf:"bytes,5,opt,name=onion_sha_256,json=onionSha256,proto3" json:"onion_sha_256,omitempty"` + /// A failure type-dependent cltv expiry value. + CltvExpiry uint32 `protobuf:"varint,6,opt,name=cltv_expiry,json=cltvExpiry,proto3" json:"cltv_expiry,omitempty"` + /// A failure type-dependent flags value. + Flags uint32 `protobuf:"varint,7,opt,name=flags,proto3" json:"flags,omitempty"` + //* + //The position in the path of the intermediate or final node that generated + //the failure message. Position zero is the sender node. + FailureSourceIndex uint32 `protobuf:"varint,8,opt,name=failure_source_index,json=failureSourceIndex,proto3" json:"failure_source_index,omitempty"` + /// A failure type-dependent block height. + Height uint32 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Failure) Reset() { *m = Failure{} } +func (m *Failure) String() string { return proto.CompactTextString(m) } +func (*Failure) ProtoMessage() {} +func (*Failure) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{154} +} + +func (m *Failure) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Failure.Unmarshal(m, b) +} +func (m *Failure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Failure.Marshal(b, m, deterministic) +} +func (m *Failure) XXX_Merge(src proto.Message) { + xxx_messageInfo_Failure.Merge(m, src) +} +func (m *Failure) XXX_Size() int { + return xxx_messageInfo_Failure.Size(m) +} +func (m *Failure) XXX_DiscardUnknown() { + xxx_messageInfo_Failure.DiscardUnknown(m) +} + +var xxx_messageInfo_Failure proto.InternalMessageInfo + +func (m *Failure) GetCode() Failure_FailureCode { + if m != nil { + return m.Code + } + return Failure_RESERVED +} + +func (m *Failure) GetChannelUpdate() *ChannelUpdate { + if m != nil { + return m.ChannelUpdate + } + 
return nil +} + +func (m *Failure) GetHtlcMsat() uint64 { + if m != nil { + return m.HtlcMsat + } + return 0 +} + +func (m *Failure) GetOnionSha_256() []byte { + if m != nil { + return m.OnionSha_256 + } + return nil +} + +func (m *Failure) GetCltvExpiry() uint32 { + if m != nil { + return m.CltvExpiry } - return nil + return 0 } -func (m *RestoreChanBackupRequest) GetChanBackups() *ChannelBackups { - if x, ok := m.GetBackup().(*RestoreChanBackupRequest_ChanBackups); ok { - return x.ChanBackups +func (m *Failure) GetFlags() uint32 { + if m != nil { + return m.Flags } - return nil + return 0 } -func (m *RestoreChanBackupRequest) GetMultiChanBackup() []byte { - if x, ok := m.GetBackup().(*RestoreChanBackupRequest_MultiChanBackup); ok { - return x.MultiChanBackup +func (m *Failure) GetFailureSourceIndex() uint32 { + if m != nil { + return m.FailureSourceIndex } - return nil + return 0 } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*RestoreChanBackupRequest) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*RestoreChanBackupRequest_ChanBackups)(nil), - (*RestoreChanBackupRequest_MultiChanBackup)(nil), +func (m *Failure) GetHeight() uint32 { + if m != nil { + return m.Height } + return 0 } -type RestoreBackupResponse struct { +type ChannelUpdate struct { + //* + //The signature that validates the announced data and proves the ownership + //of node id. + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + //* + //The target chain that this channel was opened within. This value + //should be the genesis hash of the target chain. Along with the short + //channel ID, this uniquely identifies the channel globally in a + //blockchain. + ChainHash []byte `protobuf:"bytes,2,opt,name=chain_hash,json=chainHash,proto3" json:"chain_hash,omitempty"` + //* + //The unique description of the funding transaction. 
+ ChanId uint64 `protobuf:"varint,3,opt,name=chan_id,json=chanId,proto3" json:"chan_id,omitempty"` + //* + //A timestamp that allows ordering in the case of multiple announcements. + //We should ignore the message if timestamp is not greater than the + //last-received. + Timestamp uint32 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + //* + //The bitfield that describes whether optional fields are present in this + //update. Currently, the least-significant bit must be set to 1 if the + //optional field MaxHtlc is present. + MessageFlags uint32 `protobuf:"varint,10,opt,name=message_flags,json=messageFlags,proto3" json:"message_flags,omitempty"` + //* + //The bitfield that describes additional meta-data concerning how the + //update is to be interpreted. Currently, the least-significant bit must be + //set to 0 if the creating node corresponds to the first node in the + //previously sent channel announcement and 1 otherwise. If the second bit + //is set, then the channel is set to be disabled. + ChannelFlags uint32 `protobuf:"varint,5,opt,name=channel_flags,json=channelFlags,proto3" json:"channel_flags,omitempty"` + //* + //The minimum number of blocks this node requires to be added to the expiry + //of HTLCs. This is a security parameter determined by the node operator. + //This value represents the required gap between the time locks of the + //incoming and outgoing HTLC's set to this node. + TimeLockDelta uint32 `protobuf:"varint,6,opt,name=time_lock_delta,json=timeLockDelta,proto3" json:"time_lock_delta,omitempty"` + //* + //The minimum HTLC value which will be accepted. + HtlcMinimumMsat uint64 `protobuf:"varint,7,opt,name=htlc_minimum_msat,json=htlcMinimumMsat,proto3" json:"htlc_minimum_msat,omitempty"` + //* + //The base fee that must be used for incoming HTLC's to this particular + //channel. This value will be tacked onto the required for a payment + //independent of the size of the payment. 
+ BaseFee uint32 `protobuf:"varint,8,opt,name=base_fee,json=baseFee,proto3" json:"base_fee,omitempty"` + //* + //The fee rate that will be charged per millionth of a satoshi. + FeeRate uint32 `protobuf:"varint,9,opt,name=fee_rate,json=feeRate,proto3" json:"fee_rate,omitempty"` + //* + //The maximum HTLC value which will be accepted. + HtlcMaximumMsat uint64 `protobuf:"varint,11,opt,name=htlc_maximum_msat,json=htlcMaximumMsat,proto3" json:"htlc_maximum_msat,omitempty"` + //* + //The set of data that was appended to this message, some of which we may + //not actually know how to iterate or parse. By holding onto this data, we + //ensure that we're able to properly validate the set of signatures that + //cover these new fields, and ensure we're able to make upgrades to the + //network in a forwards compatible manner. + ExtraOpaqueData []byte `protobuf:"bytes,12,opt,name=extra_opaque_data,json=extraOpaqueData,proto3" json:"extra_opaque_data,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *RestoreBackupResponse) Reset() { *m = RestoreBackupResponse{} } -func (m *RestoreBackupResponse) String() string { return proto.CompactTextString(m) } -func (*RestoreBackupResponse) ProtoMessage() {} -func (*RestoreBackupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{128} +func (m *ChannelUpdate) Reset() { *m = ChannelUpdate{} } +func (m *ChannelUpdate) String() string { return proto.CompactTextString(m) } +func (*ChannelUpdate) ProtoMessage() {} +func (*ChannelUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{155} } -func (m *RestoreBackupResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RestoreBackupResponse.Unmarshal(m, b) +func (m *ChannelUpdate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ChannelUpdate.Unmarshal(m, b) } -func (m *RestoreBackupResponse) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - return xxx_messageInfo_RestoreBackupResponse.Marshal(b, m, deterministic) +func (m *ChannelUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ChannelUpdate.Marshal(b, m, deterministic) } -func (m *RestoreBackupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RestoreBackupResponse.Merge(m, src) +func (m *ChannelUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChannelUpdate.Merge(m, src) } -func (m *RestoreBackupResponse) XXX_Size() int { - return xxx_messageInfo_RestoreBackupResponse.Size(m) +func (m *ChannelUpdate) XXX_Size() int { + return xxx_messageInfo_ChannelUpdate.Size(m) } -func (m *RestoreBackupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_RestoreBackupResponse.DiscardUnknown(m) +func (m *ChannelUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_ChannelUpdate.DiscardUnknown(m) } -var xxx_messageInfo_RestoreBackupResponse proto.InternalMessageInfo +var xxx_messageInfo_ChannelUpdate proto.InternalMessageInfo -type ChannelBackupSubscription struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *ChannelUpdate) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil } -func (m *ChannelBackupSubscription) Reset() { *m = ChannelBackupSubscription{} } -func (m *ChannelBackupSubscription) String() string { return proto.CompactTextString(m) } -func (*ChannelBackupSubscription) ProtoMessage() {} -func (*ChannelBackupSubscription) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{129} +func (m *ChannelUpdate) GetChainHash() []byte { + if m != nil { + return m.ChainHash + } + return nil } -func (m *ChannelBackupSubscription) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ChannelBackupSubscription.Unmarshal(m, b) -} -func (m *ChannelBackupSubscription) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_ChannelBackupSubscription.Marshal(b, m, deterministic) -} -func (m *ChannelBackupSubscription) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChannelBackupSubscription.Merge(m, src) -} -func (m *ChannelBackupSubscription) XXX_Size() int { - return xxx_messageInfo_ChannelBackupSubscription.Size(m) -} -func (m *ChannelBackupSubscription) XXX_DiscardUnknown() { - xxx_messageInfo_ChannelBackupSubscription.DiscardUnknown(m) +func (m *ChannelUpdate) GetChanId() uint64 { + if m != nil { + return m.ChanId + } + return 0 } -var xxx_messageInfo_ChannelBackupSubscription proto.InternalMessageInfo +func (m *ChannelUpdate) GetTimestamp() uint32 { + if m != nil { + return m.Timestamp + } + return 0 +} -type VerifyChanBackupResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (m *ChannelUpdate) GetMessageFlags() uint32 { + if m != nil { + return m.MessageFlags + } + return 0 } -func (m *VerifyChanBackupResponse) Reset() { *m = VerifyChanBackupResponse{} } -func (m *VerifyChanBackupResponse) String() string { return proto.CompactTextString(m) } -func (*VerifyChanBackupResponse) ProtoMessage() {} -func (*VerifyChanBackupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{130} +func (m *ChannelUpdate) GetChannelFlags() uint32 { + if m != nil { + return m.ChannelFlags + } + return 0 } -func (m *VerifyChanBackupResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VerifyChanBackupResponse.Unmarshal(m, b) +func (m *ChannelUpdate) GetTimeLockDelta() uint32 { + if m != nil { + return m.TimeLockDelta + } + return 0 } -func (m *VerifyChanBackupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VerifyChanBackupResponse.Marshal(b, m, deterministic) + +func (m *ChannelUpdate) GetHtlcMinimumMsat() uint64 { + if m != nil { + return m.HtlcMinimumMsat + } + return 0 } -func (m *VerifyChanBackupResponse) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyChanBackupResponse.Merge(m, src) + +func (m *ChannelUpdate) GetBaseFee() uint32 { + if m != nil { + return m.BaseFee + } + return 0 } -func (m *VerifyChanBackupResponse) XXX_Size() int { - return xxx_messageInfo_VerifyChanBackupResponse.Size(m) + +func (m *ChannelUpdate) GetFeeRate() uint32 { + if m != nil { + return m.FeeRate + } + return 0 } -func (m *VerifyChanBackupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyChanBackupResponse.DiscardUnknown(m) + +func (m *ChannelUpdate) GetHtlcMaximumMsat() uint64 { + if m != nil { + return m.HtlcMaximumMsat + } + return 0 } -var xxx_messageInfo_VerifyChanBackupResponse proto.InternalMessageInfo +func (m *ChannelUpdate) GetExtraOpaqueData() []byte { + if m != nil { + return m.ExtraOpaqueData + } + return nil +} func init() { proto.RegisterEnum("lnrpc.AddressType", AddressType_name, AddressType_value) + proto.RegisterEnum("lnrpc.CommitmentType", CommitmentType_name, CommitmentType_value) + proto.RegisterEnum("lnrpc.Initiator", Initiator_name, Initiator_value) + proto.RegisterEnum("lnrpc.NodeMetricType", NodeMetricType_name, NodeMetricType_value) proto.RegisterEnum("lnrpc.InvoiceHTLCState", InvoiceHTLCState_name, InvoiceHTLCState_value) + proto.RegisterEnum("lnrpc.PaymentFailureReason", PaymentFailureReason_name, PaymentFailureReason_value) + proto.RegisterEnum("lnrpc.FeatureBit", FeatureBit_name, FeatureBit_value) proto.RegisterEnum("lnrpc.ChannelCloseSummary_ClosureType", ChannelCloseSummary_ClosureType_name, ChannelCloseSummary_ClosureType_value) proto.RegisterEnum("lnrpc.Peer_SyncType", Peer_SyncType_name, Peer_SyncType_value) + proto.RegisterEnum("lnrpc.PeerEvent_EventType", PeerEvent_EventType_name, PeerEvent_EventType_value) + proto.RegisterEnum("lnrpc.PendingChannelsResponse_ForceClosedChannel_AnchorState", PendingChannelsResponse_ForceClosedChannel_AnchorState_name, PendingChannelsResponse_ForceClosedChannel_AnchorState_value) 
proto.RegisterEnum("lnrpc.ChannelEventUpdate_UpdateType", ChannelEventUpdate_UpdateType_name, ChannelEventUpdate_UpdateType_value) proto.RegisterEnum("lnrpc.Invoice_InvoiceState", Invoice_InvoiceState_name, Invoice_InvoiceState_value) proto.RegisterEnum("lnrpc.Payment_PaymentStatus", Payment_PaymentStatus_name, Payment_PaymentStatus_value) + proto.RegisterEnum("lnrpc.HTLCAttempt_HTLCStatus", HTLCAttempt_HTLCStatus_name, HTLCAttempt_HTLCStatus_value) + proto.RegisterEnum("lnrpc.Failure_FailureCode", Failure_FailureCode_name, Failure_FailureCode_value) proto.RegisterType((*GenSeedRequest)(nil), "lnrpc.GenSeedRequest") proto.RegisterType((*GenSeedResponse)(nil), "lnrpc.GenSeedResponse") proto.RegisterType((*InitWalletRequest)(nil), "lnrpc.InitWalletRequest") @@ -9027,7 +11842,7 @@ func init() { proto.RegisterType((*TransactionDetails)(nil), "lnrpc.TransactionDetails") proto.RegisterType((*FeeLimit)(nil), "lnrpc.FeeLimit") proto.RegisterType((*SendRequest)(nil), "lnrpc.SendRequest") - proto.RegisterMapType((map[uint64][]byte)(nil), "lnrpc.SendRequest.DestTlvEntry") + proto.RegisterMapType((map[uint64][]byte)(nil), "lnrpc.SendRequest.DestCustomRecordsEntry") proto.RegisterType((*SendResponse)(nil), "lnrpc.SendResponse") proto.RegisterType((*SendToRouteRequest)(nil), "lnrpc.SendToRouteRequest") proto.RegisterType((*ChannelAcceptRequest)(nil), "lnrpc.ChannelAcceptRequest") @@ -9063,10 +11878,15 @@ func init() { proto.RegisterType((*ClosedChannelsRequest)(nil), "lnrpc.ClosedChannelsRequest") proto.RegisterType((*ClosedChannelsResponse)(nil), "lnrpc.ClosedChannelsResponse") proto.RegisterType((*Peer)(nil), "lnrpc.Peer") + proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.Peer.FeaturesEntry") + proto.RegisterType((*TimestampedError)(nil), "lnrpc.TimestampedError") proto.RegisterType((*ListPeersRequest)(nil), "lnrpc.ListPeersRequest") proto.RegisterType((*ListPeersResponse)(nil), "lnrpc.ListPeersResponse") + proto.RegisterType((*PeerEventSubscription)(nil), 
"lnrpc.PeerEventSubscription") + proto.RegisterType((*PeerEvent)(nil), "lnrpc.PeerEvent") proto.RegisterType((*GetInfoRequest)(nil), "lnrpc.GetInfoRequest") proto.RegisterType((*GetInfoResponse)(nil), "lnrpc.GetInfoResponse") + proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.GetInfoResponse.FeaturesEntry") proto.RegisterType((*Chain)(nil), "lnrpc.Chain") proto.RegisterType((*ConfirmationUpdate)(nil), "lnrpc.ConfirmationUpdate") proto.RegisterType((*ChannelOpenUpdate)(nil), "lnrpc.ChannelOpenUpdate") @@ -9074,14 +11894,26 @@ func init() { proto.RegisterType((*CloseChannelRequest)(nil), "lnrpc.CloseChannelRequest") proto.RegisterType((*CloseStatusUpdate)(nil), "lnrpc.CloseStatusUpdate") proto.RegisterType((*PendingUpdate)(nil), "lnrpc.PendingUpdate") + proto.RegisterType((*ReadyForPsbtFunding)(nil), "lnrpc.ReadyForPsbtFunding") proto.RegisterType((*OpenChannelRequest)(nil), "lnrpc.OpenChannelRequest") proto.RegisterType((*OpenStatusUpdate)(nil), "lnrpc.OpenStatusUpdate") + proto.RegisterType((*KeyLocator)(nil), "lnrpc.KeyLocator") + proto.RegisterType((*KeyDescriptor)(nil), "lnrpc.KeyDescriptor") + proto.RegisterType((*ChanPointShim)(nil), "lnrpc.ChanPointShim") + proto.RegisterType((*PsbtShim)(nil), "lnrpc.PsbtShim") + proto.RegisterType((*FundingShim)(nil), "lnrpc.FundingShim") + proto.RegisterType((*FundingShimCancel)(nil), "lnrpc.FundingShimCancel") + proto.RegisterType((*FundingPsbtVerify)(nil), "lnrpc.FundingPsbtVerify") + proto.RegisterType((*FundingPsbtFinalize)(nil), "lnrpc.FundingPsbtFinalize") + proto.RegisterType((*FundingTransitionMsg)(nil), "lnrpc.FundingTransitionMsg") + proto.RegisterType((*FundingStateStepResp)(nil), "lnrpc.FundingStateStepResp") proto.RegisterType((*PendingHTLC)(nil), "lnrpc.PendingHTLC") proto.RegisterType((*PendingChannelsRequest)(nil), "lnrpc.PendingChannelsRequest") proto.RegisterType((*PendingChannelsResponse)(nil), "lnrpc.PendingChannelsResponse") proto.RegisterType((*PendingChannelsResponse_PendingChannel)(nil), 
"lnrpc.PendingChannelsResponse.PendingChannel") proto.RegisterType((*PendingChannelsResponse_PendingOpenChannel)(nil), "lnrpc.PendingChannelsResponse.PendingOpenChannel") proto.RegisterType((*PendingChannelsResponse_WaitingCloseChannel)(nil), "lnrpc.PendingChannelsResponse.WaitingCloseChannel") + proto.RegisterType((*PendingChannelsResponse_Commitments)(nil), "lnrpc.PendingChannelsResponse.Commitments") proto.RegisterType((*PendingChannelsResponse_ClosedChannel)(nil), "lnrpc.PendingChannelsResponse.ClosedChannel") proto.RegisterType((*PendingChannelsResponse_ForceClosedChannel)(nil), "lnrpc.PendingChannelsResponse.ForceClosedChannel") proto.RegisterType((*ChannelEventSubscription)(nil), "lnrpc.ChannelEventSubscription") @@ -9091,19 +11923,27 @@ func init() { proto.RegisterType((*ChannelBalanceRequest)(nil), "lnrpc.ChannelBalanceRequest") proto.RegisterType((*ChannelBalanceResponse)(nil), "lnrpc.ChannelBalanceResponse") proto.RegisterType((*QueryRoutesRequest)(nil), "lnrpc.QueryRoutesRequest") + proto.RegisterMapType((map[uint64][]byte)(nil), "lnrpc.QueryRoutesRequest.DestCustomRecordsEntry") proto.RegisterType((*NodePair)(nil), "lnrpc.NodePair") proto.RegisterType((*EdgeLocator)(nil), "lnrpc.EdgeLocator") proto.RegisterType((*QueryRoutesResponse)(nil), "lnrpc.QueryRoutesResponse") proto.RegisterType((*Hop)(nil), "lnrpc.Hop") + proto.RegisterMapType((map[uint64][]byte)(nil), "lnrpc.Hop.CustomRecordsEntry") + proto.RegisterType((*MPPRecord)(nil), "lnrpc.MPPRecord") proto.RegisterType((*Route)(nil), "lnrpc.Route") proto.RegisterType((*NodeInfoRequest)(nil), "lnrpc.NodeInfoRequest") proto.RegisterType((*NodeInfo)(nil), "lnrpc.NodeInfo") proto.RegisterType((*LightningNode)(nil), "lnrpc.LightningNode") + proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.LightningNode.FeaturesEntry") proto.RegisterType((*NodeAddress)(nil), "lnrpc.NodeAddress") proto.RegisterType((*RoutingPolicy)(nil), "lnrpc.RoutingPolicy") proto.RegisterType((*ChannelEdge)(nil), 
"lnrpc.ChannelEdge") proto.RegisterType((*ChannelGraphRequest)(nil), "lnrpc.ChannelGraphRequest") proto.RegisterType((*ChannelGraph)(nil), "lnrpc.ChannelGraph") + proto.RegisterType((*NodeMetricsRequest)(nil), "lnrpc.NodeMetricsRequest") + proto.RegisterType((*NodeMetricsResponse)(nil), "lnrpc.NodeMetricsResponse") + proto.RegisterMapType((map[string]*FloatMetric)(nil), "lnrpc.NodeMetricsResponse.BetweennessCentralityEntry") + proto.RegisterType((*FloatMetric)(nil), "lnrpc.FloatMetric") proto.RegisterType((*ChanInfoRequest)(nil), "lnrpc.ChanInfoRequest") proto.RegisterType((*NetworkInfoRequest)(nil), "lnrpc.NetworkInfoRequest") proto.RegisterType((*NetworkInfo)(nil), "lnrpc.NetworkInfo") @@ -9117,13 +11957,16 @@ func init() { proto.RegisterType((*HopHint)(nil), "lnrpc.HopHint") proto.RegisterType((*RouteHint)(nil), "lnrpc.RouteHint") proto.RegisterType((*Invoice)(nil), "lnrpc.Invoice") + proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.Invoice.FeaturesEntry") proto.RegisterType((*InvoiceHTLC)(nil), "lnrpc.InvoiceHTLC") + proto.RegisterMapType((map[uint64][]byte)(nil), "lnrpc.InvoiceHTLC.CustomRecordsEntry") proto.RegisterType((*AddInvoiceResponse)(nil), "lnrpc.AddInvoiceResponse") proto.RegisterType((*PaymentHash)(nil), "lnrpc.PaymentHash") proto.RegisterType((*ListInvoiceRequest)(nil), "lnrpc.ListInvoiceRequest") proto.RegisterType((*ListInvoiceResponse)(nil), "lnrpc.ListInvoiceResponse") proto.RegisterType((*InvoiceSubscription)(nil), "lnrpc.InvoiceSubscription") proto.RegisterType((*Payment)(nil), "lnrpc.Payment") + proto.RegisterType((*HTLCAttempt)(nil), "lnrpc.HTLCAttempt") proto.RegisterType((*ListPaymentsRequest)(nil), "lnrpc.ListPaymentsRequest") proto.RegisterType((*ListPaymentsResponse)(nil), "lnrpc.ListPaymentsResponse") proto.RegisterType((*DeleteAllPaymentsRequest)(nil), "lnrpc.DeleteAllPaymentsRequest") @@ -9134,6 +11977,8 @@ func init() { proto.RegisterType((*DebugLevelResponse)(nil), "lnrpc.DebugLevelResponse") 
proto.RegisterType((*PayReqString)(nil), "lnrpc.PayReqString") proto.RegisterType((*PayReq)(nil), "lnrpc.PayReq") + proto.RegisterMapType((map[uint32]*Feature)(nil), "lnrpc.PayReq.FeaturesEntry") + proto.RegisterType((*Feature)(nil), "lnrpc.Feature") proto.RegisterType((*FeeReportRequest)(nil), "lnrpc.FeeReportRequest") proto.RegisterType((*ChannelFeeReport)(nil), "lnrpc.ChannelFeeReport") proto.RegisterType((*FeeReportResponse)(nil), "lnrpc.FeeReportResponse") @@ -9152,542 +11997,771 @@ func init() { proto.RegisterType((*RestoreBackupResponse)(nil), "lnrpc.RestoreBackupResponse") proto.RegisterType((*ChannelBackupSubscription)(nil), "lnrpc.ChannelBackupSubscription") proto.RegisterType((*VerifyChanBackupResponse)(nil), "lnrpc.VerifyChanBackupResponse") + proto.RegisterType((*MacaroonPermission)(nil), "lnrpc.MacaroonPermission") + proto.RegisterType((*BakeMacaroonRequest)(nil), "lnrpc.BakeMacaroonRequest") + proto.RegisterType((*BakeMacaroonResponse)(nil), "lnrpc.BakeMacaroonResponse") + proto.RegisterType((*Failure)(nil), "lnrpc.Failure") + proto.RegisterType((*ChannelUpdate)(nil), "lnrpc.ChannelUpdate") } func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 8476 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x7d, 0x5d, 0x6c, 0x1c, 0x5b, - 0xd2, 0x50, 0x7a, 0x7e, 0xec, 0x99, 0x9a, 0xb1, 0x3d, 0x3e, 0x76, 0xec, 0xc9, 0xdc, 0xdc, 0xdc, - 0xdc, 0xde, 0x7c, 0x49, 0x36, 0x7b, 0xd7, 0xc9, 0xcd, 0xee, 0x5e, 0xf2, 0xdd, 0xf0, 0xf1, 0xe1, - 0xd8, 0x4e, 0x9c, 0x5d, 0x5f, 0xc7, 0xdb, 0x4e, 0x36, 0xec, 0xee, 0x87, 0x66, 0xdb, 0x33, 0xc7, - 0x76, 0x6f, 0x66, 0xba, 0x67, 0xbb, 0x7b, 0xec, 0x78, 0x2f, 0x17, 0x09, 0x84, 0x10, 0x42, 0x42, - 0x68, 0xe1, 0x05, 0x10, 0x08, 0x69, 0xf7, 0x7b, 0x60, 0xe1, 0x01, 0x78, 0x00, 0x81, 0xf4, 0x49, - 0xdf, 0x23, 0x4f, 0x08, 0xa1, 0xef, 0x8d, 0x07, 0x3e, 0x21, 0x90, 0x60, 0xe1, 0x0d, 0x89, 0x77, - 0x54, 
0x75, 0x7e, 0xfa, 0x9c, 0xee, 0x9e, 0x24, 0x77, 0x77, 0xe1, 0xc9, 0x73, 0xaa, 0xaa, 0xcf, - 0x6f, 0x55, 0x9d, 0xaa, 0x3a, 0x75, 0x8e, 0xa1, 0x19, 0x4f, 0x06, 0x1b, 0x93, 0x38, 0x4a, 0x23, - 0x56, 0x1f, 0x85, 0xf1, 0x64, 0xd0, 0xbb, 0x7a, 0x12, 0x45, 0x27, 0x23, 0x7e, 0xd7, 0x9f, 0x04, - 0x77, 0xfd, 0x30, 0x8c, 0x52, 0x3f, 0x0d, 0xa2, 0x30, 0x11, 0x44, 0xee, 0x8f, 0x60, 0xf1, 0x09, - 0x0f, 0x0f, 0x39, 0x1f, 0x7a, 0xfc, 0x27, 0x53, 0x9e, 0xa4, 0xec, 0x6b, 0xb0, 0xec, 0xf3, 0x9f, - 0x72, 0x3e, 0xec, 0x4f, 0xfc, 0x24, 0x99, 0x9c, 0xc6, 0x7e, 0xc2, 0xbb, 0xce, 0x75, 0xe7, 0x76, - 0xdb, 0xeb, 0x08, 0xc4, 0x81, 0x86, 0xb3, 0x0f, 0xa1, 0x9d, 0x20, 0x29, 0x0f, 0xd3, 0x38, 0x9a, - 0x5c, 0x74, 0x2b, 0x44, 0xd7, 0x42, 0xd8, 0x8e, 0x00, 0xb9, 0x23, 0x58, 0xd2, 0x2d, 0x24, 0x93, - 0x28, 0x4c, 0x38, 0xbb, 0x07, 0xab, 0x83, 0x60, 0x72, 0xca, 0xe3, 0x3e, 0x7d, 0x3c, 0x0e, 0xf9, - 0x38, 0x0a, 0x83, 0x41, 0xd7, 0xb9, 0x5e, 0xbd, 0xdd, 0xf4, 0x98, 0xc0, 0xe1, 0x17, 0x9f, 0x49, - 0x0c, 0xbb, 0x05, 0x4b, 0x3c, 0x14, 0x70, 0x3e, 0xa4, 0xaf, 0x64, 0x53, 0x8b, 0x19, 0x18, 0x3f, - 0x70, 0xff, 0x46, 0x05, 0x96, 0x9f, 0x86, 0x41, 0xfa, 0xd2, 0x1f, 0x8d, 0x78, 0xaa, 0xc6, 0x74, - 0x0b, 0x96, 0xce, 0x09, 0x40, 0x63, 0x3a, 0x8f, 0xe2, 0xa1, 0x1c, 0xd1, 0xa2, 0x00, 0x1f, 0x48, - 0xe8, 0xcc, 0x9e, 0x55, 0x66, 0xf6, 0xac, 0x74, 0xba, 0xaa, 0x33, 0xa6, 0xeb, 0x16, 0x2c, 0xc5, - 0x7c, 0x10, 0x9d, 0xf1, 0xf8, 0xa2, 0x7f, 0x1e, 0x84, 0xc3, 0xe8, 0xbc, 0x5b, 0xbb, 0xee, 0xdc, - 0xae, 0x7b, 0x8b, 0x0a, 0xfc, 0x92, 0xa0, 0xec, 0x11, 0x2c, 0x0d, 0x4e, 0xfd, 0x30, 0xe4, 0xa3, - 0xfe, 0x91, 0x3f, 0x78, 0x35, 0x9d, 0x24, 0xdd, 0xfa, 0x75, 0xe7, 0x76, 0xeb, 0xfe, 0x95, 0x0d, - 0x5a, 0xd5, 0x8d, 0xad, 0x53, 0x3f, 0x7c, 0x44, 0x98, 0xc3, 0xd0, 0x9f, 0x24, 0xa7, 0x51, 0xea, - 0x2d, 0xca, 0x2f, 0x04, 0x38, 0x71, 0x57, 0x81, 0x99, 0x33, 0x21, 0xe6, 0xde, 0xfd, 0x67, 0x0e, - 0xac, 0xbc, 0x08, 0x47, 0xd1, 0xe0, 0xd5, 0xaf, 0x39, 0x45, 0x25, 0x63, 0xa8, 0xbc, 0xeb, 0x18, - 0xaa, 0x5f, 0x76, 0x0c, 0x6b, 0xb0, 0x6a, 
0x77, 0x56, 0x8e, 0x82, 0xc3, 0x65, 0xfc, 0xfa, 0x84, - 0xab, 0x6e, 0xa9, 0x61, 0x7c, 0x15, 0x3a, 0x83, 0x69, 0x1c, 0xf3, 0xb0, 0x30, 0x8e, 0x25, 0x09, - 0xd7, 0x03, 0xf9, 0x10, 0xda, 0x21, 0x3f, 0xcf, 0xc8, 0x24, 0xef, 0x86, 0xfc, 0x5c, 0x91, 0xb8, - 0x5d, 0x58, 0xcb, 0x37, 0x23, 0x3b, 0xf0, 0x5f, 0x1c, 0xa8, 0xbd, 0x48, 0x5f, 0x47, 0x6c, 0x03, - 0x6a, 0xe9, 0xc5, 0x44, 0x48, 0xc8, 0xe2, 0x7d, 0x26, 0x87, 0xb6, 0x39, 0x1c, 0xc6, 0x3c, 0x49, - 0x9e, 0x5f, 0x4c, 0xb8, 0xd7, 0xf6, 0x45, 0xa1, 0x8f, 0x74, 0xac, 0x0b, 0xf3, 0xb2, 0x4c, 0x0d, - 0x36, 0x3d, 0x55, 0x64, 0xd7, 0x00, 0xfc, 0x71, 0x34, 0x0d, 0xd3, 0x7e, 0xe2, 0xa7, 0x34, 0x55, - 0x55, 0xcf, 0x80, 0xb0, 0xab, 0xd0, 0x9c, 0xbc, 0xea, 0x27, 0x83, 0x38, 0x98, 0xa4, 0xc4, 0x36, - 0x4d, 0x2f, 0x03, 0xb0, 0xaf, 0x41, 0x23, 0x9a, 0xa6, 0x93, 0x28, 0x08, 0x53, 0xc9, 0x2a, 0x4b, - 0xb2, 0x2f, 0xcf, 0xa6, 0xe9, 0x01, 0x82, 0x3d, 0x4d, 0xc0, 0x6e, 0xc0, 0xc2, 0x20, 0x0a, 0x8f, - 0x83, 0x78, 0x2c, 0x94, 0x41, 0x77, 0x8e, 0x5a, 0xb3, 0x81, 0xee, 0xbf, 0xad, 0x40, 0xeb, 0x79, - 0xec, 0x87, 0x89, 0x3f, 0x40, 0x00, 0x76, 0x3d, 0x7d, 0xdd, 0x3f, 0xf5, 0x93, 0x53, 0x1a, 0x6d, - 0xd3, 0x53, 0x45, 0xb6, 0x06, 0x73, 0xa2, 0xa3, 0x34, 0xa6, 0xaa, 0x27, 0x4b, 0xec, 0x23, 0x58, - 0x0e, 0xa7, 0xe3, 0xbe, 0xdd, 0x56, 0x95, 0xb8, 0xa5, 0x88, 0xc0, 0x09, 0x38, 0xc2, 0xb5, 0x16, - 0x4d, 0x88, 0x11, 0x1a, 0x10, 0xe6, 0x42, 0x5b, 0x96, 0x78, 0x70, 0x72, 0x2a, 0x86, 0x59, 0xf7, - 0x2c, 0x18, 0xd6, 0x91, 0x06, 0x63, 0xde, 0x4f, 0x52, 0x7f, 0x3c, 0x91, 0xc3, 0x32, 0x20, 0x84, - 0x8f, 0x52, 0x7f, 0xd4, 0x3f, 0xe6, 0x3c, 0xe9, 0xce, 0x4b, 0xbc, 0x86, 0xb0, 0x9b, 0xb0, 0x38, - 0xe4, 0x49, 0xda, 0x97, 0x8b, 0xc2, 0x93, 0x6e, 0x83, 0x44, 0x3f, 0x07, 0xc5, 0x7a, 0x62, 0xff, - 0xbc, 0x8f, 0x13, 0xc0, 0x5f, 0x77, 0x9b, 0xa2, 0xaf, 0x19, 0x04, 0x39, 0xe7, 0x09, 0x4f, 0x8d, - 0xd9, 0x4b, 0x24, 0x87, 0xba, 0x7b, 0xc0, 0x0c, 0xf0, 0x36, 0x4f, 0xfd, 0x60, 0x94, 0xb0, 0x4f, - 0xa0, 0x9d, 0x1a, 0xc4, 0xa4, 0x0a, 0x5b, 0x9a, 0x9d, 0x8c, 0x0f, 0x3c, 0x8b, 
0xce, 0x7d, 0x02, - 0x8d, 0xc7, 0x9c, 0xef, 0x05, 0xe3, 0x20, 0x65, 0x6b, 0x50, 0x3f, 0x0e, 0x5e, 0x73, 0xc1, 0xf0, - 0xd5, 0xdd, 0x4b, 0x9e, 0x28, 0xb2, 0x1e, 0xcc, 0x4f, 0x78, 0x3c, 0xe0, 0x6a, 0x79, 0x76, 0x2f, - 0x79, 0x0a, 0xf0, 0x68, 0x1e, 0xea, 0x23, 0xfc, 0xd8, 0xfd, 0x55, 0x15, 0x5a, 0x87, 0x3c, 0xd4, - 0x82, 0xc4, 0xa0, 0x86, 0x43, 0x96, 0xc2, 0x43, 0xbf, 0xd9, 0x07, 0xd0, 0xa2, 0x69, 0x48, 0xd2, - 0x38, 0x08, 0x4f, 0x24, 0xff, 0x02, 0x82, 0x0e, 0x09, 0xc2, 0x3a, 0x50, 0xf5, 0xc7, 0x8a, 0x77, - 0xf1, 0x27, 0x0a, 0xd9, 0xc4, 0xbf, 0x18, 0xa3, 0x3c, 0xea, 0x55, 0x6d, 0x7b, 0x2d, 0x09, 0xdb, - 0xc5, 0x65, 0xdd, 0x80, 0x15, 0x93, 0x44, 0xd5, 0x5e, 0xa7, 0xda, 0x97, 0x0d, 0x4a, 0xd9, 0xc8, - 0x2d, 0x58, 0x52, 0xf4, 0xb1, 0xe8, 0x2c, 0xad, 0x73, 0xd3, 0x5b, 0x94, 0x60, 0x35, 0x84, 0xdb, - 0xd0, 0x39, 0x0e, 0x42, 0x7f, 0xd4, 0x1f, 0x8c, 0xd2, 0xb3, 0xfe, 0x90, 0x8f, 0x52, 0x9f, 0x56, - 0xbc, 0xee, 0x2d, 0x12, 0x7c, 0x6b, 0x94, 0x9e, 0x6d, 0x23, 0x94, 0x7d, 0x04, 0xcd, 0x63, 0xce, - 0xfb, 0x34, 0x13, 0xdd, 0x86, 0x25, 0x3d, 0x6a, 0x76, 0xbd, 0xc6, 0xb1, 0x9a, 0xe7, 0xdb, 0xd0, - 0x89, 0xa6, 0xe9, 0x49, 0x14, 0x84, 0x27, 0x7d, 0xd4, 0x57, 0xfd, 0x60, 0x48, 0x1c, 0x50, 0xf3, - 0x16, 0x15, 0x1c, 0xb5, 0xc6, 0xd3, 0x21, 0x7b, 0x1f, 0x80, 0xda, 0x16, 0x15, 0xc3, 0x75, 0xe7, - 0xf6, 0x82, 0xd7, 0x44, 0x88, 0xa8, 0xe8, 0x53, 0x68, 0xd0, 0x7c, 0xa6, 0xa3, 0xb3, 0x6e, 0x8b, - 0x16, 0xfc, 0x03, 0xd9, 0xaa, 0xb1, 0x12, 0x1b, 0xdb, 0x3c, 0x49, 0x9f, 0x8f, 0xce, 0x70, 0x3f, - 0xbd, 0xf0, 0xe6, 0x87, 0xa2, 0xd4, 0xfb, 0x14, 0xda, 0x26, 0x02, 0xa7, 0xfe, 0x15, 0xbf, 0xa0, - 0xe5, 0xaa, 0x79, 0xf8, 0x93, 0xad, 0x42, 0xfd, 0xcc, 0x1f, 0x4d, 0xb9, 0x54, 0x6c, 0xa2, 0xf0, - 0x69, 0xe5, 0x81, 0xe3, 0xfe, 0x1b, 0x07, 0xda, 0xa2, 0x05, 0xb9, 0x21, 0xdf, 0x80, 0x05, 0x35, - 0xa5, 0x3c, 0x8e, 0xa3, 0x58, 0xca, 0xb7, 0x0d, 0x64, 0x77, 0xa0, 0xa3, 0x00, 0x93, 0x98, 0x07, - 0x63, 0xff, 0x44, 0xd5, 0x5d, 0x80, 0xb3, 0xfb, 0x59, 0x8d, 0x71, 0x34, 0x4d, 0xb9, 0x54, 0xfd, - 0x6d, 0x39, 0x3e, 
0x0f, 0x61, 0x9e, 0x4d, 0x82, 0xf2, 0x5d, 0xc2, 0x2b, 0x16, 0xcc, 0xfd, 0x99, - 0x03, 0x0c, 0xbb, 0xfe, 0x3c, 0x12, 0x55, 0xc8, 0xa5, 0xce, 0xb3, 0x99, 0xf3, 0xce, 0x6c, 0x56, - 0x99, 0xc5, 0x66, 0x2e, 0xd4, 0x45, 0xcf, 0x6b, 0x25, 0x3d, 0x17, 0xa8, 0x6f, 0xd7, 0x1a, 0xd5, - 0x4e, 0xcd, 0xfd, 0x4f, 0x55, 0x58, 0xdd, 0x12, 0xfb, 0xd6, 0xe6, 0x60, 0xc0, 0x27, 0x9a, 0x01, - 0x3f, 0x80, 0x56, 0x18, 0x0d, 0x79, 0x7f, 0x32, 0x3d, 0x52, 0x6b, 0xd3, 0xf6, 0x00, 0x41, 0x07, - 0x04, 0x21, 0xfe, 0x38, 0xf5, 0x83, 0x50, 0x74, 0x5a, 0xcc, 0x65, 0x93, 0x20, 0xd4, 0xe5, 0x9b, - 0xb0, 0x34, 0xe1, 0xe1, 0xd0, 0xe4, 0x33, 0x61, 0x59, 0x2c, 0x48, 0xb0, 0x64, 0xb3, 0x0f, 0xa0, - 0x75, 0x3c, 0x15, 0x74, 0x28, 0x7e, 0x35, 0xe2, 0x01, 0x90, 0xa0, 0xcd, 0x71, 0xca, 0xae, 0x40, - 0x63, 0x32, 0x4d, 0x4e, 0x09, 0x5b, 0x27, 0xec, 0x3c, 0x96, 0x11, 0xf5, 0x3e, 0xc0, 0x70, 0x9a, - 0xa4, 0x92, 0x45, 0xe7, 0x08, 0xd9, 0x44, 0x88, 0x60, 0xd1, 0xaf, 0xc3, 0xca, 0xd8, 0x7f, 0xdd, - 0x27, 0xde, 0xe9, 0x07, 0x61, 0xff, 0x78, 0x44, 0xaa, 0x77, 0x9e, 0xe8, 0x3a, 0x63, 0xff, 0xf5, - 0xf7, 0x10, 0xf3, 0x34, 0x7c, 0x4c, 0x70, 0x94, 0x4d, 0xb5, 0xe7, 0xc7, 0x3c, 0xe1, 0xf1, 0x19, - 0x27, 0x71, 0xaa, 0xe9, 0x8d, 0xdd, 0x13, 0x50, 0xec, 0xd1, 0x18, 0xc7, 0x9d, 0x8e, 0x06, 0x52, - 0x76, 0xe6, 0xc7, 0x41, 0xb8, 0x9b, 0x8e, 0x06, 0xec, 0x2a, 0x00, 0x0a, 0xe3, 0x84, 0xc7, 0xfd, - 0x57, 0xe7, 0x24, 0x34, 0x35, 0x12, 0xbe, 0x03, 0x1e, 0x7f, 0xe7, 0x9c, 0xbd, 0x07, 0xcd, 0x41, - 0x42, 0xd2, 0xec, 0x5f, 0x74, 0x5b, 0x24, 0x51, 0x8d, 0x41, 0x82, 0x72, 0xec, 0x5f, 0xb0, 0x8f, - 0x80, 0x61, 0x6f, 0x7d, 0x5a, 0x05, 0x3e, 0xa4, 0xea, 0x93, 0x6e, 0x9b, 0xa8, 0xb0, 0xb3, 0x9b, - 0x12, 0x81, 0xed, 0x24, 0xec, 0x2b, 0xb0, 0xa0, 0x3a, 0x7b, 0x3c, 0xf2, 0x4f, 0x92, 0xee, 0x02, - 0x11, 0xb6, 0x25, 0xf0, 0x31, 0xc2, 0xdc, 0x97, 0xc2, 0xd2, 0x30, 0xd6, 0x56, 0xca, 0x0c, 0xee, - 0x79, 0x04, 0xa1, 0x75, 0x6d, 0x78, 0xb2, 0x54, 0xb6, 0x68, 0x95, 0x92, 0x45, 0x73, 0x7f, 0xee, - 0x40, 0x5b, 0xd6, 0x4c, 0xdb, 0x33, 0xbb, 0x07, 0x4c, 
0xad, 0x62, 0xfa, 0x3a, 0x18, 0xf6, 0x8f, - 0x2e, 0x52, 0x9e, 0x08, 0xa6, 0xd9, 0xbd, 0xe4, 0x95, 0xe0, 0xd8, 0x47, 0xd0, 0xb1, 0xa0, 0x49, - 0x1a, 0x0b, 0x7e, 0xde, 0xbd, 0xe4, 0x15, 0x30, 0x28, 0x5e, 0x68, 0x00, 0x4c, 0xd3, 0x7e, 0x10, - 0x0e, 0xf9, 0x6b, 0x62, 0xa5, 0x05, 0xcf, 0x82, 0x3d, 0x5a, 0x84, 0xb6, 0xf9, 0x9d, 0xfb, 0x63, - 0x68, 0x28, 0xf3, 0x81, 0xb6, 0xce, 0x5c, 0xbf, 0x3c, 0x03, 0xc2, 0x7a, 0xd0, 0xb0, 0x7b, 0xe1, - 0x35, 0xbe, 0x4c, 0xdb, 0xee, 0x9f, 0x83, 0xce, 0x1e, 0x32, 0x51, 0x88, 0x4c, 0x2b, 0x6d, 0xa2, - 0x35, 0x98, 0x33, 0x84, 0xa7, 0xe9, 0xc9, 0x12, 0xee, 0x4e, 0xa7, 0x51, 0x92, 0xca, 0x76, 0xe8, - 0xb7, 0xfb, 0xef, 0x1c, 0x60, 0x3b, 0x49, 0x1a, 0x8c, 0xfd, 0x94, 0x3f, 0xe6, 0x5a, 0x35, 0x3c, - 0x83, 0x36, 0xd6, 0xf6, 0x3c, 0xda, 0x14, 0x16, 0x8a, 0xd8, 0x59, 0xbf, 0x26, 0xc5, 0xb9, 0xf8, - 0xc1, 0x86, 0x49, 0x2d, 0x94, 0xae, 0x55, 0x01, 0x4a, 0x5b, 0xea, 0xc7, 0x27, 0x3c, 0x25, 0xf3, - 0x45, 0x1a, 0xbf, 0x20, 0x40, 0x5b, 0x51, 0x78, 0xdc, 0xfb, 0x7d, 0x58, 0x2e, 0xd4, 0x61, 0xea, - 0xe7, 0x66, 0x89, 0x7e, 0xae, 0x9a, 0xfa, 0x79, 0x00, 0x2b, 0x56, 0xbf, 0x24, 0xc7, 0x75, 0x61, - 0x1e, 0x05, 0x03, 0xad, 0x43, 0xda, 0xe1, 0x3d, 0x55, 0x64, 0xf7, 0x61, 0xf5, 0x98, 0xf3, 0xd8, - 0x4f, 0xa9, 0x48, 0xa2, 0x83, 0x6b, 0x22, 0x6b, 0x2e, 0xc5, 0xb9, 0xff, 0xd5, 0x81, 0x25, 0xd4, - 0xa4, 0x9f, 0xf9, 0xe1, 0x85, 0x9a, 0xab, 0xbd, 0xd2, 0xb9, 0xba, 0x6d, 0x6c, 0x4a, 0x06, 0xf5, - 0x97, 0x9d, 0xa8, 0x6a, 0x7e, 0xa2, 0xd8, 0x75, 0x68, 0x5b, 0xdd, 0xad, 0x0b, 0x73, 0x2c, 0xf1, - 0xd3, 0x03, 0x1e, 0x3f, 0xba, 0x48, 0xf9, 0x6f, 0x3e, 0x95, 0x37, 0xa1, 0x93, 0x75, 0x5b, 0xce, - 0x23, 0x83, 0x1a, 0x32, 0xa6, 0xac, 0x80, 0x7e, 0xbb, 0xff, 0xd0, 0x11, 0x84, 0x5b, 0x51, 0xa0, - 0x4d, 0x35, 0x24, 0x44, 0x8b, 0x4f, 0x11, 0xe2, 0xef, 0x99, 0xa6, 0xee, 0x6f, 0x3e, 0x58, 0xd4, - 0x89, 0x09, 0x0f, 0x87, 0x7d, 0x7f, 0x34, 0x22, 0x45, 0xdc, 0xf0, 0xe6, 0xb1, 0xbc, 0x39, 0x1a, - 0xb9, 0xb7, 0x60, 0xd9, 0xe8, 0xdd, 0x1b, 0xc6, 0xb1, 0x0f, 0x6c, 0x2f, 0x48, 0xd2, 0x17, 
0x61, - 0x32, 0x31, 0x2c, 0xa1, 0xf7, 0xa0, 0x89, 0xda, 0x16, 0x7b, 0x26, 0x24, 0xb7, 0xee, 0xa1, 0xfa, - 0xc5, 0x7e, 0x25, 0x84, 0xf4, 0x5f, 0x4b, 0x64, 0x45, 0x22, 0xfd, 0xd7, 0x84, 0x74, 0x1f, 0xc0, - 0x8a, 0x55, 0x9f, 0x6c, 0xfa, 0x43, 0xa8, 0x4f, 0xd3, 0xd7, 0x91, 0xb2, 0x53, 0x5b, 0x92, 0x43, - 0xd0, 0x23, 0xf2, 0x04, 0xc6, 0x7d, 0x08, 0xcb, 0xfb, 0xfc, 0x5c, 0x0a, 0xb2, 0xea, 0xc8, 0xcd, - 0xb7, 0x7a, 0x4b, 0x84, 0x77, 0x37, 0x80, 0x99, 0x1f, 0x67, 0x02, 0xa0, 0x7c, 0x27, 0xc7, 0xf2, - 0x9d, 0xdc, 0x9b, 0xc0, 0x0e, 0x83, 0x93, 0xf0, 0x33, 0x9e, 0x24, 0xfe, 0x89, 0x16, 0xfd, 0x0e, - 0x54, 0xc7, 0xc9, 0x89, 0x54, 0x55, 0xf8, 0xd3, 0xfd, 0x06, 0xac, 0x58, 0x74, 0xb2, 0xe2, 0xab, - 0xd0, 0x4c, 0x82, 0x93, 0xd0, 0x4f, 0xa7, 0x31, 0x97, 0x55, 0x67, 0x00, 0xf7, 0x31, 0xac, 0x7e, - 0x8f, 0xc7, 0xc1, 0xf1, 0xc5, 0xdb, 0xaa, 0xb7, 0xeb, 0xa9, 0xe4, 0xeb, 0xd9, 0x81, 0xcb, 0xb9, - 0x7a, 0x64, 0xf3, 0x82, 0x7d, 0xe5, 0x4a, 0x36, 0x3c, 0x51, 0x30, 0x74, 0x5f, 0xc5, 0xd4, 0x7d, - 0xee, 0x0b, 0x60, 0x5b, 0x51, 0x18, 0xf2, 0x41, 0x7a, 0xc0, 0x79, 0x9c, 0x85, 0x6d, 0x32, 0x5e, - 0x6d, 0xdd, 0x5f, 0x97, 0x33, 0x9b, 0x57, 0xa8, 0x92, 0x89, 0x19, 0xd4, 0x26, 0x3c, 0x1e, 0x53, - 0xc5, 0x0d, 0x8f, 0x7e, 0xbb, 0x97, 0x61, 0xc5, 0xaa, 0x56, 0x3a, 0xba, 0x1f, 0xc3, 0xe5, 0xed, - 0x20, 0x19, 0x14, 0x1b, 0xec, 0xc2, 0xfc, 0x64, 0x7a, 0xd4, 0xcf, 0x24, 0x51, 0x15, 0xd1, 0xf7, - 0xc9, 0x7f, 0x22, 0x2b, 0xfb, 0xeb, 0x0e, 0xd4, 0x76, 0x9f, 0xef, 0x6d, 0xe1, 0x5e, 0x11, 0x84, - 0x83, 0x68, 0x8c, 0x16, 0x98, 0x18, 0xb4, 0x2e, 0xcf, 0x94, 0xb0, 0xab, 0xd0, 0x24, 0xc3, 0x0d, - 0xdd, 0x3d, 0x69, 0x07, 0x65, 0x00, 0x74, 0x35, 0xf9, 0xeb, 0x49, 0x10, 0x93, 0x2f, 0xa9, 0x3c, - 0xc4, 0x1a, 0x6d, 0x33, 0x45, 0x84, 0xfb, 0x3f, 0xe7, 0x60, 0x5e, 0x6e, 0xbe, 0x62, 0x23, 0x4f, - 0x83, 0x33, 0x9e, 0x6d, 0xe4, 0x58, 0x42, 0xa3, 0x38, 0xe6, 0xe3, 0x28, 0xd5, 0xf6, 0x9b, 0x58, - 0x06, 0x1b, 0x48, 0xae, 0xb4, 0x34, 0x22, 0x84, 0xf3, 0x5d, 0x15, 0x54, 0x16, 0x10, 0x27, 0x4b, - 0x19, 0x03, 0xc2, 0x3a, 0x53, 
0x45, 0x9c, 0x89, 0x81, 0x3f, 0xf1, 0x07, 0x41, 0x7a, 0x21, 0x55, - 0x82, 0x2e, 0x63, 0xdd, 0xa3, 0x68, 0xe0, 0x8f, 0xfa, 0x47, 0xfe, 0xc8, 0x0f, 0x07, 0x5c, 0xb9, - 0xe9, 0x16, 0x10, 0x5d, 0x56, 0xd9, 0x25, 0x45, 0x26, 0xdc, 0xda, 0x1c, 0x14, 0xf7, 0xef, 0x41, - 0x34, 0x1e, 0x07, 0x29, 0x7a, 0xba, 0x64, 0x96, 0x55, 0x3d, 0x03, 0x22, 0x82, 0x02, 0x54, 0x3a, - 0x17, 0xb3, 0xd7, 0x54, 0x41, 0x01, 0x03, 0x88, 0xb5, 0xe4, 0xac, 0xb3, 0xaa, 0x67, 0x40, 0x70, - 0x1d, 0xa6, 0x61, 0xc2, 0xd3, 0x74, 0xc4, 0x87, 0xba, 0x43, 0x2d, 0x22, 0x2b, 0x22, 0xd8, 0x3d, - 0x58, 0x11, 0xce, 0x77, 0xe2, 0xa7, 0x51, 0x72, 0x1a, 0x24, 0xfd, 0x04, 0xdd, 0xd4, 0x36, 0xd1, - 0x97, 0xa1, 0xd8, 0x03, 0x58, 0xcf, 0x81, 0x63, 0x3e, 0xe0, 0xc1, 0x19, 0x1f, 0x92, 0xf9, 0x56, - 0xf5, 0x66, 0xa1, 0xd9, 0x75, 0x68, 0x85, 0xd3, 0x71, 0x7f, 0x3a, 0x19, 0xfa, 0x68, 0xc0, 0x2c, - 0xd2, 0x3a, 0x98, 0x20, 0xf6, 0x31, 0x28, 0x1b, 0x4d, 0x5a, 0x8e, 0x4b, 0x96, 0x76, 0x43, 0xce, - 0xf5, 0x6c, 0x0a, 0x64, 0xca, 0xcc, 0x1c, 0xed, 0x48, 0x07, 0x4f, 0x01, 0x48, 0x46, 0xe2, 0xe0, - 0xcc, 0x4f, 0x79, 0x77, 0x59, 0x28, 0x74, 0x59, 0xc4, 0xef, 0x82, 0x30, 0x48, 0x03, 0x3f, 0x8d, - 0xe2, 0x2e, 0x23, 0x5c, 0x06, 0xc0, 0x49, 0x24, 0xfe, 0x48, 0x52, 0x3f, 0x9d, 0x26, 0xd2, 0x3a, - 0x5d, 0x11, 0x9e, 0x4a, 0x01, 0xc1, 0x3e, 0x81, 0x35, 0xc1, 0x11, 0x84, 0x92, 0x76, 0x37, 0x99, - 0x09, 0xab, 0x34, 0x23, 0x33, 0xb0, 0x38, 0x95, 0x92, 0x45, 0x0a, 0x1f, 0x5e, 0x16, 0x53, 0x39, - 0x03, 0x8d, 0xfd, 0xc3, 0x1e, 0x04, 0x83, 0xbe, 0xa4, 0x40, 0xf1, 0x58, 0xa3, 0x51, 0x14, 0x11, - 0xee, 0x3f, 0x76, 0xc4, 0x26, 0x22, 0x05, 0x2e, 0x31, 0xdc, 0x23, 0x21, 0x6a, 0xfd, 0x28, 0x1c, - 0x5d, 0x48, 0xe9, 0x03, 0x01, 0x7a, 0x16, 0x8e, 0x2e, 0xd0, 0x40, 0x0f, 0x42, 0x93, 0x44, 0xe8, - 0xab, 0xb6, 0x02, 0x12, 0xd1, 0x07, 0xd0, 0x9a, 0x4c, 0x8f, 0x46, 0xc1, 0x40, 0x90, 0x54, 0x45, - 0x2d, 0x02, 0x44, 0x04, 0xe8, 0x1b, 0x8a, 0x59, 0x17, 0x14, 0x35, 0xa2, 0x68, 0x49, 0x18, 0x92, - 0xb8, 0x8f, 0x60, 0xd5, 0xee, 0xa0, 0x54, 0xcc, 0x77, 0xa0, 0x21, 
0xe5, 0x38, 0x91, 0x0e, 0xfa, - 0xa2, 0x11, 0xbb, 0x44, 0x77, 0x46, 0xe3, 0xdd, 0x7f, 0x5d, 0x83, 0x15, 0x09, 0xdd, 0x1a, 0x45, - 0x09, 0x3f, 0x9c, 0x8e, 0xc7, 0x7e, 0x5c, 0xa2, 0x20, 0x9c, 0xb7, 0x28, 0x88, 0x8a, 0xad, 0x20, - 0xae, 0x59, 0x3e, 0xa2, 0xd0, 0x2e, 0x06, 0x84, 0xdd, 0x86, 0xa5, 0xc1, 0x28, 0x4a, 0x84, 0xc9, - 0x6e, 0x86, 0xce, 0xf2, 0xe0, 0xa2, 0x42, 0xab, 0x97, 0x29, 0x34, 0x53, 0x21, 0xcd, 0xe5, 0x14, - 0x92, 0x0b, 0x6d, 0xac, 0x94, 0x2b, 0xfd, 0x3a, 0x2f, 0x1d, 0x26, 0x03, 0x86, 0xfd, 0xc9, 0x8b, - 0xbf, 0xd0, 0x35, 0x4b, 0x65, 0xc2, 0x1f, 0x8c, 0x39, 0xe9, 0x6f, 0x83, 0xba, 0x29, 0x85, 0xbf, - 0x88, 0x62, 0x8f, 0x01, 0x44, 0x5b, 0x64, 0x44, 0x00, 0x19, 0x11, 0x37, 0xed, 0x15, 0x31, 0xe7, - 0x7e, 0x03, 0x0b, 0xd3, 0x98, 0x93, 0x61, 0x61, 0x7c, 0xe9, 0xfe, 0x4d, 0x07, 0x5a, 0x06, 0x8e, - 0x5d, 0x86, 0xe5, 0xad, 0x67, 0xcf, 0x0e, 0x76, 0xbc, 0xcd, 0xe7, 0x4f, 0xbf, 0xb7, 0xd3, 0xdf, - 0xda, 0x7b, 0x76, 0xb8, 0xd3, 0xb9, 0x84, 0xe0, 0xbd, 0x67, 0x5b, 0x9b, 0x7b, 0xfd, 0xc7, 0xcf, - 0xbc, 0x2d, 0x05, 0x76, 0xd8, 0x1a, 0x30, 0x6f, 0xe7, 0xb3, 0x67, 0xcf, 0x77, 0x2c, 0x78, 0x85, - 0x75, 0xa0, 0xfd, 0xc8, 0xdb, 0xd9, 0xdc, 0xda, 0x95, 0x90, 0x2a, 0x5b, 0x85, 0xce, 0xe3, 0x17, - 0xfb, 0xdb, 0x4f, 0xf7, 0x9f, 0xf4, 0xb7, 0x36, 0xf7, 0xb7, 0x76, 0xf6, 0x76, 0xb6, 0x3b, 0x35, - 0xb6, 0x00, 0xcd, 0xcd, 0x47, 0x9b, 0xfb, 0xdb, 0xcf, 0xf6, 0x77, 0xb6, 0x3b, 0x75, 0xf7, 0x3f, - 0x3b, 0x70, 0x99, 0x7a, 0x3d, 0xcc, 0x0b, 0xc8, 0x75, 0x68, 0x0d, 0xa2, 0x68, 0x82, 0xc6, 0x7b, - 0xb6, 0x3d, 0x99, 0x20, 0x64, 0x7e, 0x21, 0xdc, 0xc7, 0x51, 0x3c, 0xe0, 0x52, 0x3e, 0x80, 0x40, - 0x8f, 0x11, 0x82, 0xcc, 0x2f, 0x97, 0x57, 0x50, 0x08, 0xf1, 0x68, 0x09, 0x98, 0x20, 0x59, 0x83, - 0xb9, 0xa3, 0x98, 0xfb, 0x83, 0x53, 0x29, 0x19, 0xb2, 0xc4, 0xbe, 0x9a, 0x79, 0x97, 0x03, 0x9c, - 0xfd, 0x11, 0x1f, 0x12, 0xc7, 0x34, 0xbc, 0x25, 0x09, 0xdf, 0x92, 0x60, 0xd4, 0x66, 0xfe, 0x91, - 0x1f, 0x0e, 0xa3, 0x90, 0x0f, 0xa5, 0xe9, 0x9a, 0x01, 0xdc, 0x03, 0x58, 0xcb, 0x8f, 0x4f, 0xca, - 0xd7, 
0x27, 0x86, 0x7c, 0x09, 0x4b, 0xb2, 0x37, 0x7b, 0x35, 0x0d, 0x59, 0xfb, 0xd3, 0x0a, 0xd4, - 0xd0, 0xb0, 0x98, 0x6d, 0x84, 0x98, 0xb6, 0x62, 0xb5, 0x10, 0x67, 0x27, 0x87, 0x55, 0x6c, 0x35, - 0x32, 0x58, 0x92, 0x41, 0x32, 0x7c, 0xcc, 0x07, 0x67, 0x32, 0x5c, 0x62, 0x40, 0x50, 0x40, 0xd0, - 0x90, 0xa7, 0xaf, 0xa5, 0x80, 0xa8, 0xb2, 0xc2, 0xd1, 0x97, 0xf3, 0x19, 0x8e, 0xbe, 0xeb, 0xc2, - 0x7c, 0x10, 0x1e, 0x45, 0xd3, 0x70, 0x48, 0x02, 0xd1, 0xf0, 0x54, 0x91, 0x22, 0xfb, 0x24, 0xa8, - 0xc1, 0x58, 0xb1, 0x7f, 0x06, 0x60, 0xf7, 0xa1, 0x99, 0x5c, 0x84, 0x03, 0x93, 0xe7, 0x57, 0xe5, - 0x2c, 0xe1, 0x1c, 0x6c, 0x1c, 0x5e, 0x84, 0x03, 0xe2, 0xf0, 0x8c, 0xcc, 0xfd, 0x7d, 0x68, 0x28, - 0x30, 0xb2, 0xe5, 0x8b, 0xfd, 0xef, 0xec, 0x3f, 0x7b, 0xb9, 0xdf, 0x3f, 0xfc, 0xfe, 0xfe, 0x56, - 0xe7, 0x12, 0x5b, 0x82, 0xd6, 0xe6, 0x16, 0x71, 0x3a, 0x01, 0x1c, 0x24, 0x39, 0xd8, 0x3c, 0x3c, - 0xd4, 0x90, 0x8a, 0xcb, 0xd0, 0x19, 0x4f, 0xc8, 0x7a, 0xd3, 0x91, 0xeb, 0x4f, 0x60, 0xd9, 0x80, - 0x65, 0x9e, 0xc0, 0x04, 0x01, 0x39, 0x4f, 0x80, 0xcc, 0x3e, 0x81, 0x71, 0x3b, 0xb0, 0xf8, 0x84, - 0xa7, 0x4f, 0xc3, 0xe3, 0x48, 0xd5, 0xf4, 0xdf, 0x6b, 0xb0, 0xa4, 0x41, 0xb2, 0xa2, 0xdb, 0xb0, - 0x14, 0x0c, 0x79, 0x98, 0x06, 0xe9, 0x45, 0xdf, 0xf2, 0xf9, 0xf3, 0x60, 0x34, 0x97, 0xfd, 0x51, - 0xe0, 0xab, 0x03, 0x14, 0x51, 0x40, 0x1f, 0x18, 0xf7, 0x72, 0x33, 0xf6, 0x42, 0x7c, 0x25, 0x42, - 0x0d, 0xa5, 0x38, 0xd4, 0x40, 0x08, 0x97, 0x5b, 0x8c, 0xfe, 0x44, 0x98, 0x8d, 0x65, 0x28, 0x5c, - 0x2a, 0x51, 0x13, 0x0e, 0xb9, 0x2e, 0xf6, 0x7b, 0x0d, 0x28, 0x9c, 0x50, 0xcc, 0x09, 0xfd, 0x98, - 0x3f, 0xa1, 0x30, 0x4e, 0x39, 0x1a, 0x85, 0x53, 0x0e, 0xd4, 0x9f, 0x17, 0xe1, 0x80, 0x0f, 0xfb, - 0x69, 0xd4, 0x27, 0x3d, 0x4f, 0x2c, 0xd1, 0xf0, 0xf2, 0x60, 0x76, 0x15, 0xe6, 0x53, 0x9e, 0xa4, - 0x21, 0x17, 0xa1, 0xe5, 0xc6, 0xa3, 0x4a, 0xd7, 0xf1, 0x14, 0x08, 0x6d, 0xfc, 0x69, 0x1c, 0x24, - 0xdd, 0x36, 0x9d, 0x5f, 0xd0, 0x6f, 0xf6, 0x4d, 0xb8, 0x7c, 0xc4, 0x93, 0xb4, 0x7f, 0xca, 0xfd, - 0x21, 0x8f, 0x89, 0xbd, 0xc4, 0x41, 0x89, 
0x30, 0x9d, 0xca, 0x91, 0xc8, 0xb8, 0x67, 0x3c, 0x4e, - 0x82, 0x28, 0x24, 0xa3, 0xa9, 0xe9, 0xa9, 0x22, 0xd6, 0x87, 0x83, 0xd7, 0x9b, 0xb4, 0x9e, 0xc1, - 0x25, 0x1a, 0x78, 0x39, 0x92, 0xdd, 0x80, 0x39, 0x1a, 0x40, 0xd2, 0xed, 0x10, 0xcf, 0xb4, 0x33, - 0x99, 0x0f, 0x42, 0x4f, 0xe2, 0x70, 0x95, 0x07, 0xd1, 0x28, 0x8a, 0xc9, 0x72, 0x6a, 0x7a, 0xa2, - 0x60, 0xcf, 0xce, 0x49, 0xec, 0x4f, 0x4e, 0xa5, 0xf5, 0x94, 0x07, 0x7f, 0xbb, 0xd6, 0x68, 0x75, - 0xda, 0xee, 0x9f, 0x81, 0x3a, 0x55, 0x4b, 0xd5, 0xd1, 0x64, 0x3a, 0xb2, 0x3a, 0x82, 0x76, 0x61, - 0x3e, 0xe4, 0xe9, 0x79, 0x14, 0xbf, 0x52, 0xa7, 0x71, 0xb2, 0xe8, 0xfe, 0x94, 0xbc, 0x2c, 0x7d, - 0x3a, 0xf5, 0x82, 0x4c, 0x44, 0xf4, 0x95, 0xc5, 0x52, 0x25, 0xa7, 0xbe, 0x74, 0xfc, 0x1a, 0x04, - 0x38, 0x3c, 0xf5, 0x51, 0xd7, 0x5a, 0xab, 0x2f, 0x7c, 0xe9, 0x16, 0xc1, 0x76, 0xc5, 0xe2, 0xdf, - 0x80, 0x45, 0x75, 0xee, 0x95, 0xf4, 0x47, 0xfc, 0x38, 0x55, 0x91, 0xb0, 0x70, 0x3a, 0x26, 0x87, - 0x7b, 0x8f, 0x1f, 0xa7, 0xee, 0x3e, 0x2c, 0x4b, 0xfd, 0xf7, 0x6c, 0xc2, 0x55, 0xd3, 0xbf, 0x5b, - 0x66, 0x47, 0xb4, 0xee, 0xaf, 0xd8, 0x0a, 0x53, 0x9c, 0xf4, 0xd9, 0x94, 0xae, 0x07, 0xcc, 0xd4, - 0xa7, 0xb2, 0x42, 0xb9, 0x99, 0xab, 0x58, 0x9f, 0x1c, 0x8e, 0x05, 0xc3, 0xf9, 0x49, 0xa6, 0x83, - 0x81, 0x3a, 0xad, 0x6c, 0x78, 0xaa, 0xe8, 0xfe, 0x13, 0x07, 0x56, 0xa8, 0x36, 0x65, 0x09, 0xc9, - 0x3d, 0xeb, 0xc1, 0x97, 0xe8, 0xa6, 0x8a, 0xb4, 0x8a, 0xf8, 0xe2, 0x2a, 0xd4, 0xcd, 0x5d, 0x4c, - 0x14, 0xbe, 0x7c, 0x5c, 0xa5, 0x96, 0x8f, 0xab, 0xb8, 0x7f, 0xcf, 0x81, 0x65, 0xb1, 0x91, 0x90, - 0xd5, 0x2c, 0x87, 0xff, 0x67, 0x61, 0x41, 0x58, 0x04, 0x52, 0x2b, 0xc8, 0x8e, 0x66, 0xaa, 0x95, - 0xa0, 0x82, 0x78, 0xf7, 0x92, 0x67, 0x13, 0xb3, 0x87, 0x64, 0x95, 0x85, 0x7d, 0x82, 0x96, 0x9c, - 0x6b, 0xdb, 0x73, 0xbd, 0x7b, 0xc9, 0x33, 0xc8, 0x1f, 0x35, 0x60, 0x4e, 0xb8, 0x1c, 0xee, 0x13, - 0x58, 0xb0, 0x1a, 0xb2, 0x62, 0x3a, 0x6d, 0x11, 0xd3, 0x29, 0x04, 0x4f, 0x2b, 0x25, 0xc1, 0xd3, - 0x7f, 0x59, 0x05, 0x86, 0xcc, 0x92, 0x5b, 0x8d, 0xeb, 0xf6, 0x09, 0x84, 0x3a, 
0xe2, 0xce, 0x40, - 0x6c, 0x03, 0x98, 0x51, 0x54, 0xa7, 0x22, 0x62, 0xcb, 0x2c, 0xc1, 0xa0, 0x9a, 0x95, 0x16, 0x87, - 0x3e, 0x71, 0x20, 0x5f, 0x5d, 0x4c, 0x7b, 0x29, 0x0e, 0x77, 0x45, 0x3a, 0x7e, 0x40, 0xcf, 0x42, - 0xfa, 0xb8, 0xaa, 0x9c, 0x5f, 0xdf, 0xb9, 0xb7, 0xae, 0xef, 0x7c, 0x21, 0x6e, 0x66, 0x78, 0x59, - 0x0d, 0xdb, 0xcb, 0xba, 0x01, 0x0b, 0xea, 0x94, 0xa1, 0x3f, 0xc6, 0xd6, 0xa5, 0x4b, 0x6b, 0x01, - 0xd9, 0x1d, 0xe8, 0x28, 0x47, 0x47, 0xbb, 0x72, 0xe2, 0xac, 0xae, 0x00, 0x47, 0xfd, 0x9f, 0x45, - 0xd2, 0x5a, 0xd4, 0xd9, 0x0c, 0x40, 0x7e, 0x11, 0x72, 0x48, 0x7f, 0x1a, 0xca, 0xa3, 0x6d, 0x3e, - 0x24, 0x67, 0x16, 0xfd, 0xa2, 0x3c, 0xc2, 0xfd, 0x3b, 0x0e, 0x74, 0x70, 0xcd, 0x2c, 0xb6, 0xfc, - 0x14, 0x48, 0x2a, 0xde, 0x91, 0x2b, 0x2d, 0x5a, 0xf6, 0x00, 0x9a, 0x54, 0x8e, 0x26, 0x3c, 0x94, - 0x3c, 0xd9, 0xb5, 0x79, 0x32, 0xd3, 0x27, 0xbb, 0x97, 0xbc, 0x8c, 0xd8, 0xe0, 0xc8, 0xff, 0xe0, - 0x40, 0x4b, 0xb6, 0xf2, 0x6b, 0x47, 0x6a, 0x7a, 0x46, 0x2e, 0x82, 0xe0, 0xa4, 0x2c, 0xf5, 0xe0, - 0x36, 0x2c, 0x8d, 0xfd, 0x74, 0x1a, 0xe3, 0x7e, 0x6e, 0x45, 0x69, 0xf2, 0x60, 0xdc, 0x9c, 0x49, - 0x75, 0x26, 0xfd, 0x34, 0x18, 0xf5, 0x15, 0x56, 0x9e, 0xfa, 0x97, 0xa1, 0x50, 0x83, 0x24, 0xa9, - 0x7f, 0xc2, 0xe5, 0xbe, 0x2b, 0x0a, 0x6e, 0x17, 0xd6, 0x0e, 0xb2, 0x93, 0x17, 0xc3, 0xbe, 0x76, - 0xff, 0xf9, 0x02, 0xac, 0x17, 0x50, 0x3a, 0x47, 0x49, 0x86, 0x1f, 0x46, 0xc1, 0xf8, 0x28, 0xd2, - 0xce, 0x89, 0x63, 0x46, 0x26, 0x2c, 0x14, 0x3b, 0x81, 0xcb, 0xca, 0xc0, 0xc0, 0x39, 0xcd, 0x36, - 0xc3, 0x0a, 0xed, 0x72, 0x1f, 0xdb, 0x4b, 0x98, 0x6f, 0x50, 0xc1, 0x4d, 0x21, 0x2e, 0xaf, 0x8f, - 0x9d, 0x42, 0x57, 0x5b, 0x32, 0x52, 0x59, 0x1b, 0xd6, 0x0e, 0xb6, 0xf5, 0xd1, 0x5b, 0xda, 0xb2, - 0xcc, 0x71, 0x6f, 0x66, 0x6d, 0xec, 0x02, 0xae, 0x29, 0x1c, 0x69, 0xe3, 0x62, 0x7b, 0xb5, 0x77, - 0x1a, 0x1b, 0x39, 0x1a, 0x76, 0xa3, 0x6f, 0xa9, 0x98, 0xfd, 0x18, 0xd6, 0xce, 0xfd, 0x20, 0x55, - 0xdd, 0x32, 0x6c, 0x8b, 0x3a, 0x35, 0x79, 0xff, 0x2d, 0x4d, 0xbe, 0x14, 0x1f, 0x5b, 0x5b, 0xd4, - 0x8c, 0x1a, 0x7b, 
0x7f, 0x54, 0x81, 0x45, 0xbb, 0x1e, 0x64, 0x53, 0x29, 0xfb, 0x4a, 0x07, 0x2a, - 0x6b, 0x34, 0x07, 0x2e, 0xfa, 0xf7, 0x95, 0x32, 0xff, 0xde, 0xf4, 0xaa, 0xab, 0x6f, 0x0b, 0xf3, - 0xd5, 0xde, 0x2d, 0xcc, 0x57, 0x2f, 0x0d, 0xf3, 0xcd, 0x8e, 0x06, 0xcd, 0xfd, 0xba, 0xd1, 0xa0, - 0xf9, 0x37, 0x46, 0x83, 0x7a, 0xff, 0xc7, 0x01, 0x56, 0xe4, 0x5e, 0xf6, 0x44, 0x84, 0x34, 0x42, - 0x3e, 0x92, 0x4a, 0xec, 0xeb, 0xef, 0x26, 0x01, 0x6a, 0xb5, 0xd4, 0xd7, 0x28, 0x8a, 0x66, 0xa2, - 0x90, 0x69, 0x5e, 0x2d, 0x78, 0x65, 0xa8, 0x5c, 0xa8, 0xb3, 0xf6, 0xf6, 0x50, 0x67, 0xfd, 0xed, - 0xa1, 0xce, 0xb9, 0x7c, 0xa8, 0xb3, 0xf7, 0xd7, 0x1c, 0x58, 0x29, 0x61, 0xb3, 0xdf, 0xde, 0xc0, - 0x91, 0x31, 0x2c, 0xed, 0x53, 0x91, 0x8c, 0x61, 0x02, 0x7b, 0x7f, 0x09, 0x16, 0x2c, 0xd1, 0xfa, - 0xed, 0xb5, 0x9f, 0xb7, 0x10, 0x05, 0x67, 0x5b, 0xb0, 0xde, 0xff, 0xaa, 0x00, 0x2b, 0x8a, 0xf7, - 0xff, 0xd7, 0x3e, 0x14, 0xe7, 0xa9, 0x5a, 0x32, 0x4f, 0xff, 0x4f, 0x77, 0x9e, 0x8f, 0x60, 0x59, - 0x66, 0x3f, 0x1a, 0x81, 0x2c, 0xc1, 0x31, 0x45, 0x04, 0xda, 0xc8, 0x76, 0x9c, 0xb9, 0x61, 0x65, - 0x7b, 0x19, 0xdb, 0x6f, 0x2e, 0xdc, 0xec, 0xf6, 0xa0, 0x2b, 0x67, 0x68, 0xe7, 0x8c, 0x87, 0xe9, - 0xe1, 0xf4, 0x48, 0xa4, 0xff, 0x05, 0x51, 0xe8, 0xfe, 0xab, 0xaa, 0x36, 0xf3, 0x09, 0x29, 0x0d, - 0x8a, 0x6f, 0x42, 0xdb, 0xdc, 0x3e, 0xe4, 0x72, 0xe4, 0xe2, 0x98, 0x68, 0x4a, 0x98, 0x54, 0x6c, - 0x1b, 0x16, 0x49, 0x49, 0x0e, 0xf5, 0x77, 0x15, 0xfa, 0xee, 0x0d, 0xf1, 0x99, 0xdd, 0x4b, 0x5e, - 0xee, 0x1b, 0xf6, 0x7b, 0xb0, 0x68, 0x3b, 0x7f, 0xd2, 0x2a, 0x29, 0xf3, 0x06, 0xf0, 0x73, 0x9b, - 0x98, 0x6d, 0x42, 0x27, 0xef, 0x3d, 0xca, 0x6c, 0x9c, 0x19, 0x15, 0x14, 0xc8, 0xd9, 0x03, 0x79, - 0xe0, 0x58, 0xa7, 0xb8, 0xc9, 0x0d, 0xfb, 0x33, 0x63, 0x9a, 0x36, 0xc4, 0x1f, 0xe3, 0x08, 0xf2, - 0x0f, 0x00, 0x32, 0x18, 0xeb, 0x40, 0xfb, 0xd9, 0xc1, 0xce, 0x7e, 0x7f, 0x6b, 0x77, 0x73, 0x7f, - 0x7f, 0x67, 0xaf, 0x73, 0x89, 0x31, 0x58, 0xa4, 0x30, 0xdf, 0xb6, 0x86, 0x39, 0x08, 0x93, 0x81, - 0x15, 0x05, 0xab, 0xb0, 0x55, 0xe8, 0x3c, 0xdd, 0xcf, 
0x41, 0xab, 0x8f, 0x9a, 0x5a, 0x3e, 0xdc, - 0x35, 0x58, 0x15, 0xd9, 0xad, 0x8f, 0x04, 0x7b, 0x28, 0xeb, 0xe4, 0x1f, 0x39, 0x70, 0x39, 0x87, - 0xc8, 0xd2, 0xb5, 0x84, 0x01, 0x62, 0x5b, 0x25, 0x36, 0x90, 0x0e, 0x11, 0x94, 0xad, 0x99, 0xd3, - 0x20, 0x45, 0x04, 0xf2, 0xbc, 0x61, 0x9b, 0xe6, 0x24, 0xa9, 0x0c, 0xe5, 0xae, 0xeb, 0xcc, 0x98, - 0x5c, 0xc7, 0x8f, 0x45, 0xd6, 0xac, 0x89, 0xc8, 0x0e, 0x70, 0xed, 0x2e, 0xab, 0x22, 0xba, 0x15, - 0x96, 0xb1, 0x63, 0xf7, 0xb7, 0x14, 0xe7, 0xfe, 0xd3, 0x2a, 0xb0, 0xef, 0x4e, 0x79, 0x7c, 0x41, - 0x39, 0x59, 0x3a, 0x6a, 0xba, 0x9e, 0x8f, 0x09, 0xce, 0x4d, 0xa6, 0x47, 0xdf, 0xe1, 0x17, 0x2a, - 0x3b, 0xb1, 0x92, 0x65, 0x27, 0x96, 0x65, 0x08, 0xd6, 0xde, 0x9e, 0x21, 0x58, 0x7f, 0x5b, 0x86, - 0xe0, 0x57, 0x60, 0x21, 0x38, 0x09, 0x23, 0x94, 0x79, 0xb4, 0x13, 0x92, 0xee, 0xdc, 0xf5, 0x2a, - 0xfa, 0xd6, 0x12, 0xb8, 0x8f, 0x30, 0xf6, 0x30, 0x23, 0xe2, 0xc3, 0x13, 0xca, 0x46, 0x35, 0xb5, - 0xc0, 0xce, 0xf0, 0x84, 0xef, 0x45, 0x03, 0x3f, 0x8d, 0x62, 0x0a, 0xec, 0xa8, 0x8f, 0x11, 0x9e, - 0xb0, 0x1b, 0xb0, 0x98, 0x44, 0x53, 0xb4, 0x9c, 0xd4, 0x58, 0x45, 0x24, 0xa9, 0x2d, 0xa0, 0x07, - 0x62, 0xc4, 0x1b, 0xb0, 0x32, 0x4d, 0x78, 0x7f, 0x1c, 0x24, 0x09, 0xee, 0x8e, 0x83, 0x28, 0x4c, - 0xe3, 0x68, 0x24, 0xe3, 0x49, 0xcb, 0xd3, 0x84, 0x7f, 0x26, 0x30, 0x5b, 0x02, 0xc1, 0xbe, 0x99, - 0x75, 0x69, 0xe2, 0x07, 0x71, 0xd2, 0x05, 0xea, 0x92, 0x1a, 0x29, 0xf6, 0xfb, 0xc0, 0x0f, 0x62, - 0xdd, 0x17, 0x2c, 0x24, 0xb9, 0x2c, 0xc7, 0x56, 0x2e, 0xcb, 0x51, 0x26, 0xc9, 0x6d, 0x40, 0x43, - 0x7d, 0x8e, 0x4e, 0xee, 0x71, 0x1c, 0x8d, 0x95, 0x93, 0x8b, 0xbf, 0xd9, 0x22, 0x54, 0xd2, 0x48, - 0x3a, 0xa8, 0x95, 0x34, 0x72, 0xbf, 0x0f, 0x2d, 0x63, 0x06, 0x64, 0xa6, 0x1c, 0x19, 0x54, 0xd2, - 0x3b, 0xae, 0x09, 0xff, 0x25, 0xe4, 0xa3, 0xa7, 0x43, 0xf6, 0x35, 0x58, 0x1e, 0x06, 0x31, 0xa7, - 0xa4, 0xd8, 0x7e, 0xcc, 0xcf, 0x78, 0x9c, 0xa8, 0x38, 0x42, 0x47, 0x23, 0x3c, 0x01, 0x77, 0xfb, - 0xb0, 0x62, 0xb1, 0x8d, 0x96, 0xaa, 0x39, 0xca, 0xea, 0x53, 0xa1, 0x4c, 0x3b, 0xe3, 0x4f, 
0xe2, - 0x70, 0x3f, 0x92, 0x21, 0x90, 0xfe, 0x24, 0x8e, 0x8e, 0xa8, 0x11, 0xc7, 0xb3, 0x60, 0xee, 0x2f, - 0x2b, 0x50, 0xdd, 0x8d, 0x26, 0xe6, 0xa1, 0x8e, 0x63, 0x1f, 0xea, 0x48, 0xa3, 0xb1, 0xaf, 0x6d, - 0x42, 0xb9, 0xb3, 0x5b, 0x40, 0x76, 0x07, 0x16, 0xfd, 0x71, 0xda, 0x4f, 0x23, 0x34, 0x92, 0xcf, - 0xfd, 0x58, 0xa4, 0xff, 0x55, 0x89, 0x1d, 0x72, 0x18, 0xb6, 0x0a, 0x55, 0x6d, 0xeb, 0x10, 0x01, - 0x16, 0xd1, 0x43, 0xa3, 0xc3, 0xef, 0x0b, 0x19, 0xab, 0x94, 0x25, 0x94, 0x76, 0xfb, 0x7b, 0xe1, - 0x1e, 0x8b, 0x1d, 0xab, 0x0c, 0x85, 0x06, 0x2c, 0x0a, 0xc0, 0x38, 0xb3, 0x07, 0x75, 0xd9, 0x8c, - 0xc2, 0x37, 0xec, 0x28, 0xfc, 0x75, 0x68, 0xa5, 0xa3, 0xb3, 0xfe, 0xc4, 0xbf, 0x18, 0x45, 0xfe, - 0x50, 0x32, 0x9e, 0x09, 0x72, 0x7f, 0xe5, 0x40, 0x9d, 0x66, 0x18, 0xf7, 0x67, 0xa1, 0xc0, 0xf4, - 0xc9, 0x0f, 0xcd, 0xda, 0x82, 0x97, 0x07, 0x33, 0xd7, 0x4a, 0xe2, 0xae, 0xe8, 0x21, 0x9b, 0x89, - 0xdc, 0xd7, 0xa1, 0x29, 0x4a, 0x3a, 0x21, 0x99, 0x48, 0x32, 0x20, 0xbb, 0x06, 0xb5, 0xd3, 0x68, - 0xa2, 0x5c, 0x18, 0x50, 0x87, 0xbc, 0xd1, 0xc4, 0x23, 0x78, 0xd6, 0x1f, 0xac, 0x4f, 0x0c, 0x5c, - 0x98, 0x89, 0x79, 0x30, 0x9a, 0xe6, 0xba, 0x5a, 0x73, 0x22, 0x73, 0x50, 0xf7, 0x05, 0x2c, 0xa1, - 0x0c, 0x18, 0x91, 0xf0, 0xd9, 0xca, 0xea, 0xab, 0xb8, 0xf7, 0x0d, 0x46, 0xd3, 0x21, 0x37, 0x1d, - 0x49, 0x8a, 0x74, 0x4a, 0xb8, 0x32, 0xa1, 0xdc, 0x7f, 0xe1, 0x08, 0xd9, 0xc2, 0x7a, 0xd9, 0x6d, - 0xa8, 0xa1, 0xca, 0xc9, 0xc5, 0x0d, 0x74, 0x1e, 0x08, 0xd2, 0x79, 0x44, 0x81, 0x9c, 0x4c, 0xb1, - 0x48, 0xb3, 0x76, 0x11, 0x89, 0xcc, 0xbc, 0x30, 0x3d, 0xb2, 0x9c, 0xf3, 0x92, 0x83, 0xb2, 0x0d, - 0xe3, 0x20, 0xa7, 0x66, 0xa9, 0x31, 0xb5, 0xd5, 0x0e, 0x4f, 0xb8, 0x71, 0x80, 0xf3, 0x4b, 0x07, - 0x16, 0xac, 0x3e, 0x21, 0xa7, 0x8c, 0xfc, 0x24, 0x95, 0x67, 0xf1, 0x72, 0xe5, 0x4d, 0x90, 0xc9, - 0x65, 0x15, 0x9b, 0xcb, 0xf4, 0x81, 0x40, 0xd5, 0x3c, 0x10, 0xb8, 0x07, 0xcd, 0x2c, 0x8b, 0xdf, - 0xee, 0x14, 0xb6, 0xa8, 0x32, 0x62, 0x32, 0xa2, 0x2c, 0xe4, 0x5c, 0x37, 0x42, 0xce, 0xee, 0x43, - 0x68, 0x19, 0xf4, 0x66, 0xc8, 
0xd8, 0xb1, 0x42, 0xc6, 0x3a, 0x5d, 0xac, 0x92, 0xa5, 0x8b, 0xb9, - 0x3f, 0xab, 0xc0, 0x02, 0xb2, 0x77, 0x10, 0x9e, 0x1c, 0x44, 0xa3, 0x60, 0x70, 0x41, 0x6c, 0xa5, - 0x38, 0x59, 0x6e, 0x39, 0x8a, 0xcd, 0x6d, 0x30, 0x8a, 0x9c, 0xce, 0x91, 0x15, 0xfa, 0x41, 0x97, - 0x51, 0x81, 0xa0, 0xf8, 0x1d, 0xf9, 0x89, 0x94, 0x49, 0x69, 0xf2, 0x5a, 0x40, 0x14, 0x73, 0x04, - 0x50, 0xf2, 0xdf, 0x38, 0x18, 0x8d, 0x02, 0x41, 0x2b, 0x1c, 0xa2, 0x32, 0x14, 0xb6, 0x39, 0x0c, - 0x12, 0xff, 0x28, 0x3b, 0xec, 0xd3, 0x65, 0x8a, 0xa6, 0xf9, 0xaf, 0x8d, 0x68, 0x9a, 0xc8, 0x16, - 0xb6, 0x81, 0xf9, 0x85, 0x9c, 0x2f, 0x2c, 0xa4, 0xfb, 0xc7, 0x15, 0x68, 0x19, 0x6c, 0x21, 0x4f, - 0xb8, 0x6d, 0xdd, 0x6e, 0x40, 0x14, 0xde, 0x72, 0xaf, 0x0d, 0x08, 0xbb, 0x61, 0xb7, 0x48, 0x11, - 0x75, 0x12, 0x76, 0x8b, 0x7d, 0xae, 0x42, 0x13, 0xd9, 0xfe, 0x63, 0xf2, 0xe5, 0xe5, 0xf5, 0x19, - 0x0d, 0x50, 0xd8, 0xfb, 0x84, 0xad, 0x67, 0x58, 0x02, 0xbc, 0xf1, 0x4c, 0xfc, 0x01, 0xb4, 0x65, - 0x35, 0xb4, 0xbe, 0x34, 0xe0, 0x4c, 0xf0, 0xac, 0xb5, 0xf7, 0x2c, 0x4a, 0xf5, 0xe5, 0x7d, 0xf5, - 0x65, 0xe3, 0x6d, 0x5f, 0x2a, 0x4a, 0xf7, 0x89, 0x4e, 0x35, 0x78, 0x12, 0xfb, 0x93, 0x53, 0xa5, - 0x4c, 0xee, 0xc1, 0x8a, 0xd2, 0x19, 0xd3, 0xd0, 0x0f, 0xc3, 0x68, 0x1a, 0x0e, 0xb8, 0xca, 0x2a, - 0x2b, 0x43, 0xb9, 0x43, 0x9d, 0x83, 0x4c, 0x15, 0xb1, 0x3b, 0x50, 0x17, 0x06, 0x8b, 0xd8, 0x02, - 0xcb, 0xd5, 0x87, 0x20, 0x61, 0xb7, 0xa1, 0x2e, 0xec, 0x96, 0xca, 0x4c, 0x81, 0x17, 0x04, 0xee, - 0x1d, 0x58, 0xa2, 0xa4, 0x67, 0x5b, 0xef, 0xd9, 0x5b, 0xe3, 0xdc, 0x40, 0xa4, 0x45, 0xaf, 0x02, - 0xdb, 0x17, 0xf2, 0x64, 0x1e, 0x18, 0xfe, 0xaa, 0x0a, 0x2d, 0x03, 0x8c, 0x7a, 0x89, 0x4e, 0x79, - 0xfa, 0xc3, 0xc0, 0x1f, 0xf3, 0x94, 0xc7, 0x52, 0x86, 0x72, 0x50, 0xa4, 0xf3, 0xcf, 0x4e, 0xfa, - 0xd1, 0x34, 0xed, 0x0f, 0xf9, 0x49, 0xcc, 0xb9, 0xdc, 0xaf, 0x73, 0x50, 0xa4, 0x43, 0x2e, 0x36, - 0xe8, 0xc4, 0xb9, 0x4c, 0x0e, 0xaa, 0x8e, 0xff, 0xc4, 0x1c, 0xd5, 0xb2, 0xe3, 0x3f, 0x31, 0x23, - 0x79, 0x8d, 0x5a, 0x2f, 0xd1, 0xa8, 0x9f, 0xc0, 0x9a, 0xd0, 0x9d, 
0x52, 0x6b, 0xf4, 0x73, 0x8c, - 0x35, 0x03, 0xcb, 0xee, 0x40, 0x07, 0xfb, 0xac, 0xc4, 0x22, 0x09, 0x7e, 0x2a, 0x64, 0xcb, 0xf1, - 0x0a, 0x70, 0xa4, 0xa5, 0x98, 0xb4, 0x49, 0x2b, 0x72, 0x30, 0x0a, 0x70, 0xa2, 0xf5, 0x5f, 0xdb, - 0xb4, 0x4d, 0x49, 0x9b, 0x83, 0xb3, 0x07, 0xb0, 0x3e, 0xe6, 0xc3, 0xc0, 0xb7, 0xab, 0xa0, 0x10, - 0x91, 0x48, 0x04, 0x9b, 0x85, 0xc6, 0x56, 0x70, 0x16, 0x7e, 0x1a, 0x8d, 0x8f, 0x02, 0xb1, 0xa1, - 0x89, 0xe8, 0x79, 0xcd, 0x2b, 0xc0, 0xdd, 0x05, 0x68, 0x1d, 0xa6, 0xd1, 0x44, 0x2d, 0xfd, 0x22, - 0xb4, 0x45, 0x51, 0xe6, 0x10, 0xbe, 0x07, 0x57, 0x88, 0x57, 0x9f, 0x47, 0x93, 0x68, 0x14, 0x9d, - 0x5c, 0x58, 0x3e, 0xf0, 0xbf, 0x77, 0x60, 0xc5, 0xc2, 0x66, 0x4e, 0x30, 0x05, 0xec, 0x54, 0xf2, - 0x97, 0x60, 0xef, 0x65, 0x63, 0x3b, 0x10, 0x84, 0xe2, 0x6c, 0xe4, 0x85, 0xcc, 0x07, 0xdb, 0xcc, - 0x6e, 0x33, 0xa8, 0x0f, 0x05, 0xaf, 0x77, 0x8b, 0xbc, 0x2e, 0xbf, 0x57, 0xf7, 0x1c, 0x54, 0x15, - 0xbf, 0x27, 0x33, 0x66, 0x86, 0x72, 0xd0, 0x55, 0x3b, 0xcb, 0xc1, 0x8c, 0x99, 0xa8, 0x1e, 0x0c, - 0x34, 0x30, 0x71, 0x7f, 0xee, 0x00, 0x64, 0xbd, 0xa3, 0x3c, 0x0b, 0xbd, 0xa5, 0x89, 0xdb, 0xb2, - 0xc6, 0xf6, 0xf5, 0x21, 0xb4, 0xf5, 0x51, 0x79, 0xb6, 0x4b, 0xb6, 0x14, 0x0c, 0xad, 0x8a, 0x5b, - 0xb0, 0x74, 0x32, 0x8a, 0x8e, 0xc8, 0x7a, 0xa1, 0xa4, 0xd4, 0x44, 0x66, 0x52, 0x2e, 0x0a, 0xf0, - 0x63, 0x09, 0xcd, 0xb6, 0xd4, 0x9a, 0xb9, 0xa5, 0x96, 0x6f, 0x90, 0x7f, 0xab, 0xa2, 0xcf, 0x2b, - 0xb3, 0x99, 0x98, 0x29, 0xe1, 0xec, 0x7e, 0x41, 0x9d, 0xcf, 0x38, 0x1e, 0x24, 0xfb, 0xfe, 0xe0, - 0xad, 0xe1, 0xd3, 0x87, 0xb0, 0x18, 0x0b, 0x5d, 0xa9, 0x14, 0x69, 0xed, 0x0d, 0x8a, 0x74, 0x21, - 0xb6, 0x76, 0xe3, 0xaf, 0x42, 0xc7, 0x1f, 0x9e, 0xf1, 0x38, 0x0d, 0x28, 0x9c, 0x44, 0xa6, 0x93, - 0x18, 0xdc, 0x92, 0x01, 0x27, 0x0b, 0xe5, 0x16, 0x2c, 0xc9, 0x9c, 0x56, 0x4d, 0x29, 0xef, 0x9d, - 0x65, 0x60, 0x24, 0x74, 0x7f, 0xa1, 0x8e, 0x46, 0xed, 0x95, 0x9d, 0x3d, 0x23, 0xe6, 0xe8, 0x2a, - 0xb9, 0xd1, 0x7d, 0x45, 0x1e, 0x53, 0x0e, 0x55, 0xcc, 0xaa, 0x6a, 0xe4, 0x5c, 0x0d, 0xe5, 0xb1, - 0xb2, 
0x3d, 0xa5, 0xb5, 0x77, 0x99, 0x52, 0xf7, 0x4f, 0x1c, 0x98, 0xdf, 0x8d, 0x26, 0xbb, 0x32, - 0xfb, 0x8c, 0xc4, 0x43, 0x27, 0x93, 0xab, 0xe2, 0x1b, 0xf2, 0xd2, 0x4a, 0x2d, 0x90, 0x85, 0xbc, - 0x05, 0xf2, 0xe7, 0xe1, 0x3d, 0x8a, 0x98, 0xc6, 0xd1, 0x24, 0x8a, 0x51, 0x44, 0xfd, 0x91, 0x30, - 0x37, 0xa2, 0x30, 0x3d, 0x55, 0x2a, 0xf4, 0x4d, 0x24, 0x14, 0xc6, 0x40, 0xef, 0x52, 0x78, 0x2e, - 0xd2, 0x62, 0x12, 0x9a, 0xb5, 0x88, 0x70, 0x7f, 0x17, 0x9a, 0xe4, 0x4d, 0xd0, 0xb0, 0x3e, 0x82, - 0xe6, 0x69, 0x34, 0xe9, 0x9f, 0x06, 0x61, 0xaa, 0x44, 0x7e, 0x31, 0x33, 0xf3, 0x77, 0x69, 0x42, - 0x34, 0x81, 0xfb, 0xc7, 0x73, 0x30, 0xff, 0x34, 0x3c, 0x8b, 0x82, 0x01, 0x1d, 0xc3, 0x8e, 0xf9, - 0x38, 0x52, 0xa9, 0xf5, 0xf8, 0x9b, 0x5d, 0x85, 0x79, 0xca, 0x25, 0x9d, 0x08, 0xa6, 0x6d, 0x8b, - 0x74, 0x0b, 0x09, 0xa2, 0x0b, 0xa1, 0xd9, 0xb5, 0x38, 0x21, 0x54, 0x06, 0x04, 0x3d, 0xb1, 0xd8, - 0xbc, 0xd6, 0x26, 0x4b, 0xd9, 0xd5, 0x85, 0xba, 0x71, 0x75, 0x01, 0xdb, 0x92, 0xd9, 0x72, 0x22, - 0x9d, 0x4a, 0xb4, 0x25, 0x41, 0xe4, 0x3d, 0xc6, 0x5c, 0x44, 0xbc, 0xb5, 0x91, 0x85, 0xde, 0xa3, - 0x09, 0x44, 0x43, 0x4c, 0x7c, 0x20, 0x68, 0xc4, 0x06, 0x60, 0x82, 0xd0, 0x14, 0xcd, 0xdf, 0xa4, - 0x14, 0x37, 0x59, 0xf3, 0x60, 0xd4, 0xdf, 0x43, 0xae, 0xd5, 0xac, 0x18, 0x07, 0x88, 0xab, 0x7f, - 0x79, 0xb8, 0xe1, 0x73, 0x8a, 0xb4, 0x5f, 0xe5, 0x73, 0x22, 0xc3, 0xf8, 0xa3, 0xd1, 0x91, 0x3f, - 0x78, 0x45, 0x17, 0x69, 0xe9, 0x60, 0xb4, 0xe9, 0xd9, 0x40, 0xca, 0x79, 0xcb, 0x56, 0x95, 0x12, - 0x53, 0x6a, 0x9e, 0x09, 0x62, 0xf7, 0xa1, 0x45, 0xbe, 0xb8, 0x5c, 0xd7, 0x45, 0x5a, 0xd7, 0x8e, - 0xe9, 0xac, 0xd3, 0xca, 0x9a, 0x44, 0xe6, 0x11, 0xf1, 0x52, 0x21, 0x11, 0xd7, 0x1f, 0x0e, 0xe5, - 0xc9, 0x7a, 0x47, 0xc4, 0x15, 0x34, 0x80, 0xbc, 0x7d, 0x31, 0x61, 0x82, 0x60, 0x99, 0x08, 0x2c, - 0x18, 0xbb, 0x06, 0x0d, 0xf4, 0xf0, 0x26, 0x7e, 0x30, 0xa4, 0x5c, 0x14, 0xe1, 0x68, 0x6a, 0x18, - 0xd6, 0xa1, 0x7e, 0xd3, 0x56, 0xb9, 0x42, 0xb3, 0x62, 0xc1, 0x70, 0x6e, 0x74, 0x79, 0x9c, 0x65, - 0xee, 0xda, 0x40, 0xf6, 0x31, 0x9d, 0x6f, 
0xa6, 0x9c, 0xd2, 0x73, 0x17, 0xef, 0xbf, 0x27, 0xc7, - 0x2c, 0x99, 0x56, 0xfd, 0x3d, 0x44, 0x12, 0x4f, 0x50, 0xa2, 0x91, 0x26, 0x42, 0xcc, 0x6b, 0x96, - 0x91, 0x26, 0x49, 0x29, 0xc4, 0x2c, 0x08, 0xdc, 0x4d, 0x68, 0x9b, 0x15, 0xb0, 0x06, 0xd4, 0x9e, - 0x1d, 0xec, 0xec, 0x77, 0x2e, 0xb1, 0x16, 0xcc, 0x1f, 0xee, 0x3c, 0x7f, 0xbe, 0xb7, 0xb3, 0xdd, - 0x71, 0x58, 0x1b, 0x1a, 0x3a, 0x95, 0xb1, 0x82, 0xa5, 0xcd, 0xad, 0xad, 0x9d, 0x83, 0xe7, 0x3b, - 0xdb, 0x9d, 0xaa, 0xfb, 0x87, 0x15, 0x68, 0x19, 0x35, 0xbf, 0x21, 0xfe, 0x71, 0x0d, 0x80, 0x3c, - 0x86, 0x2c, 0xa1, 0xa1, 0xe6, 0x19, 0x10, 0xd4, 0x88, 0xda, 0x97, 0xae, 0x8a, 0x1b, 0x80, 0xaa, - 0x4c, 0x73, 0x45, 0x57, 0xed, 0xcc, 0x28, 0x7e, 0xdd, 0xb3, 0x81, 0xc8, 0x47, 0x12, 0x40, 0x59, - 0x75, 0x42, 0xba, 0x4c, 0x10, 0xae, 0x4b, 0xcc, 0x93, 0x68, 0x74, 0xc6, 0x05, 0x89, 0xb0, 0xbf, - 0x2c, 0x18, 0xb6, 0x25, 0xd5, 0x8b, 0x91, 0xf1, 0x5a, 0xf7, 0x6c, 0x20, 0xfb, 0xba, 0x5a, 0x97, - 0x06, 0xad, 0xcb, 0x7a, 0x71, 0x92, 0xcd, 0x35, 0x71, 0x53, 0x60, 0x9b, 0xc3, 0xa1, 0xc4, 0x9a, - 0xf7, 0x09, 0x63, 0xf3, 0xf2, 0xaa, 0x52, 0x10, 0x25, 0x42, 0x5a, 0x29, 0x17, 0xd2, 0x37, 0xb2, - 0xb2, 0xbb, 0x03, 0xad, 0x03, 0xe3, 0x3a, 0x2c, 0xe9, 0x2b, 0x75, 0x11, 0x56, 0xea, 0x39, 0x03, - 0x62, 0x74, 0xa7, 0x62, 0x76, 0xc7, 0xfd, 0x43, 0x47, 0xdc, 0x30, 0xd2, 0xdd, 0x17, 0x6d, 0xbb, - 0xd0, 0xd6, 0x31, 0xda, 0x2c, 0x99, 0xdb, 0x82, 0x21, 0x0d, 0x75, 0xa5, 0x1f, 0x1d, 0x1f, 0x27, - 0x5c, 0xa5, 0x5e, 0x5a, 0x30, 0x65, 0x28, 0xa2, 0xe9, 0x19, 0x88, 0x16, 0x12, 0x99, 0x82, 0x59, - 0x80, 0x23, 0x93, 0xc8, 0x50, 0x9f, 0x4a, 0x3a, 0xd5, 0x65, 0x9d, 0x73, 0x9e, 0x9f, 0xe5, 0x3b, - 0xd0, 0xd0, 0xf5, 0xda, 0x3b, 0x82, 0xa2, 0xd4, 0x78, 0xdc, 0x79, 0xc8, 0x81, 0xb4, 0x3a, 0x2d, - 0x78, 0xb5, 0x88, 0x60, 0x1b, 0xc0, 0x8e, 0x83, 0x38, 0x4f, 0x2e, 0x98, 0xb7, 0x04, 0xe3, 0xbe, - 0x84, 0x15, 0x25, 0x6f, 0x86, 0x05, 0x6b, 0x2f, 0xa2, 0xf3, 0x36, 0x7d, 0x54, 0x29, 0xea, 0x23, - 0xf7, 0x4f, 0xab, 0x30, 0x2f, 0x57, 0xba, 0x70, 0xa5, 0x5a, 0xac, 0xb3, 0x05, 
0x63, 0x5d, 0xeb, - 0xf2, 0x1c, 0x29, 0x2f, 0xb9, 0x0b, 0x15, 0xf6, 0x99, 0x6a, 0xd9, 0x3e, 0xc3, 0xa0, 0x36, 0xf1, - 0xd3, 0x53, 0x0a, 0xb1, 0x34, 0x3d, 0xfa, 0xad, 0xa2, 0x91, 0x75, 0x3b, 0x1a, 0x59, 0x76, 0x81, - 0x5c, 0x98, 0x50, 0xc5, 0x0b, 0xe4, 0x57, 0xa1, 0x29, 0x2e, 0x1d, 0x67, 0x01, 0xc7, 0x0c, 0x80, - 0xdc, 0x2b, 0x0a, 0xa4, 0x21, 0xe4, 0x5d, 0x96, 0x0c, 0xf2, 0x25, 0x76, 0xb6, 0x6f, 0xc2, 0x9c, - 0xb8, 0x4c, 0x21, 0x53, 0x6b, 0xaf, 0xaa, 0x43, 0x38, 0x41, 0xa7, 0xfe, 0x8a, 0x1c, 0x1d, 0x4f, - 0xd2, 0x9a, 0x57, 0x31, 0x5b, 0xf6, 0x55, 0x4c, 0x33, 0x4e, 0xda, 0xb6, 0xe3, 0xa4, 0xee, 0x63, - 0x58, 0xb0, 0xaa, 0x43, 0xcd, 0x2a, 0x53, 0x73, 0x3b, 0x97, 0xd8, 0x02, 0x34, 0x9f, 0xee, 0xf7, - 0x1f, 0xef, 0x3d, 0x7d, 0xb2, 0xfb, 0xbc, 0xe3, 0x60, 0xf1, 0xf0, 0xc5, 0xd6, 0xd6, 0xce, 0xce, - 0x36, 0x69, 0x5a, 0x80, 0xb9, 0xc7, 0x9b, 0x4f, 0xf7, 0x48, 0xcf, 0x6e, 0x0b, 0xde, 0x96, 0x75, - 0xe9, 0x83, 0x8f, 0xaf, 0x03, 0x53, 0x3e, 0x3e, 0xa5, 0xe8, 0x4c, 0x46, 0x3c, 0x55, 0x59, 0xe3, - 0xcb, 0x12, 0xf3, 0x54, 0x23, 0xd4, 0xa5, 0x87, 0xac, 0x96, 0x4c, 0x44, 0xe4, 0x24, 0xe5, 0x45, - 0x44, 0x92, 0x7a, 0x1a, 0xef, 0xf6, 0xa0, 0xbb, 0xcd, 0xb1, 0xb6, 0xcd, 0xd1, 0x28, 0xd7, 0x1d, - 0x74, 0xd4, 0x4a, 0x70, 0xd2, 0x8b, 0xfb, 0x2e, 0x5c, 0xde, 0x14, 0x09, 0xe2, 0xbf, 0xad, 0xfc, - 0x41, 0xb7, 0x0b, 0x6b, 0xf9, 0x2a, 0x65, 0x63, 0x8f, 0x61, 0x79, 0x9b, 0x1f, 0x4d, 0x4f, 0xf6, - 0xf8, 0x59, 0xd6, 0x10, 0x83, 0x5a, 0x72, 0x1a, 0x9d, 0xcb, 0xf9, 0xa1, 0xdf, 0xec, 0x7d, 0x80, - 0x11, 0xd2, 0xf4, 0x93, 0x09, 0x1f, 0xa8, 0x0b, 0x7c, 0x04, 0x39, 0x9c, 0xf0, 0x81, 0xfb, 0x09, - 0x30, 0xb3, 0x1e, 0x39, 0x5f, 0x68, 0x67, 0x4d, 0x8f, 0xfa, 0xc9, 0x45, 0x92, 0xf2, 0xb1, 0xba, - 0x99, 0x68, 0x82, 0xdc, 0x5b, 0xd0, 0x3e, 0xf0, 0x2f, 0x3c, 0xfe, 0x13, 0xf9, 0xb4, 0xc0, 0x3a, - 0xcc, 0x4f, 0xfc, 0x0b, 0x64, 0x41, 0x1d, 0xf4, 0x25, 0xb4, 0xfb, 0xbf, 0x2b, 0x30, 0x27, 0x28, - 0xb1, 0xd6, 0x21, 0x4f, 0xd2, 0x20, 0x24, 0x49, 0x53, 0xb5, 0x1a, 0xa0, 0x82, 0x6c, 0x57, 0x4a, - 0x64, 0x5b, 0x46, 
0x24, 0xd4, 0x65, 0x28, 0x29, 0xc0, 0x16, 0x0c, 0x25, 0x2d, 0x4b, 0x04, 0x16, - 0xa1, 0xc1, 0x0c, 0x90, 0x3b, 0x41, 0xc8, 0xac, 0x39, 0xd1, 0x3f, 0xa5, 0xb6, 0xa4, 0x18, 0x9b, - 0xa0, 0x52, 0x9b, 0x71, 0x5e, 0x48, 0x7b, 0xc1, 0x66, 0x2c, 0xd8, 0x86, 0x8d, 0x77, 0xb0, 0x0d, - 0x45, 0x98, 0xe2, 0x4d, 0xb6, 0x21, 0xbc, 0x83, 0x6d, 0xe8, 0x32, 0xe8, 0xd0, 0x2d, 0x6b, 0xf4, - 0x3e, 0x14, 0xef, 0xfe, 0x7d, 0x07, 0x3a, 0x92, 0x8b, 0x34, 0x8e, 0x7d, 0x68, 0x79, 0x59, 0xa5, - 0xd7, 0x78, 0x6e, 0xc0, 0x02, 0xf9, 0x3e, 0x5a, 0x05, 0xc8, 0x73, 0x1d, 0x0b, 0x88, 0xe3, 0x50, - 0x69, 0x24, 0xe3, 0x60, 0x24, 0x17, 0xc5, 0x04, 0x29, 0x2d, 0x12, 0xfb, 0x32, 0xa1, 0xd5, 0xf1, - 0x74, 0xd9, 0xfd, 0x23, 0x07, 0x96, 0x8d, 0x0e, 0x4b, 0x2e, 0x7c, 0x08, 0x6d, 0xfd, 0x98, 0x01, - 0xd7, 0x9b, 0xdb, 0xba, 0x2d, 0x36, 0xd9, 0x67, 0x16, 0x31, 0x2d, 0xa6, 0x7f, 0x41, 0x1d, 0x4c, - 0xa6, 0x63, 0xb9, 0xab, 0x98, 0x20, 0x64, 0xa4, 0x73, 0xce, 0x5f, 0x69, 0x12, 0xb1, 0xaf, 0x59, - 0x30, 0x8a, 0x0f, 0xa3, 0xcf, 0xa6, 0x89, 0x6a, 0x32, 0x3e, 0x6c, 0x02, 0xdd, 0xbf, 0x52, 0x81, - 0x15, 0xe1, 0x7c, 0xcb, 0x80, 0x87, 0xbe, 0x4f, 0x3a, 0x27, 0x62, 0x10, 0x42, 0x22, 0x77, 0x2f, - 0x79, 0xb2, 0xcc, 0xbe, 0xf5, 0x8e, 0x01, 0x03, 0x9d, 0x65, 0x3b, 0x63, 0x2d, 0xaa, 0x65, 0x6b, - 0xf1, 0x86, 0x99, 0x2e, 0x0b, 0xd5, 0xd7, 0xcb, 0x43, 0xf5, 0xef, 0x14, 0x1a, 0x7f, 0x34, 0x0f, - 0xf5, 0x64, 0x10, 0x4d, 0xb8, 0xbb, 0x06, 0xab, 0xf6, 0x14, 0x48, 0x45, 0xf5, 0x73, 0x07, 0xba, - 0x8f, 0xc5, 0xa9, 0x5b, 0x10, 0x9e, 0xec, 0x06, 0x49, 0x1a, 0xc5, 0xfa, 0x72, 0xfe, 0x35, 0x80, - 0x24, 0xf5, 0x63, 0x69, 0xd0, 0xca, 0x30, 0x79, 0x06, 0xc1, 0x91, 0xf0, 0x70, 0x28, 0xb0, 0x62, - 0x05, 0x75, 0xb9, 0x60, 0x7a, 0xc9, 0x20, 0x82, 0x65, 0xc0, 0xdc, 0x14, 0xb9, 0xe9, 0xd8, 0x65, - 0x7e, 0x46, 0xda, 0x5f, 0x78, 0xe7, 0x39, 0xa8, 0xfb, 0x1f, 0x1d, 0x58, 0xca, 0x3a, 0x49, 0x39, - 0x14, 0xb6, 0x0e, 0x91, 0x56, 0x4b, 0xa6, 0x43, 0x54, 0x00, 0x3f, 0x40, 0x33, 0x46, 0x59, 0xfb, - 0x19, 0x84, 0xe4, 0x5a, 0x96, 0xa2, 0xa9, 0xb2, 0x0b, 
0x4d, 0x90, 0xc8, 0x34, 0x45, 0x03, 0x4a, - 0x1a, 0x83, 0xb2, 0x44, 0xb7, 0x7c, 0xc6, 0x29, 0x7d, 0x25, 0x66, 0x5c, 0x15, 0x59, 0x47, 0x58, - 0x20, 0xe2, 0xa1, 0x12, 0xb2, 0x3e, 0xcc, 0x9d, 0xb9, 0xa1, 0x5f, 0x15, 0x11, 0x3b, 0xf3, 0xdf, - 0x76, 0xe0, 0x4a, 0xc9, 0xc4, 0x4b, 0xd9, 0xda, 0x86, 0xe5, 0x63, 0x8d, 0x54, 0x93, 0x23, 0x04, - 0x6c, 0x4d, 0x25, 0x01, 0xd8, 0x13, 0xe2, 0x15, 0x3f, 0xd0, 0xe6, 0xa4, 0x98, 0x6e, 0x2b, 0x97, - 0xbb, 0x88, 0x70, 0x0f, 0xa0, 0xb7, 0xf3, 0x1a, 0x45, 0x75, 0xcb, 0x7c, 0x11, 0x4d, 0xf1, 0xc2, - 0xfd, 0x82, 0x2a, 0x7a, 0x7b, 0xc0, 0xe7, 0x18, 0x16, 0xac, 0xba, 0xd8, 0x37, 0xde, 0xb5, 0x12, - 0x53, 0xaa, 0xd4, 0x5a, 0x89, 0x27, 0xdd, 0x54, 0x46, 0xb9, 0x01, 0x72, 0xcf, 0x60, 0xe9, 0xb3, - 0xe9, 0x28, 0x0d, 0xb2, 0xe7, 0xdd, 0xd8, 0xb7, 0xe4, 0x47, 0x54, 0x85, 0x9a, 0xba, 0xd2, 0xa6, - 0x4c, 0x3a, 0x9c, 0xb1, 0x31, 0xd6, 0xd4, 0x2f, 0xb6, 0x58, 0x44, 0xb8, 0x57, 0x60, 0x3d, 0x6b, - 0x52, 0xcc, 0x9d, 0x52, 0xe7, 0xbf, 0x70, 0x44, 0x6a, 0x94, 0xfd, 0xda, 0x1c, 0x7b, 0x02, 0x2b, - 0x49, 0x10, 0x9e, 0x8c, 0xb8, 0x59, 0x4f, 0x22, 0x67, 0xe2, 0xb2, 0xdd, 0x3d, 0xf9, 0x22, 0x9d, - 0x57, 0xf6, 0x05, 0x32, 0x48, 0x79, 0x47, 0x33, 0x06, 0xc9, 0x4d, 0x49, 0xd9, 0x00, 0xbe, 0x0d, - 0x8b, 0x76, 0x63, 0xec, 0x81, 0x4c, 0x06, 0xcf, 0x7a, 0x66, 0x9e, 0xca, 0xd8, 0x9c, 0x61, 0x51, - 0xba, 0x3f, 0x73, 0xa0, 0xeb, 0x71, 0x64, 0x63, 0x6e, 0x34, 0x2a, 0xb9, 0xe7, 0x61, 0xa1, 0xda, - 0xd9, 0x03, 0xd6, 0x49, 0xe6, 0x6a, 0xac, 0x1b, 0x33, 0x17, 0x65, 0xf7, 0x52, 0xc9, 0xa8, 0x1e, - 0x35, 0x60, 0x4e, 0x8e, 0x6f, 0x1d, 0x2e, 0xcb, 0x2e, 0xa9, 0xee, 0x64, 0x21, 0x7d, 0xab, 0x51, - 0x2b, 0xa4, 0xdf, 0x83, 0xae, 0x78, 0x35, 0xc1, 0x1c, 0x87, 0xf8, 0xf0, 0xce, 0x17, 0xd0, 0x32, - 0xde, 0x8e, 0x60, 0xeb, 0xb0, 0xf2, 0xf2, 0xe9, 0xf3, 0xfd, 0x9d, 0xc3, 0xc3, 0xfe, 0xc1, 0x8b, - 0x47, 0xdf, 0xd9, 0xf9, 0x7e, 0x7f, 0x77, 0xf3, 0x70, 0xb7, 0x73, 0x89, 0xad, 0x01, 0xdb, 0xdf, - 0x39, 0x7c, 0xbe, 0xb3, 0x6d, 0xc1, 0x1d, 0x76, 0x0d, 0x7a, 0x2f, 0xf6, 0x5f, 0x1c, 0xee, 
0x6c, - 0xf7, 0xcb, 0xbe, 0xab, 0xb0, 0xf7, 0xe1, 0x8a, 0xc4, 0x97, 0x7c, 0x5e, 0xbd, 0xf3, 0x10, 0x3a, - 0x79, 0x1f, 0xdf, 0x8a, 0x88, 0xbc, 0x29, 0x74, 0x72, 0xff, 0x67, 0x55, 0x58, 0x14, 0xe9, 0x5d, - 0xe2, 0x85, 0x43, 0x1e, 0xb3, 0xcf, 0x60, 0x5e, 0x3e, 0x95, 0xc9, 0xd4, 0x62, 0xd8, 0x8f, 0x73, - 0xf6, 0xd6, 0xf2, 0x60, 0x39, 0x83, 0x2b, 0x7f, 0xf5, 0x4f, 0xfe, 0xdb, 0xdf, 0xad, 0x2c, 0xb0, - 0xd6, 0xdd, 0xb3, 0x8f, 0xef, 0x9e, 0xf0, 0x30, 0xc1, 0x3a, 0xfe, 0x00, 0x20, 0x7b, 0x00, 0x92, - 0x75, 0xb5, 0x9f, 0x9b, 0x7b, 0x1d, 0xb3, 0x77, 0xa5, 0x04, 0x23, 0xeb, 0xbd, 0x42, 0xf5, 0xae, - 0xb8, 0x8b, 0x58, 0x6f, 0x10, 0x06, 0xa9, 0x78, 0x0c, 0xf2, 0x53, 0xe7, 0x0e, 0x1b, 0x42, 0xdb, - 0x7c, 0x9a, 0x91, 0xa9, 0x33, 0x8d, 0x92, 0xc7, 0x25, 0x7b, 0xef, 0x95, 0xe2, 0xd4, 0xea, 0x53, - 0x1b, 0x97, 0xdd, 0x0e, 0xb6, 0x31, 0x25, 0x8a, 0xac, 0x95, 0x91, 0x90, 0x89, 0xec, 0x05, 0x46, - 0x76, 0xd5, 0x60, 0xd3, 0xc2, 0xfb, 0x8f, 0xbd, 0xf7, 0x67, 0x60, 0x65, 0x5b, 0xef, 0x53, 0x5b, - 0xeb, 0x2e, 0xc3, 0xb6, 0x06, 0x44, 0xa3, 0xde, 0x7f, 0xfc, 0xd4, 0xb9, 0x73, 0xff, 0x7f, 0xdc, - 0x84, 0xa6, 0x3e, 0xeb, 0x64, 0x3f, 0x86, 0x05, 0x2b, 0xff, 0x8e, 0xa9, 0x61, 0x94, 0xa5, 0xeb, - 0xf5, 0xae, 0x96, 0x23, 0x65, 0xc3, 0xd7, 0xa8, 0xe1, 0x2e, 0x5b, 0xc3, 0x86, 0x65, 0x02, 0xdb, - 0x5d, 0xca, 0x24, 0x15, 0x17, 0xd1, 0x5e, 0x19, 0xb2, 0x2f, 0x1a, 0xbb, 0x9a, 0x17, 0x47, 0xab, - 0xb5, 0xf7, 0x67, 0x60, 0x65, 0x73, 0x57, 0xa9, 0xb9, 0x35, 0xb6, 0x6a, 0x36, 0xa7, 0xcf, 0x20, - 0x39, 0xdd, 0xbe, 0x34, 0x1f, 0x27, 0x64, 0xef, 0x6b, 0xc6, 0x2a, 0x7b, 0xb4, 0x50, 0xb3, 0x48, - 0xf1, 0xe5, 0x42, 0xb7, 0x4b, 0x4d, 0x31, 0x46, 0xcb, 0x67, 0xbe, 0x4d, 0xc8, 0x8e, 0xa0, 0x65, - 0x3c, 0x63, 0xc4, 0xae, 0xcc, 0x7c, 0x72, 0xa9, 0xd7, 0x2b, 0x43, 0x95, 0x0d, 0xc5, 0xac, 0xff, - 0x2e, 0x6e, 0xea, 0x3f, 0x84, 0xa6, 0x7e, 0x18, 0x87, 0xad, 0x1b, 0x0f, 0x15, 0x99, 0x0f, 0xf9, - 0xf4, 0xba, 0x45, 0x44, 0x19, 0xf3, 0x99, 0xb5, 0x23, 0xf3, 0xbd, 0x84, 0x96, 0xf1, 0xf8, 0x8d, - 0x1e, 0x40, 0xf1, 0x81, 0x1d, 
0x3d, 0x80, 0x92, 0xb7, 0x72, 0xdc, 0x65, 0x6a, 0xa2, 0xc5, 0x9a, - 0xc4, 0xdf, 0xe9, 0xeb, 0x28, 0x61, 0x7b, 0x70, 0x59, 0xea, 0xb8, 0x23, 0xfe, 0x65, 0x96, 0xa1, - 0xe4, 0x3d, 0xc8, 0x7b, 0x0e, 0x7b, 0x08, 0x0d, 0xf5, 0xc6, 0x11, 0x5b, 0x2b, 0x7f, 0xab, 0xa9, - 0xb7, 0x5e, 0x80, 0x4b, 0xdb, 0xe6, 0xfb, 0x00, 0xd9, 0x4b, 0x3b, 0x5a, 0x49, 0x14, 0x5e, 0xee, - 0xd1, 0x1c, 0x50, 0x7c, 0x96, 0xc7, 0x5d, 0xa3, 0x01, 0x76, 0x18, 0x29, 0x89, 0x90, 0x9f, 0xab, - 0x8b, 0xd6, 0x3f, 0x82, 0x96, 0xf1, 0xd8, 0x8e, 0x9e, 0xbe, 0xe2, 0x43, 0x3d, 0x7a, 0xfa, 0x4a, - 0xde, 0xe6, 0x71, 0x7b, 0x54, 0xfb, 0xaa, 0xbb, 0x84, 0xb5, 0x27, 0xc1, 0x49, 0x38, 0x16, 0x04, - 0xb8, 0x40, 0xa7, 0xb0, 0x60, 0xbd, 0xa8, 0xa3, 0x25, 0xb4, 0xec, 0xbd, 0x1e, 0x2d, 0xa1, 0xa5, - 0x8f, 0xf0, 0x28, 0x3e, 0x73, 0x97, 0xb1, 0x9d, 0x33, 0x22, 0x31, 0x5a, 0xfa, 0x01, 0xb4, 0x8c, - 0xd7, 0x71, 0xf4, 0x58, 0x8a, 0x0f, 0xf1, 0xe8, 0xb1, 0x94, 0x3d, 0xa6, 0xb3, 0x4a, 0x6d, 0x2c, - 0xba, 0xc4, 0x0a, 0x74, 0x65, 0x18, 0xeb, 0xfe, 0x31, 0x2c, 0xda, 0xef, 0xe5, 0x68, 0xd9, 0x2f, - 0x7d, 0x79, 0x47, 0xcb, 0xfe, 0x8c, 0x47, 0x76, 0x24, 0x4b, 0xdf, 0x59, 0xd1, 0x8d, 0xdc, 0xfd, - 0x5c, 0x66, 0x4a, 0x7d, 0xc1, 0xbe, 0x8b, 0x0a, 0x4e, 0xde, 0xe1, 0x66, 0xeb, 0x06, 0xd7, 0x9a, - 0x37, 0xbd, 0xb5, 0xbc, 0x14, 0xae, 0x7b, 0xdb, 0xcc, 0x2c, 0x2e, 0x3d, 0xd3, 0xae, 0x45, 0x77, - 0xb9, 0x8d, 0x5d, 0xcb, 0xbc, 0xee, 0x6d, 0xec, 0x5a, 0xd6, 0x95, 0xef, 0xfc, 0xae, 0x95, 0x06, - 0x58, 0x47, 0x08, 0x4b, 0xb9, 0x3b, 0x02, 0x5a, 0x2a, 0xca, 0xaf, 0x71, 0xf5, 0xae, 0xbd, 0xf9, - 0x6a, 0x81, 0xad, 0x41, 0x94, 0x12, 0xbc, 0xab, 0x2e, 0xcd, 0xfd, 0x45, 0x68, 0x9b, 0x6f, 0x7f, - 0x30, 0x53, 0x94, 0xf3, 0x2d, 0xbd, 0x57, 0x8a, 0xb3, 0x17, 0x97, 0xb5, 0xcd, 0x66, 0xd8, 0xf7, - 0x60, 0x4d, 0x8b, 0xba, 0x99, 0x76, 0x9e, 0xb0, 0x0f, 0x4a, 0x92, 0xd1, 0x4d, 0xcb, 0xa7, 0x77, - 0x65, 0x66, 0xb6, 0xfa, 0x3d, 0x07, 0x99, 0xc6, 0x7e, 0x54, 0x21, 0xdb, 0x30, 0xca, 0xde, 0x92, - 0xc8, 0x36, 0x8c, 0xd2, 0x97, 0x18, 0x14, 0xd3, 0xb0, 0x15, 0x6b, 
0x8e, 0xc4, 0x21, 0x33, 0xfb, - 0x01, 0x2c, 0x19, 0x17, 0x7b, 0x0e, 0x2f, 0xc2, 0x81, 0x16, 0x80, 0xe2, 0x9d, 0xd3, 0x5e, 0x99, - 0x5d, 0xef, 0xae, 0x53, 0xfd, 0xcb, 0xae, 0x35, 0x39, 0xc8, 0xfc, 0x5b, 0xd0, 0x32, 0x2f, 0x0d, - 0xbd, 0xa1, 0xde, 0x75, 0x03, 0x65, 0x5e, 0x99, 0xbc, 0xe7, 0xb0, 0x03, 0x91, 0x60, 0xa4, 0x1f, - 0x69, 0x8c, 0xe2, 0xfc, 0xf6, 0x69, 0x3f, 0xde, 0xa8, 0x17, 0xb2, 0xec, 0xd9, 0xce, 0xdb, 0xce, - 0x3d, 0x87, 0xfd, 0x03, 0x07, 0xda, 0xd6, 0xa5, 0x1e, 0x2b, 0x65, 0x23, 0xd7, 0xb3, 0xae, 0x89, - 0x33, 0xbb, 0xe6, 0x7a, 0x34, 0xec, 0xbd, 0x3b, 0xdf, 0xb6, 0xa6, 0xf5, 0x73, 0x2b, 0x68, 0xb4, - 0x91, 0x7f, 0xa9, 0xf1, 0x8b, 0x3c, 0x81, 0x79, 0xd3, 0xf7, 0x8b, 0x7b, 0x0e, 0xfb, 0xa5, 0x03, - 0x8b, 0x76, 0xa8, 0x53, 0x0f, 0xb7, 0x34, 0xa8, 0xaa, 0x17, 0x7f, 0x46, 0x7c, 0xf4, 0x07, 0xd4, - 0xcb, 0xe7, 0x77, 0x3c, 0xab, 0x97, 0xf2, 0x01, 0x8f, 0xdf, 0xac, 0xb7, 0xec, 0x53, 0xf1, 0xac, - 0xb0, 0x3a, 0x90, 0x60, 0xc5, 0x07, 0x6e, 0x35, 0xc3, 0x98, 0x4f, 0xd2, 0xd2, 0x22, 0xfc, 0x48, - 0xbc, 0x50, 0xa8, 0x62, 0xe6, 0xc8, 0x77, 0xef, 0xfa, 0xbd, 0x7b, 0x83, 0xc6, 0x74, 0xcd, 0xbd, - 0x62, 0x8d, 0x29, 0xbf, 0xc3, 0x6f, 0x8a, 0xde, 0xc9, 0xd7, 0x64, 0xb3, 0x2d, 0xaa, 0xf0, 0xc2, - 0xec, 0xec, 0x4e, 0x8e, 0x45, 0x27, 0x25, 0xb9, 0x25, 0x1c, 0xef, 0x58, 0x8d, 0x7b, 0x87, 0xfa, - 0x7a, 0xc3, 0xfd, 0x60, 0x66, 0x5f, 0xef, 0x52, 0xc0, 0x12, 0x7b, 0x7c, 0x00, 0x90, 0x1d, 0x1e, - 0xb2, 0xdc, 0xe1, 0x95, 0x56, 0x19, 0xc5, 0xf3, 0x45, 0x5b, 0x02, 0xd5, 0x19, 0x17, 0xd6, 0xf8, - 0x43, 0xa1, 0x00, 0x9f, 0xaa, 0x63, 0x2f, 0xd3, 0xcc, 0xb1, 0x4f, 0xf9, 0x2c, 0x33, 0x27, 0x5f, - 0xbf, 0xa5, 0xfe, 0xf4, 0x19, 0xda, 0x0b, 0x58, 0xd8, 0x8b, 0xa2, 0x57, 0xd3, 0x89, 0xce, 0xac, - 0xb0, 0xcf, 0x12, 0x76, 0xfd, 0xe4, 0xb4, 0x97, 0x1b, 0x85, 0x7b, 0x9d, 0xaa, 0xea, 0xb1, 0xae, - 0x51, 0xd5, 0xdd, 0xcf, 0xb3, 0xc3, 0xc9, 0x2f, 0x98, 0x0f, 0xcb, 0x5a, 0xab, 0xea, 0x8e, 0xf7, - 0xec, 0x6a, 0x2c, 0x5d, 0x9a, 0x6f, 0xc2, 0xb2, 0xc7, 0x55, 0x6f, 0xef, 0x26, 0xaa, 0x4e, 0xd2, - 0x29, 
0xed, 0x6d, 0x3e, 0xa0, 0x2b, 0x0b, 0x14, 0x90, 0x5f, 0xc9, 0x3a, 0xae, 0x23, 0xf9, 0xbd, - 0x05, 0x0b, 0x68, 0xef, 0x34, 0x13, 0xff, 0x22, 0xe6, 0x3f, 0xb9, 0xfb, 0xb9, 0x0c, 0xf5, 0x7f, - 0xa1, 0x76, 0x1a, 0x75, 0x16, 0x62, 0xed, 0x34, 0xb9, 0xc3, 0x13, 0x6b, 0xa7, 0x29, 0x1c, 0x9e, - 0x58, 0x53, 0xad, 0xce, 0x62, 0xd8, 0x08, 0x96, 0x0b, 0xe7, 0x2d, 0x7a, 0x93, 0x99, 0x75, 0x4a, - 0xd3, 0xbb, 0x3e, 0x9b, 0xc0, 0x6e, 0xed, 0x8e, 0xdd, 0xda, 0x21, 0x2c, 0x6c, 0x73, 0x31, 0x59, - 0x22, 0x75, 0x34, 0x77, 0x33, 0xcc, 0x4c, 0x4c, 0xcd, 0x6f, 0x09, 0x84, 0xb3, 0x4d, 0x09, 0xca, - 0xdb, 0x64, 0x3f, 0x84, 0xd6, 0x13, 0x9e, 0xaa, 0x5c, 0x51, 0x6d, 0xcc, 0xe6, 0x92, 0x47, 0x7b, - 0x25, 0xa9, 0xa6, 0x36, 0xcf, 0x50, 0x6d, 0x77, 0xf9, 0xf0, 0x84, 0x0b, 0xe5, 0xd4, 0x0f, 0x86, - 0x5f, 0xb0, 0xbf, 0x40, 0x95, 0xeb, 0x44, 0xf9, 0x35, 0x23, 0xf9, 0xcf, 0xac, 0x7c, 0x29, 0x07, - 0x2f, 0xab, 0x39, 0x8c, 0x86, 0xdc, 0x30, 0xaa, 0x42, 0x68, 0x19, 0x17, 0x4a, 0xb4, 0x00, 0x15, - 0xef, 0x26, 0x69, 0x01, 0x2a, 0xb9, 0x7f, 0xe2, 0xde, 0xa6, 0x76, 0x5c, 0x76, 0x3d, 0x6b, 0x47, - 0xdc, 0x39, 0xc9, 0x5a, 0xba, 0xfb, 0xb9, 0x3f, 0x4e, 0xbf, 0x60, 0x2f, 0xe9, 0x41, 0x1d, 0x33, - 0x1f, 0x36, 0xb3, 0xce, 0xf3, 0xa9, 0xb3, 0x7a, 0xb2, 0x0c, 0x94, 0x6d, 0xb1, 0x8b, 0xa6, 0xc8, - 0xf6, 0xfa, 0x16, 0xc0, 0x61, 0x1a, 0x4d, 0xb6, 0x7d, 0x3e, 0x8e, 0xc2, 0x4c, 0xd7, 0x66, 0xd9, - 0x98, 0x99, 0xfe, 0x32, 0x52, 0x32, 0xd9, 0x4b, 0xc3, 0x9d, 0xb1, 0xd2, 0x89, 0x15, 0x73, 0xcd, - 0x4c, 0xd8, 0xd4, 0x13, 0x52, 0x92, 0xb4, 0x79, 0xcf, 0x61, 0x9b, 0x00, 0xd9, 0x81, 0x9b, 0x76, - 0x4e, 0x0a, 0x67, 0x79, 0x5a, 0xed, 0x95, 0x9c, 0xce, 0x1d, 0x40, 0x33, 0x3b, 0xc1, 0x59, 0xcf, - 0xae, 0x6c, 0x59, 0xe7, 0x3d, 0x7a, 0x07, 0x2f, 0x9c, 0xab, 0xb8, 0x1d, 0x9a, 0x2a, 0x60, 0x0d, - 0x9c, 0x2a, 0x3a, 0x2c, 0x09, 0x60, 0x45, 0x74, 0x50, 0x1b, 0x38, 0x94, 0x49, 0xa8, 0x46, 0x52, - 0x72, 0xb6, 0xa1, 0xa5, 0xb9, 0x34, 0xe8, 0x6f, 0xc5, 0x58, 0x90, 0x5b, 0x45, 0x16, 0x23, 0xaa, - 0xe6, 0x31, 0x2c, 0x17, 0xa2, 0xd2, 0x5a, 
0xa4, 0x67, 0x1d, 0x14, 0x68, 0x91, 0x9e, 0x19, 0xd0, - 0x76, 0x2f, 0x53, 0x93, 0x4b, 0x2e, 0x90, 0x4f, 0x75, 0x1e, 0xa4, 0x83, 0x53, 0x6c, 0xee, 0x17, - 0x0e, 0xac, 0x94, 0x04, 0x9d, 0xd9, 0x87, 0xca, 0x3d, 0x9f, 0x19, 0x90, 0xee, 0x95, 0xc6, 0x24, - 0xdd, 0x43, 0x6a, 0xe7, 0x33, 0xf6, 0x1d, 0x6b, 0x63, 0x13, 0xe1, 0x40, 0x29, 0x99, 0x6f, 0x34, - 0x2a, 0x4a, 0x2d, 0x8a, 0x9f, 0xc0, 0xba, 0xe8, 0xc8, 0xe6, 0x68, 0x94, 0x8b, 0x97, 0x5e, 0x2b, - 0xfc, 0x67, 0x11, 0x2b, 0x0e, 0xdc, 0x9b, 0xfd, 0x9f, 0x47, 0x66, 0x18, 0xc0, 0xa2, 0xab, 0x6c, - 0x0a, 0x9d, 0x7c, 0x0c, 0x92, 0xcd, 0xae, 0xab, 0xf7, 0x81, 0xe5, 0x68, 0x16, 0xe3, 0x96, 0xee, - 0xef, 0x50, 0x63, 0x1f, 0xb8, 0xbd, 0xb2, 0x79, 0x11, 0xbe, 0x27, 0xae, 0xc7, 0x5f, 0xd6, 0x01, - 0xd3, 0xdc, 0x38, 0x55, 0x03, 0xb3, 0x22, 0xbc, 0xda, 0xd5, 0x2d, 0x8f, 0xb7, 0xde, 0xa4, 0xe6, - 0xaf, 0xbb, 0xef, 0x95, 0x35, 0x1f, 0x8b, 0x4f, 0x84, 0xd3, 0xbb, 0x9e, 0x97, 0x6b, 0xd5, 0x83, - 0xeb, 0x65, 0xeb, 0x3d, 0xd3, 0x7b, 0xc9, 0xcd, 0xf5, 0xa5, 0x7b, 0xce, 0xa3, 0x5b, 0x3f, 0xf8, - 0x9d, 0x93, 0x20, 0x3d, 0x9d, 0x1e, 0x6d, 0x0c, 0xa2, 0xf1, 0xdd, 0x91, 0x0a, 0xba, 0xc9, 0xbc, - 0xf7, 0xbb, 0xa3, 0x70, 0x78, 0x97, 0xbe, 0x3f, 0x9a, 0xa3, 0x7f, 0x54, 0xf4, 0x8d, 0xff, 0x1b, - 0x00, 0x00, 0xff, 0xff, 0x8c, 0x9e, 0x6c, 0x70, 0xda, 0x68, 0x00, 0x00, + // 12049 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0xbd, 0x59, 0x6c, 0x24, 0x49, + 0x7a, 0x18, 0xdc, 0x75, 0x91, 0x55, 0x5f, 0x1d, 0x2c, 0x06, 0xaf, 0x6a, 0xf6, 0xf4, 0x74, 0x4f, + 0xce, 0xec, 0x74, 0x4f, 0xcf, 0x2c, 0xbb, 0xa7, 0x77, 0x7b, 0x66, 0x77, 0xe6, 0xd7, 0x6a, 0x8b, + 0x64, 0xb1, 0x59, 0xdb, 0x64, 0x91, 0x93, 0x55, 0x9c, 0xd1, 0xac, 0x8e, 0xdc, 0x64, 0x55, 0x90, + 0x4c, 0x75, 0x55, 0x66, 0x4d, 0x66, 0x16, 0x8f, 0x5d, 0xcc, 0xff, 0x60, 0xf8, 0x10, 0x04, 0xdb, + 0x80, 0x60, 0xcb, 0x80, 0x65, 0x09, 0x3e, 0x04, 0xdb, 0x30, 0x0c, 0x08, 0x02, 0x56, 0x7e, 0x30, + 0xe0, 0x77, 0xbd, 0xf8, 0x80, 0x21, 0xf9, 0xc1, 0x86, 
0x20, 0xc0, 0xb0, 0x2d, 0xbf, 0x19, 0x02, + 0xfc, 0x6c, 0xc0, 0x88, 0xef, 0x8b, 0xc8, 0x8c, 0xac, 0x4a, 0x76, 0xf7, 0xec, 0xae, 0xf7, 0x85, + 0xac, 0xfc, 0xe2, 0x8b, 0xfb, 0x8b, 0x2f, 0xbe, 0x2b, 0x22, 0xa0, 0xe4, 0x8f, 0xfb, 0x1b, 0x63, + 0xdf, 0x0b, 0x3d, 0x56, 0x18, 0xba, 0xfe, 0xb8, 0xbf, 0xfe, 0xda, 0xa9, 0xe7, 0x9d, 0x0e, 0xf9, + 0x43, 0x7b, 0xec, 0x3c, 0xb4, 0x5d, 0xd7, 0x0b, 0xed, 0xd0, 0xf1, 0xdc, 0x80, 0x90, 0x8c, 0x1f, + 0x40, 0xed, 0x29, 0x77, 0xbb, 0x9c, 0x0f, 0x4c, 0xfe, 0xc5, 0x84, 0x07, 0x21, 0x7b, 0x17, 0x16, + 0x6d, 0xfe, 0x43, 0xce, 0x07, 0xd6, 0xd8, 0x0e, 0x82, 0xf1, 0x99, 0x6f, 0x07, 0xbc, 0x91, 0xb9, + 0x9b, 0xb9, 0x5f, 0x31, 0xeb, 0x94, 0x70, 0x18, 0xc1, 0xd9, 0x1b, 0x50, 0x09, 0x04, 0x2a, 0x77, + 0x43, 0xdf, 0x1b, 0x5f, 0x35, 0xb2, 0x88, 0x57, 0x16, 0xb0, 0x16, 0x81, 0x8c, 0x21, 0x2c, 0x44, + 0x35, 0x04, 0x63, 0xcf, 0x0d, 0x38, 0x7b, 0x04, 0xcb, 0x7d, 0x67, 0x7c, 0xc6, 0x7d, 0x0b, 0x33, + 0x8f, 0x5c, 0x3e, 0xf2, 0x5c, 0xa7, 0xdf, 0xc8, 0xdc, 0xcd, 0xdd, 0x2f, 0x99, 0x8c, 0xd2, 0x44, + 0x8e, 0x7d, 0x99, 0xc2, 0xee, 0xc1, 0x02, 0x77, 0x09, 0xce, 0x07, 0x98, 0x4b, 0x56, 0x55, 0x8b, + 0xc1, 0x22, 0x83, 0xf1, 0x1b, 0x59, 0x58, 0x6c, 0xbb, 0x4e, 0xf8, 0x99, 0x3d, 0x1c, 0xf2, 0x50, + 0xf5, 0xe9, 0x1e, 0x2c, 0x5c, 0x20, 0x00, 0xfb, 0x74, 0xe1, 0xf9, 0x03, 0xd9, 0xa3, 0x1a, 0x81, + 0x0f, 0x25, 0xf4, 0xda, 0x96, 0x65, 0xaf, 0x6d, 0x59, 0xea, 0x70, 0xe5, 0xae, 0x19, 0xae, 0x7b, + 0xb0, 0xe0, 0xf3, 0xbe, 0x77, 0xce, 0xfd, 0x2b, 0xeb, 0xc2, 0x71, 0x07, 0xde, 0x45, 0x23, 0x7f, + 0x37, 0x73, 0xbf, 0x60, 0xd6, 0x14, 0xf8, 0x33, 0x84, 0xb2, 0x4d, 0x58, 0xe8, 0x9f, 0xd9, 0xae, + 0xcb, 0x87, 0xd6, 0xb1, 0xdd, 0x7f, 0x3e, 0x19, 0x07, 0x8d, 0xc2, 0xdd, 0xcc, 0xfd, 0xf2, 0xe3, + 0x9b, 0x1b, 0x38, 0xab, 0x1b, 0x5b, 0x67, 0xb6, 0xbb, 0x89, 0x29, 0x5d, 0xd7, 0x1e, 0x07, 0x67, + 0x5e, 0x68, 0xd6, 0x64, 0x0e, 0x02, 0x07, 0xc6, 0x32, 0x30, 0x7d, 0x24, 0x68, 0xec, 0x8d, 0x7f, + 0x99, 0x81, 0xa5, 0x23, 0x77, 0xe8, 0xf5, 0x9f, 0xff, 0x84, 0x43, 0x94, 0xd2, 0x87, 0xec, 
0xab, + 0xf6, 0x21, 0xf7, 0x55, 0xfb, 0xb0, 0x0a, 0xcb, 0xc9, 0xc6, 0xca, 0x5e, 0x70, 0x58, 0x11, 0xb9, + 0x4f, 0xb9, 0x6a, 0x96, 0xea, 0xc6, 0x3b, 0x50, 0xef, 0x4f, 0x7c, 0x9f, 0xbb, 0x33, 0xfd, 0x58, + 0x90, 0xf0, 0xa8, 0x23, 0x6f, 0x40, 0xc5, 0xe5, 0x17, 0x31, 0x9a, 0xa4, 0x5d, 0x97, 0x5f, 0x28, + 0x14, 0xa3, 0x01, 0xab, 0xd3, 0xd5, 0xc8, 0x06, 0xfc, 0x45, 0x06, 0xf2, 0x47, 0xe1, 0xa5, 0xc7, + 0x9e, 0x40, 0xc5, 0x1e, 0x0c, 0x7c, 0x1e, 0x04, 0x56, 0x78, 0x35, 0xa6, 0x95, 0x52, 0x7b, 0xcc, + 0x64, 0x17, 0x9b, 0x94, 0xd4, 0xbb, 0x1a, 0x73, 0xb3, 0x6c, 0xc7, 0x1f, 0xac, 0x01, 0xf3, 0xf2, + 0x13, 0xeb, 0x2d, 0x99, 0xea, 0x93, 0xdd, 0x06, 0xb0, 0x47, 0xde, 0xc4, 0x0d, 0xad, 0xc0, 0x0e, + 0x71, 0xc4, 0x72, 0x66, 0x89, 0x20, 0x5d, 0x3b, 0x64, 0xb7, 0xa0, 0x34, 0x7e, 0x6e, 0x05, 0x7d, + 0xdf, 0x19, 0x87, 0x48, 0x3c, 0x25, 0xb3, 0x38, 0x7e, 0xde, 0xc5, 0x6f, 0xf6, 0x2e, 0x14, 0xbd, + 0x49, 0x38, 0xf6, 0x1c, 0x37, 0x94, 0xf4, 0xb2, 0x20, 0x1b, 0x72, 0x30, 0x09, 0x0f, 0x05, 0xd8, + 0x8c, 0x10, 0xd8, 0x5b, 0x50, 0xed, 0x7b, 0xee, 0x89, 0xe3, 0x8f, 0x88, 0x23, 0x34, 0xe6, 0xb0, + 0xae, 0x24, 0xd0, 0xf8, 0xc3, 0x2c, 0x94, 0x7b, 0xbe, 0xed, 0x06, 0x76, 0x5f, 0x00, 0xd8, 0x1a, + 0xcc, 0x87, 0x97, 0xd6, 0x99, 0x1d, 0x9c, 0x61, 0x57, 0x4b, 0xe6, 0x5c, 0x78, 0xb9, 0x6b, 0x07, + 0x67, 0x6c, 0x15, 0xe6, 0xa8, 0x95, 0xd8, 0xa1, 0x9c, 0x29, 0xbf, 0xc4, 0x02, 0x71, 0x27, 0x23, + 0x2b, 0x59, 0x55, 0x0e, 0x29, 0xa6, 0xee, 0x4e, 0x46, 0x5b, 0x3a, 0x5c, 0x74, 0xfe, 0x58, 0x4c, + 0x37, 0x55, 0x40, 0xdd, 0x2b, 0x21, 0x04, 0xeb, 0x78, 0x03, 0x2a, 0x32, 0x99, 0x3b, 0xa7, 0x67, + 0xd4, 0xc7, 0x82, 0x59, 0x26, 0x04, 0x04, 0x89, 0x12, 0x42, 0x67, 0xc4, 0xad, 0x20, 0xb4, 0x47, + 0x63, 0xd9, 0xa5, 0x92, 0x80, 0x74, 0x05, 0x00, 0x93, 0xbd, 0xd0, 0x1e, 0x5a, 0x27, 0x9c, 0x07, + 0x8d, 0x79, 0x99, 0x2c, 0x20, 0x3b, 0x9c, 0x07, 0xec, 0x6b, 0x50, 0x1b, 0xf0, 0x20, 0xb4, 0xe4, + 0x64, 0xf0, 0xa0, 0x51, 0xc4, 0x95, 0x5f, 0x15, 0xd0, 0xa6, 0x02, 0xb2, 0xd7, 0x00, 0x7c, 0xfb, + 0xc2, 0x12, 0x03, 0xc1, 0x2f, 
0x1b, 0x25, 0x9a, 0x05, 0xdf, 0xbe, 0xe8, 0x5d, 0xee, 0xf2, 0x4b, + 0x41, 0x35, 0x4f, 0x79, 0xa8, 0x0d, 0x5a, 0x20, 0xa9, 0xd3, 0xd8, 0x03, 0xa6, 0x81, 0xb7, 0x79, + 0x68, 0x3b, 0xc3, 0x80, 0x7d, 0x00, 0x95, 0x50, 0x43, 0x46, 0x36, 0x58, 0x8e, 0x48, 0x48, 0xcb, + 0x60, 0x26, 0xf0, 0x8c, 0x33, 0x28, 0xee, 0x70, 0xbe, 0xe7, 0x8c, 0x9c, 0x90, 0xad, 0x42, 0xe1, + 0xc4, 0xb9, 0xe4, 0x44, 0xec, 0xb9, 0xdd, 0x1b, 0x26, 0x7d, 0xb2, 0x3b, 0x00, 0xf8, 0xc3, 0x1a, + 0x45, 0xd4, 0xb4, 0x7b, 0xc3, 0x2c, 0x21, 0x6c, 0x3f, 0xb0, 0x43, 0xb6, 0x0e, 0xf3, 0x63, 0xee, + 0xf7, 0xb9, 0x9a, 0xb7, 0xdd, 0x1b, 0xa6, 0x02, 0x6c, 0xce, 0x43, 0x61, 0x28, 0x4a, 0x37, 0xfe, + 0xb8, 0x00, 0xe5, 0x2e, 0x77, 0xa3, 0x55, 0xc6, 0x20, 0x2f, 0x06, 0x44, 0xae, 0x2c, 0xfc, 0xcd, + 0xde, 0x84, 0x32, 0x0e, 0x5d, 0x10, 0xfa, 0x8e, 0x7b, 0x4a, 0x54, 0xbd, 0x99, 0x6d, 0x64, 0x4c, + 0x10, 0xe0, 0x2e, 0x42, 0x59, 0x1d, 0x72, 0xf6, 0x48, 0x51, 0xb5, 0xf8, 0xc9, 0x6e, 0x42, 0xd1, + 0x1e, 0x85, 0xd4, 0xbc, 0x0a, 0x82, 0xe7, 0xed, 0x51, 0x88, 0x4d, 0x7b, 0x03, 0x2a, 0x63, 0xfb, + 0x6a, 0x24, 0xd6, 0x72, 0x44, 0x0e, 0x15, 0xb3, 0x2c, 0x61, 0x48, 0x10, 0x8f, 0x61, 0x49, 0x47, + 0x51, 0x95, 0x17, 0xa2, 0xca, 0x17, 0x35, 0x6c, 0xd9, 0x86, 0x7b, 0xb0, 0xa0, 0xf2, 0xf8, 0xd4, + 0x1f, 0x24, 0x93, 0x92, 0x59, 0x93, 0x60, 0xd5, 0xcb, 0xfb, 0x50, 0x3f, 0x71, 0x5c, 0x7b, 0x68, + 0xf5, 0x87, 0xe1, 0xb9, 0x35, 0xe0, 0xc3, 0xd0, 0x46, 0x8a, 0x29, 0x98, 0x35, 0x84, 0x6f, 0x0d, + 0xc3, 0xf3, 0x6d, 0x01, 0x65, 0xef, 0x41, 0xe9, 0x84, 0x73, 0x0b, 0x07, 0xab, 0x51, 0x4c, 0x2c, + 0x3c, 0x35, 0x43, 0x66, 0xf1, 0x44, 0xcd, 0xd5, 0x7b, 0x50, 0xf7, 0x26, 0xe1, 0xa9, 0xe7, 0xb8, + 0xa7, 0x96, 0xe0, 0x77, 0x96, 0x33, 0x40, 0x1a, 0xca, 0x6f, 0x66, 0x1f, 0x65, 0xcc, 0x9a, 0x4a, + 0x13, 0x9c, 0xa7, 0x3d, 0x60, 0x6f, 0xc3, 0xc2, 0xd0, 0x0e, 0x42, 0xeb, 0xcc, 0x1b, 0x5b, 0xe3, + 0xc9, 0xf1, 0x73, 0x7e, 0xd5, 0xa8, 0xe2, 0x40, 0x54, 0x05, 0x78, 0xd7, 0x1b, 0x1f, 0x22, 0x50, + 0x50, 0x36, 0xb6, 0x93, 0x1a, 0x01, 0x77, 0x33, 0xf7, 0xab, 0x66, 
0x49, 0x40, 0xa8, 0xd2, 0xcf, + 0x61, 0x09, 0xa7, 0xa7, 0x3f, 0x09, 0x42, 0x6f, 0x64, 0x09, 0x5e, 0xed, 0x0f, 0x82, 0x46, 0x19, + 0x69, 0xed, 0x1d, 0xd9, 0x58, 0x6d, 0x8e, 0x37, 0xb6, 0x79, 0x10, 0x6e, 0x21, 0xb2, 0x49, 0xb8, + 0x62, 0x43, 0xbf, 0x32, 0x17, 0x07, 0xd3, 0x70, 0xf6, 0x1e, 0x30, 0x7b, 0x38, 0xf4, 0x2e, 0xac, + 0x80, 0x0f, 0x4f, 0x2c, 0x39, 0x88, 0x8d, 0xda, 0xdd, 0xcc, 0xfd, 0xa2, 0x59, 0xc7, 0x94, 0x2e, + 0x1f, 0x9e, 0x1c, 0x12, 0x9c, 0x7d, 0x00, 0xb8, 0x98, 0xac, 0x13, 0x6e, 0x87, 0x13, 0x9f, 0x07, + 0x8d, 0x85, 0xbb, 0xb9, 0xfb, 0xb5, 0xc7, 0x8b, 0xd1, 0x78, 0x21, 0x78, 0xd3, 0x09, 0xcd, 0x8a, + 0xc0, 0x93, 0xdf, 0xc1, 0xfa, 0x36, 0xac, 0xa6, 0x37, 0x49, 0x10, 0x95, 0x18, 0x15, 0x41, 0x8c, + 0x79, 0x53, 0xfc, 0x64, 0xcb, 0x50, 0x38, 0xb7, 0x87, 0x13, 0x2e, 0x79, 0x3a, 0x7d, 0x7c, 0x94, + 0xfd, 0x56, 0xc6, 0xf8, 0xa3, 0x0c, 0x54, 0xa8, 0x97, 0x52, 0x16, 0x79, 0x13, 0xaa, 0x8a, 0x1a, + 0xb8, 0xef, 0x7b, 0xbe, 0xe4, 0x6a, 0x8a, 0xf2, 0x5a, 0x02, 0x26, 0x76, 0x15, 0x85, 0x34, 0xf6, + 0xb9, 0x33, 0xb2, 0x4f, 0x55, 0xd1, 0x8a, 0x94, 0x0e, 0x25, 0x98, 0xbd, 0x1f, 0x97, 0xe7, 0x7b, + 0x93, 0x90, 0xcb, 0x3d, 0xaf, 0x22, 0xbb, 0x67, 0x0a, 0x58, 0x54, 0x3a, 0x7e, 0xbd, 0x02, 0x9d, + 0x1b, 0xbf, 0x9d, 0x01, 0x26, 0x9a, 0xdd, 0xf3, 0xa8, 0x00, 0x49, 0xa1, 0xd3, 0x39, 0x33, 0xaf, + 0xbc, 0x42, 0xb2, 0x2f, 0x5a, 0x21, 0x06, 0x14, 0xa8, 0xed, 0xf9, 0x94, 0xb6, 0x53, 0xd2, 0xf7, + 0xf2, 0xc5, 0x5c, 0x3d, 0x6f, 0xfc, 0x97, 0x1c, 0x2c, 0x6f, 0xd1, 0x96, 0xdd, 0xec, 0xf7, 0xf9, + 0x38, 0x5a, 0x3b, 0x77, 0xa0, 0xec, 0x7a, 0x03, 0xae, 0x28, 0x96, 0x1a, 0x06, 0x02, 0xa4, 0x91, + 0xeb, 0x99, 0xed, 0xb8, 0xd4, 0x70, 0x1a, 0xcc, 0x12, 0x42, 0xb0, 0xd9, 0x6f, 0xc3, 0xc2, 0x98, + 0xbb, 0x03, 0x7d, 0x89, 0x90, 0x50, 0x55, 0x95, 0x60, 0xb9, 0x3a, 0xee, 0x40, 0xf9, 0x64, 0x42, + 0x78, 0x82, 0xb1, 0xe4, 0x91, 0x06, 0x40, 0x82, 0x9a, 0xc4, 0x5f, 0xc6, 0x93, 0xe0, 0x0c, 0x53, + 0x0b, 0x98, 0x3a, 0x2f, 0xbe, 0x45, 0xd2, 0x6d, 0x80, 0xc1, 0x24, 0x08, 0xe5, 0x8a, 0x99, 0xc3, + 0xc4, 
0x92, 0x80, 0xd0, 0x8a, 0xf9, 0x3a, 0x2c, 0x8d, 0xec, 0x4b, 0x0b, 0x69, 0xc7, 0x72, 0x5c, + 0xeb, 0x64, 0x88, 0x7b, 0xce, 0x3c, 0xe2, 0xd5, 0x47, 0xf6, 0xe5, 0xa7, 0x22, 0xa5, 0xed, 0xee, + 0x20, 0x5c, 0xb0, 0x15, 0x25, 0xee, 0xf8, 0x3c, 0xe0, 0xfe, 0x39, 0x47, 0x4e, 0x90, 0x8f, 0x64, + 0x1a, 0x93, 0xa0, 0xa2, 0x45, 0x23, 0xd1, 0xef, 0x70, 0xd8, 0xa7, 0x65, 0x6f, 0xce, 0x8f, 0x1c, + 0x77, 0x37, 0x1c, 0xf6, 0xc5, 0xbe, 0x22, 0xf8, 0xc8, 0x98, 0xfb, 0xd6, 0xf3, 0x0b, 0x5c, 0xc3, + 0x79, 0xe4, 0x1b, 0x87, 0xdc, 0x7f, 0x76, 0x21, 0xb6, 0xfe, 0x7e, 0x80, 0x8c, 0xc8, 0xbe, 0x6a, + 0x94, 0x71, 0x81, 0x17, 0xfb, 0x81, 0x60, 0x41, 0xf6, 0x95, 0x58, 0x84, 0xa2, 0xb5, 0x36, 0xce, + 0x02, 0x1f, 0x60, 0xf1, 0x01, 0x72, 0xd4, 0x2a, 0x36, 0xb6, 0x29, 0x13, 0x44, 0x3d, 0x81, 0xa0, + 0x7a, 0xd5, 0xd8, 0x93, 0xa1, 0x7d, 0x1a, 0x20, 0x4b, 0xa9, 0x9a, 0x15, 0x09, 0xdc, 0x11, 0x30, + 0xe3, 0x33, 0x12, 0xb2, 0xb4, 0xb9, 0x95, 0x6b, 0x46, 0x6c, 0xf5, 0x08, 0xc1, 0x79, 0x2d, 0x9a, + 0xf2, 0x2b, 0x6d, 0xd2, 0xb2, 0x29, 0x93, 0x66, 0xfc, 0x5e, 0x06, 0x2a, 0xb2, 0x64, 0x14, 0x4a, + 0xd8, 0x06, 0x30, 0x35, 0x8b, 0xe1, 0xa5, 0x33, 0xb0, 0x8e, 0xaf, 0x42, 0x1e, 0x10, 0xd1, 0xec, + 0xde, 0x30, 0xeb, 0x32, 0xad, 0x77, 0xe9, 0x0c, 0x36, 0x45, 0x0a, 0x7b, 0x00, 0xf5, 0x04, 0x7e, + 0x10, 0xfa, 0x44, 0xd1, 0xbb, 0x37, 0xcc, 0x9a, 0x86, 0xdd, 0x0d, 0x7d, 0xb1, 0x46, 0x84, 0xc8, + 0x33, 0x09, 0x2d, 0xc7, 0x1d, 0xf0, 0x4b, 0x24, 0xa3, 0xaa, 0x59, 0x26, 0x58, 0x5b, 0x80, 0x36, + 0x6b, 0x50, 0xd1, 0x8b, 0x33, 0x4e, 0xa1, 0xa8, 0xe4, 0x25, 0x14, 0x18, 0xa6, 0x9a, 0x64, 0x96, + 0xc2, 0xa8, 0x25, 0x37, 0xa1, 0x98, 0x6c, 0x81, 0x39, 0x1f, 0xbe, 0x72, 0xc5, 0xc6, 0x77, 0xa0, + 0xbe, 0x27, 0x88, 0xc7, 0x15, 0xc4, 0x2a, 0xe5, 0xbf, 0x55, 0x98, 0xd3, 0x16, 0x4d, 0xc9, 0x94, + 0x5f, 0x62, 0xcf, 0x3d, 0xf3, 0x82, 0x50, 0xd6, 0x82, 0xbf, 0x8d, 0x3f, 0xce, 0x00, 0x6b, 0x05, + 0xa1, 0x33, 0xb2, 0x43, 0xbe, 0xc3, 0x23, 0xb6, 0x70, 0x00, 0x15, 0x51, 0x5a, 0xcf, 0x6b, 0x92, + 0x40, 0x46, 0x02, 0xc5, 0xbb, 0x72, 0x19, 
0xcf, 0x66, 0xd8, 0xd0, 0xb1, 0x89, 0xcd, 0x27, 0x0a, + 0x10, 0xab, 0x2c, 0xb4, 0xfd, 0x53, 0x1e, 0xa2, 0x18, 0x27, 0xe5, 0x7d, 0x20, 0x90, 0x10, 0xe0, + 0xd6, 0x7f, 0x11, 0x16, 0x67, 0xca, 0xd0, 0xf9, 0x72, 0x29, 0x85, 0x2f, 0xe7, 0x74, 0xbe, 0x6c, + 0xc1, 0x52, 0xa2, 0x5d, 0x92, 0xd2, 0xd6, 0x60, 0x5e, 0x2c, 0x08, 0x21, 0x1c, 0x64, 0x48, 0xaa, + 0x3c, 0xe1, 0x5c, 0x88, 0xc1, 0x0f, 0x61, 0xf9, 0x84, 0x73, 0xdf, 0x0e, 0x31, 0x11, 0x57, 0x8c, + 0x98, 0x21, 0x59, 0xf0, 0xa2, 0x4c, 0xeb, 0xda, 0xe1, 0x21, 0xf7, 0xc5, 0x4c, 0x19, 0xff, 0x2d, + 0x03, 0x0b, 0x82, 0x83, 0xee, 0xdb, 0xee, 0x95, 0x1a, 0xa7, 0xbd, 0xd4, 0x71, 0xba, 0xaf, 0x6d, + 0x86, 0x1a, 0xf6, 0x57, 0x1d, 0xa4, 0xdc, 0xf4, 0x20, 0xb1, 0xbb, 0x50, 0x49, 0xb4, 0xb5, 0x80, + 0x6d, 0x85, 0x20, 0x6a, 0xe4, 0x4f, 0x3f, 0x8c, 0x6f, 0x43, 0x3d, 0x6e, 0xb6, 0x1c, 0x43, 0x06, + 0x79, 0x41, 0x92, 0xb2, 0x00, 0xfc, 0x6d, 0xfc, 0x6e, 0x86, 0x10, 0xb7, 0x3c, 0x27, 0x92, 0x4e, + 0x05, 0xa2, 0x90, 0x7b, 0x15, 0xa2, 0xf8, 0x7d, 0xad, 0x54, 0xff, 0xd3, 0x77, 0x56, 0x2c, 0x9d, + 0x80, 0xbb, 0x03, 0xcb, 0x1e, 0x0e, 0x91, 0xf9, 0x16, 0xcd, 0x79, 0xf1, 0xdd, 0x1c, 0x0e, 0x8d, + 0x7b, 0xb0, 0xa8, 0xb5, 0xee, 0x05, 0xfd, 0xe8, 0x00, 0xdb, 0x73, 0x82, 0xf0, 0xc8, 0x0d, 0xc6, + 0x9a, 0xe0, 0x76, 0x0b, 0x4a, 0x82, 0xc3, 0x8a, 0x96, 0xd1, 0x92, 0x2d, 0x98, 0x82, 0xe5, 0x8a, + 0x76, 0x05, 0x98, 0x68, 0x5f, 0xca, 0xc4, 0xac, 0x4c, 0xb4, 0x2f, 0x31, 0xd1, 0xf8, 0x16, 0x2c, + 0x25, 0xca, 0x93, 0x55, 0xbf, 0x01, 0x85, 0x49, 0x78, 0xe9, 0x29, 0xd1, 0xbc, 0x2c, 0x29, 0x44, + 0x28, 0x80, 0x26, 0xa5, 0x18, 0x1f, 0xc3, 0x62, 0x87, 0x5f, 0xc8, 0x45, 0xac, 0x1a, 0xf2, 0x36, + 0xe4, 0x5f, 0xa2, 0x14, 0x62, 0xba, 0xb1, 0x01, 0x4c, 0xcf, 0x2c, 0x6b, 0xd5, 0x74, 0xc4, 0x4c, + 0x42, 0x47, 0x34, 0xde, 0x06, 0xd6, 0x75, 0x4e, 0xdd, 0x7d, 0x1e, 0x04, 0xf6, 0x69, 0xb4, 0xec, + 0xeb, 0x90, 0x1b, 0x05, 0xa7, 0x92, 0x47, 0x89, 0x9f, 0xc6, 0x37, 0x60, 0x29, 0x81, 0x27, 0x0b, + 0x7e, 0x0d, 0x4a, 0x81, 0x73, 0xea, 0xa2, 0x60, 0x25, 0x8b, 0x8e, 0x01, 0xc6, 
0x0e, 0x2c, 0x7f, + 0xca, 0x7d, 0xe7, 0xe4, 0xea, 0x65, 0xc5, 0x27, 0xcb, 0xc9, 0x4e, 0x97, 0xd3, 0x82, 0x95, 0xa9, + 0x72, 0x64, 0xf5, 0x44, 0xbe, 0x72, 0x26, 0x8b, 0x26, 0x7d, 0x68, 0x7c, 0x2f, 0xab, 0xf3, 0x3d, + 0xe3, 0x08, 0xd8, 0x96, 0xe7, 0xba, 0xbc, 0x1f, 0x1e, 0x72, 0xee, 0xc7, 0x56, 0xaa, 0x98, 0x56, + 0xcb, 0x8f, 0xd7, 0xe4, 0xc8, 0x4e, 0x33, 0x53, 0x49, 0xc4, 0x0c, 0xf2, 0x63, 0xee, 0x8f, 0xb0, + 0xe0, 0xa2, 0x89, 0xbf, 0x8d, 0x15, 0x58, 0x4a, 0x14, 0x2b, 0xf5, 0xfa, 0x47, 0xb0, 0xb2, 0xed, + 0x04, 0xfd, 0xd9, 0x0a, 0xd7, 0x60, 0x7e, 0x3c, 0x39, 0xb6, 0x92, 0x7c, 0xf9, 0x19, 0xbf, 0x12, + 0xda, 0xde, 0x74, 0x0e, 0x59, 0xd6, 0x5f, 0xcd, 0x40, 0x7e, 0xb7, 0xb7, 0xb7, 0xc5, 0xd6, 0xa1, + 0xe8, 0xb8, 0x7d, 0x6f, 0x24, 0x04, 0x2f, 0xea, 0x73, 0xf4, 0x7d, 0xed, 0x02, 0xbb, 0x05, 0x25, + 0x94, 0xd7, 0x84, 0x6a, 0x2b, 0x45, 0x9f, 0xa2, 0x00, 0xec, 0x79, 0xfd, 0xe7, 0x42, 0xa7, 0xe6, + 0x97, 0x63, 0xc7, 0x47, 0xad, 0x59, 0x29, 0xc3, 0x79, 0xda, 0xeb, 0xe3, 0x04, 0xd2, 0x88, 0x8d, + 0x7f, 0x5d, 0x84, 0x79, 0xb9, 0xdb, 0xd2, 0xce, 0x1d, 0x3a, 0xe7, 0x3c, 0xde, 0xb9, 0xc5, 0x97, + 0x90, 0x07, 0x7c, 0x3e, 0xf2, 0xc2, 0x48, 0x60, 0xa3, 0x39, 0xa8, 0x10, 0x50, 0x8a, 0x6c, 0x9a, + 0xd0, 0x40, 0x26, 0x86, 0x1c, 0x21, 0xf5, 0xf5, 0xad, 0xfc, 0x16, 0xcc, 0xab, 0xbd, 0x3f, 0x1f, + 0xe9, 0x34, 0x73, 0x7d, 0x92, 0xd6, 0xd6, 0xa1, 0xd8, 0xb7, 0xc7, 0x76, 0xdf, 0x09, 0xaf, 0x24, + 0x43, 0x88, 0xbe, 0x45, 0xe9, 0x43, 0xaf, 0x6f, 0x0f, 0xad, 0x63, 0x7b, 0x68, 0xbb, 0x7d, 0x2e, + 0x75, 0xf7, 0x0a, 0x02, 0x37, 0x09, 0x26, 0xf4, 0x73, 0xd9, 0x4e, 0x85, 0x45, 0x2a, 0xbc, 0x6c, + 0xbd, 0x42, 0x13, 0xc2, 0xa5, 0x37, 0x1a, 0x39, 0x42, 0xcb, 0x20, 0x31, 0x2c, 0x67, 0x96, 0x08, + 0xb2, 0xc3, 0xb1, 0xb7, 0x32, 0xf9, 0x82, 0x86, 0xae, 0x44, 0x55, 0x11, 0xf0, 0x33, 0x32, 0x24, + 0xcc, 0xca, 0x62, 0x39, 0x4d, 0x16, 0x7b, 0x17, 0x16, 0x27, 0x6e, 0xc0, 0xc3, 0x70, 0xc8, 0x07, + 0x51, 0x5b, 0xca, 0x88, 0x54, 0x8f, 0x12, 0x54, 0x73, 0x36, 0x60, 0x89, 0x8c, 0x0e, 0x81, 0x1d, + 0x7a, 0xc1, 0x99, 
0x13, 0x58, 0x81, 0xd0, 0x90, 0x48, 0xdd, 0x5d, 0xc4, 0xa4, 0xae, 0x4c, 0xe9, + 0x92, 0x8a, 0xb4, 0x36, 0x85, 0xef, 0xf3, 0x3e, 0x77, 0xce, 0xf9, 0x00, 0xe5, 0xb4, 0x9c, 0xb9, + 0x92, 0xc8, 0x63, 0xca, 0x44, 0x14, 0xba, 0x27, 0x23, 0x6b, 0x32, 0x1e, 0xd8, 0x42, 0x58, 0xa9, + 0x91, 0x30, 0xec, 0x4e, 0x46, 0x47, 0x04, 0x61, 0x8f, 0x40, 0x49, 0x62, 0x52, 0x3e, 0x5c, 0x48, + 0xf0, 0x33, 0x41, 0xac, 0x66, 0x45, 0x62, 0x90, 0xa0, 0x98, 0x90, 0x39, 0xeb, 0x53, 0x32, 0x67, + 0x03, 0xe6, 0xc7, 0xbe, 0x73, 0x6e, 0x87, 0xbc, 0xb1, 0x48, 0x0c, 0x5c, 0x7e, 0x0a, 0xce, 0xe0, + 0xb8, 0x4e, 0xe8, 0xd8, 0xa1, 0xe7, 0x37, 0x18, 0xa6, 0xc5, 0x00, 0xf6, 0x00, 0x16, 0x91, 0x46, + 0x82, 0xd0, 0x0e, 0x27, 0x81, 0x94, 0x40, 0x97, 0x90, 0x98, 0x50, 0x86, 0xee, 0x22, 0x1c, 0x85, + 0x50, 0xf6, 0x0d, 0x58, 0x25, 0xb2, 0xc0, 0x1c, 0x52, 0xb2, 0x46, 0x81, 0x60, 0x19, 0x87, 0x62, + 0x09, 0x53, 0x05, 0x7d, 0x4b, 0xf9, 0x5a, 0x48, 0x07, 0x4f, 0x60, 0x4d, 0x92, 0xc9, 0x4c, 0xae, + 0x15, 0xcc, 0xb5, 0x4c, 0xc9, 0x53, 0xd9, 0x36, 0x60, 0x51, 0x34, 0xc9, 0xe9, 0x5b, 0x32, 0xb7, + 0x58, 0x09, 0xab, 0xa2, 0xf5, 0xa8, 0x29, 0x2d, 0x50, 0xa2, 0x89, 0x69, 0xcf, 0xf8, 0x15, 0xfb, + 0x0e, 0x2c, 0x10, 0xc9, 0xa0, 0x7a, 0x85, 0x9c, 0x7e, 0x1d, 0x39, 0xfd, 0x8a, 0xb2, 0x70, 0x46, + 0xa9, 0xc8, 0xec, 0x6b, 0xfd, 0xc4, 0xb7, 0x58, 0x0e, 0x43, 0xe7, 0x84, 0x87, 0xce, 0x88, 0x37, + 0xd6, 0x88, 0xc0, 0xd4, 0xb7, 0x58, 0xa9, 0x93, 0x31, 0xa6, 0x34, 0x88, 0x2f, 0xd0, 0x17, 0xd2, + 0xee, 0xd0, 0x0b, 0xb8, 0x32, 0x51, 0x35, 0x6e, 0xca, 0x45, 0x28, 0x80, 0x4a, 0x86, 0x14, 0x82, + 0x38, 0x29, 0x3d, 0x91, 0x21, 0xf1, 0x16, 0x12, 0x43, 0x95, 0x74, 0x1f, 0x65, 0x4c, 0x14, 0xbb, + 0xf8, 0x99, 0x7d, 0xa1, 0x38, 0xc8, 0x6b, 0x38, 0xbf, 0x20, 0x40, 0x92, 0x77, 0xfc, 0x38, 0x43, + 0x1b, 0xa2, 0xe4, 0x1f, 0x81, 0xa6, 0xde, 0x11, 0xe7, 0xb0, 0x3c, 0x77, 0x78, 0x25, 0x99, 0x09, + 0x10, 0xe8, 0xc0, 0x1d, 0xe2, 0x6a, 0x76, 0x5c, 0x1d, 0x85, 0x78, 0x6f, 0x45, 0x01, 0x11, 0xe9, + 0x0e, 0x94, 0xc7, 0x93, 0xe3, 0xa1, 0xd3, 0x27, 0x94, 
0x1c, 0x95, 0x42, 0x20, 0x44, 0x10, 0xfa, + 0x2d, 0x51, 0x14, 0x61, 0xe4, 0x11, 0xa3, 0x2c, 0x61, 0x88, 0x82, 0xbc, 0x9d, 0xfb, 0xc8, 0x4e, + 0x2a, 0x26, 0xfe, 0x36, 0x36, 0x61, 0x39, 0xd9, 0x68, 0xb9, 0xf1, 0x3c, 0x80, 0xa2, 0xe4, 0x55, + 0xca, 0xf0, 0x51, 0xd3, 0x4c, 0xd1, 0x42, 0x45, 0x8b, 0xd2, 0x8d, 0xdf, 0x2f, 0xc0, 0x92, 0x84, + 0x6e, 0x89, 0xa1, 0xed, 0x4e, 0x46, 0x23, 0xdb, 0x4f, 0x61, 0x82, 0x99, 0x17, 0x33, 0xc1, 0xec, + 0x0c, 0x13, 0x4c, 0x6a, 0xbe, 0xc4, 0x43, 0x93, 0x9a, 0xaf, 0x98, 0x4b, 0x52, 0x46, 0x74, 0x3b, + 0x68, 0x55, 0x82, 0x7b, 0x64, 0x6f, 0x9d, 0x61, 0xd9, 0x85, 0x14, 0x96, 0xad, 0x33, 0xdc, 0xb9, + 0x29, 0x86, 0xfb, 0x06, 0x10, 0xd1, 0xa8, 0xd9, 0x9f, 0x27, 0xfd, 0x04, 0x61, 0xd2, 0x98, 0x7a, + 0x0f, 0x16, 0xa6, 0x79, 0x1c, 0x31, 0xd3, 0x5a, 0x0a, 0x87, 0x73, 0x46, 0x1c, 0x77, 0x2b, 0x0d, + 0xb9, 0x24, 0x39, 0x9c, 0x33, 0xe2, 0x7b, 0x98, 0xa2, 0xf0, 0x5b, 0x00, 0x54, 0x37, 0x2e, 0x1a, + 0xc0, 0x45, 0xf3, 0x76, 0x72, 0x2e, 0xf4, 0x51, 0xdf, 0x10, 0x1f, 0x13, 0x9f, 0xe3, 0x2a, 0x2a, + 0x61, 0x4e, 0x5c, 0x40, 0x1f, 0x42, 0xcd, 0x1b, 0x73, 0xd7, 0x8a, 0x79, 0x4d, 0x19, 0x8b, 0xaa, + 0xcb, 0xa2, 0xda, 0x0a, 0x6e, 0x56, 0x05, 0x5e, 0xf4, 0xc9, 0xbe, 0x4d, 0x83, 0xcc, 0xb5, 0x9c, + 0x95, 0x6b, 0x72, 0xd6, 0x10, 0x31, 0xfa, 0x36, 0x7e, 0x33, 0x03, 0x65, 0xad, 0x39, 0x6c, 0x05, + 0x16, 0xb7, 0x0e, 0x0e, 0x0e, 0x5b, 0x66, 0xb3, 0xd7, 0xfe, 0xb4, 0x65, 0x6d, 0xed, 0x1d, 0x74, + 0x5b, 0xf5, 0x1b, 0x02, 0xbc, 0x77, 0xb0, 0xd5, 0xdc, 0xb3, 0x76, 0x0e, 0xcc, 0x2d, 0x05, 0xce, + 0xb0, 0x55, 0x60, 0x66, 0x6b, 0xff, 0xa0, 0xd7, 0x4a, 0xc0, 0xb3, 0xac, 0x0e, 0x95, 0x4d, 0xb3, + 0xd5, 0xdc, 0xda, 0x95, 0x90, 0x1c, 0x5b, 0x86, 0xfa, 0xce, 0x51, 0x67, 0xbb, 0xdd, 0x79, 0x6a, + 0x6d, 0x35, 0x3b, 0x5b, 0xad, 0xbd, 0xd6, 0x76, 0x3d, 0xcf, 0xaa, 0x50, 0x6a, 0x6e, 0x36, 0x3b, + 0xdb, 0x07, 0x9d, 0xd6, 0x76, 0xbd, 0x60, 0xfc, 0x79, 0x06, 0x56, 0x70, 0xa0, 0x06, 0xd3, 0x2b, + 0xf4, 0x2e, 0x94, 0xfb, 0x9e, 0x37, 0x16, 0x6a, 0x50, 0xbc, 0xdd, 0xeb, 0x20, 0xb1, 0xfa, 
0x88, + 0xb3, 0x9e, 0x78, 0x7e, 0x9f, 0xcb, 0x05, 0x0a, 0x08, 0xda, 0x11, 0x10, 0x41, 0x20, 0x92, 0xc2, + 0x08, 0x83, 0xd6, 0x67, 0x99, 0x60, 0x84, 0xb2, 0x0a, 0x73, 0xc7, 0x3e, 0xb7, 0xfb, 0x67, 0x72, + 0x69, 0xca, 0x2f, 0xf6, 0x4e, 0xac, 0xa0, 0xf7, 0xc5, 0x84, 0x0f, 0xf9, 0x00, 0xe9, 0xb3, 0x68, + 0x2e, 0x48, 0xf8, 0x96, 0x04, 0x8b, 0xad, 0xc2, 0x3e, 0xb6, 0xdd, 0x81, 0xe7, 0xf2, 0x81, 0xd4, + 0x03, 0x62, 0x80, 0x71, 0x08, 0xab, 0xd3, 0xfd, 0x93, 0x8b, 0xf9, 0x03, 0x6d, 0x31, 0x93, 0x58, + 0xbe, 0x7e, 0x3d, 0x01, 0x69, 0x0b, 0xfb, 0x6f, 0xe5, 0x21, 0x2f, 0xc4, 0xb4, 0x6b, 0x25, 0x3a, + 0x5d, 0xee, 0xce, 0xcd, 0xf8, 0x66, 0xd0, 0x0e, 0x40, 0xfb, 0x37, 0x19, 0x9b, 0x4a, 0x08, 0xc1, + 0x7d, 0x3b, 0x4a, 0xf6, 0x79, 0xff, 0x5c, 0x5a, 0x9b, 0x28, 0xd9, 0xe4, 0xfd, 0x73, 0x54, 0x78, + 0xec, 0x90, 0xf2, 0xd2, 0x62, 0x9c, 0x0f, 0xec, 0x10, 0x73, 0xca, 0x24, 0xcc, 0x37, 0x1f, 0x25, + 0x61, 0xae, 0x06, 0xcc, 0x3b, 0xee, 0xb1, 0x37, 0x71, 0x07, 0xb8, 0xf6, 0x8a, 0xa6, 0xfa, 0x44, + 0x57, 0x10, 0xb2, 0x09, 0xb1, 0x4b, 0xd0, 0x52, 0x2b, 0x0a, 0x40, 0x4f, 0xec, 0x13, 0xef, 0x43, + 0x29, 0xb8, 0x72, 0xfb, 0xfa, 0x02, 0x5b, 0x96, 0xe3, 0x23, 0x7a, 0xbf, 0xd1, 0xbd, 0x72, 0xfb, + 0xb8, 0x9c, 0x8a, 0x81, 0xfc, 0xc5, 0x9e, 0x40, 0x31, 0x32, 0xca, 0x12, 0x7b, 0xbc, 0xa9, 0xe7, + 0x50, 0x96, 0x58, 0xd2, 0x7d, 0x23, 0x54, 0xf6, 0x10, 0xe6, 0xd0, 0x72, 0x1a, 0x34, 0x2a, 0x98, + 0x49, 0x09, 0xe3, 0xa2, 0x19, 0xe8, 0x85, 0xe1, 0x03, 0xb4, 0xa2, 0x9a, 0x12, 0x6d, 0xfd, 0x19, + 0x54, 0x13, 0x65, 0xe9, 0x1a, 0x6e, 0x95, 0x34, 0xdc, 0xb7, 0x74, 0x0d, 0x37, 0x66, 0xd3, 0x32, + 0x9b, 0xae, 0xf1, 0xfe, 0x22, 0x14, 0x55, 0x57, 0xc4, 0x22, 0x3a, 0xea, 0x3c, 0xeb, 0x1c, 0x7c, + 0xd6, 0xb1, 0xba, 0x9f, 0x77, 0xb6, 0xea, 0x37, 0xd8, 0x02, 0x94, 0x9b, 0x5b, 0xb8, 0x2e, 0x11, + 0x90, 0x11, 0x28, 0x87, 0xcd, 0x6e, 0x37, 0x82, 0x64, 0x8d, 0x1d, 0xa8, 0x4f, 0xb7, 0x54, 0xd0, + 0x64, 0xa8, 0x60, 0xd2, 0xae, 0x1c, 0x03, 0x84, 0xfe, 0x42, 0xa6, 0x62, 0x12, 0x92, 0xe9, 0xc3, + 0x78, 0x02, 0x75, 0xb1, 0xe9, 
0x88, 0xa1, 0x0a, 0x34, 0xfb, 0xec, 0x50, 0x08, 0x5e, 0xba, 0x6d, + 0xb9, 0x68, 0x96, 0x09, 0x86, 0x55, 0x19, 0x1f, 0xc0, 0xa2, 0x96, 0x2d, 0xd6, 0x37, 0xc5, 0x46, + 0x36, 0xad, 0x6f, 0xa2, 0x76, 0x41, 0x29, 0xc6, 0x1a, 0xac, 0x88, 0xcf, 0xd6, 0x39, 0x77, 0xc3, + 0xee, 0xe4, 0x98, 0x1c, 0x82, 0x8e, 0xe7, 0x0a, 0xad, 0xa3, 0x14, 0xa5, 0x5c, 0x4f, 0xe4, 0x1b, + 0x52, 0x35, 0xcd, 0x22, 0x69, 0xac, 0x6b, 0x35, 0x60, 0xc6, 0x0d, 0xfc, 0x9b, 0x50, 0x51, 0x4b, + 0x11, 0x48, 0x0c, 0xeb, 0x61, 0xab, 0x65, 0x5a, 0x07, 0x9d, 0xbd, 0x76, 0x47, 0x70, 0x3b, 0x31, + 0xac, 0x08, 0xd8, 0xd9, 0x41, 0x48, 0xc6, 0xa8, 0x43, 0xed, 0x29, 0x0f, 0xdb, 0xee, 0x89, 0xa7, + 0x9c, 0x5f, 0x7f, 0x63, 0x0e, 0x16, 0x22, 0x50, 0xac, 0xe2, 0x9e, 0x73, 0x3f, 0x70, 0x3c, 0x17, + 0xa5, 0xd5, 0x92, 0xa9, 0x3e, 0x05, 0x77, 0x92, 0x32, 0x3a, 0x6e, 0x81, 0xcb, 0x98, 0x2a, 0xa5, + 0x7a, 0xdc, 0xff, 0xee, 0xc1, 0x82, 0x33, 0xe0, 0x6e, 0xe8, 0x84, 0x57, 0x56, 0xc2, 0x60, 0x56, + 0x53, 0x60, 0xb9, 0x07, 0x2e, 0x43, 0xc1, 0x1e, 0x3a, 0xb6, 0x72, 0xb4, 0xd2, 0x87, 0x80, 0xf6, + 0xbd, 0xa1, 0xe7, 0xa3, 0xe4, 0x5a, 0x32, 0xe9, 0x83, 0x3d, 0x82, 0x65, 0x21, 0x41, 0xeb, 0x56, + 0x4c, 0x64, 0x30, 0x64, 0xbb, 0x63, 0xee, 0x64, 0x74, 0x18, 0x5b, 0x32, 0x45, 0x8a, 0xd8, 0xf9, + 0x44, 0x0e, 0x29, 0xea, 0x44, 0x19, 0x48, 0x19, 0x5b, 0x74, 0x27, 0xa3, 0x26, 0xa6, 0x44, 0xf8, + 0x8f, 0x61, 0x45, 0xe0, 0x47, 0xc2, 0x51, 0x94, 0x63, 0x01, 0x73, 0x88, 0xc2, 0xda, 0x32, 0x2d, + 0xca, 0x73, 0x0b, 0x4a, 0xd4, 0x2a, 0x41, 0x12, 0x05, 0x12, 0xc2, 0xb1, 0x29, 0xdc, 0x0f, 0x66, + 0x7c, 0xa2, 0x73, 0xb4, 0x8d, 0x4f, 0xf9, 0x44, 0x35, 0xaf, 0x6a, 0x71, 0xda, 0xab, 0xfa, 0x18, + 0x56, 0x8e, 0x05, 0x8d, 0x9e, 0x71, 0x7b, 0xc0, 0x7d, 0x2b, 0xa6, 0x7c, 0x52, 0x36, 0x96, 0x44, + 0xe2, 0x2e, 0xa6, 0x45, 0x0b, 0x45, 0x48, 0x29, 0x82, 0x6f, 0xf0, 0x81, 0x15, 0x7a, 0x16, 0x0a, + 0x2f, 0xc8, 0x81, 0x8a, 0x66, 0x95, 0xc0, 0x3d, 0x6f, 0x4b, 0x00, 0x93, 0x78, 0xa7, 0xbe, 0x3d, + 0x3e, 0x93, 0xea, 0x40, 0x84, 0xf7, 0x54, 0x00, 0xd9, 0x6b, 0x30, 
0x2f, 0xd6, 0x84, 0xcb, 0xc9, + 0x75, 0x45, 0x02, 0xb7, 0x02, 0xb1, 0xb7, 0x60, 0x0e, 0xeb, 0x08, 0x1a, 0x75, 0x5c, 0x10, 0x95, + 0x98, 0xd3, 0x3b, 0xae, 0x29, 0xd3, 0x84, 0x28, 0x38, 0xf1, 0x1d, 0x62, 0x43, 0x25, 0x13, 0x7f, + 0xb3, 0xef, 0x6a, 0x3c, 0x6d, 0x09, 0xf3, 0xbe, 0x25, 0xf3, 0x4e, 0x91, 0xe2, 0x75, 0xec, 0xed, + 0x67, 0xca, 0xad, 0xbe, 0x97, 0x2f, 0x96, 0xeb, 0x15, 0xe3, 0x43, 0x28, 0xd0, 0xe8, 0x08, 0x22, + 0xc4, 0xb1, 0xcb, 0x48, 0x22, 0x44, 0x68, 0x03, 0xe6, 0x5d, 0x1e, 0x5e, 0x78, 0xfe, 0x73, 0x65, + 0x52, 0x96, 0x9f, 0xc6, 0x0f, 0xd1, 0x16, 0x12, 0xf9, 0xcb, 0x49, 0xad, 0x13, 0xe4, 0x41, 0xd3, + 0x1b, 0x9c, 0xd9, 0xd2, 0x3c, 0x53, 0x44, 0x40, 0xf7, 0xcc, 0x9e, 0x21, 0x8f, 0xec, 0xac, 0xcb, + 0xfc, 0x2d, 0xa8, 0x29, 0x0f, 0x7d, 0x60, 0x0d, 0xf9, 0x49, 0x28, 0xc9, 0xbd, 0x22, 0xdd, 0xf3, + 0xc1, 0x1e, 0x3f, 0x09, 0x8d, 0x7d, 0x58, 0x94, 0x04, 0x79, 0x30, 0xe6, 0xaa, 0xea, 0x6f, 0xa5, + 0x49, 0xc3, 0xe5, 0xc7, 0x4b, 0xc9, 0x9d, 0x98, 0x22, 0x0f, 0x12, 0x22, 0xb2, 0xf1, 0x09, 0x30, + 0x7d, 0x9f, 0x96, 0xe5, 0x49, 0x99, 0x54, 0x59, 0xe2, 0x95, 0x43, 0x2b, 0x92, 0x7c, 0x9d, 0x81, + 0x18, 0x9d, 0x60, 0xd2, 0xef, 0xab, 0xc8, 0x89, 0xa2, 0xa9, 0x3e, 0x8d, 0x3f, 0xc9, 0xc0, 0x12, + 0x16, 0xa6, 0xa4, 0x79, 0xc9, 0x85, 0x7f, 0xe2, 0x46, 0x8a, 0xf9, 0xd1, 0x85, 0x23, 0xfa, 0xf8, + 0xea, 0xb6, 0xcf, 0xfc, 0x8c, 0xed, 0xf3, 0x1d, 0xa8, 0x0f, 0xf8, 0xd0, 0xc1, 0x20, 0x1a, 0x25, + 0x6b, 0x90, 0xfc, 0xbe, 0xa0, 0xe0, 0x52, 0x97, 0x33, 0xfe, 0x5e, 0x06, 0x16, 0x49, 0x94, 0x41, + 0xad, 0x58, 0x0e, 0xd4, 0xc7, 0x4a, 0x0d, 0x94, 0xac, 0x4a, 0xf6, 0x29, 0xde, 0xe2, 0x11, 0x4a, + 0xc8, 0xbb, 0x37, 0xa4, 0x7a, 0x28, 0xa1, 0xec, 0x23, 0xd4, 0x40, 0x5c, 0x0b, 0x81, 0x29, 0x41, + 0x39, 0xc9, 0x49, 0xd9, 0xbd, 0x81, 0xea, 0x89, 0x8b, 0xa0, 0xcd, 0xa2, 0xd0, 0x4b, 0x05, 0xd8, + 0xd8, 0x81, 0x6a, 0xa2, 0x9a, 0x84, 0x81, 0xb6, 0x42, 0x06, 0xda, 0x19, 0x27, 0x48, 0x76, 0xd6, + 0x09, 0x72, 0x05, 0x4b, 0x26, 0xb7, 0x07, 0x57, 0x3b, 0x9e, 0x7f, 0x18, 0x1c, 0x87, 0x3b, 0x24, + 0x1f, 
0x0a, 0xfe, 0x1e, 0x79, 0xf6, 0x12, 0x56, 0x50, 0xe5, 0xe0, 0x51, 0xca, 0xee, 0xd7, 0xa0, + 0x16, 0xbb, 0x00, 0x35, 0x4b, 0x5a, 0x35, 0xf2, 0x02, 0xa2, 0x41, 0x4d, 0x28, 0x8a, 0xc1, 0x71, + 0x28, 0x6d, 0x69, 0xf8, 0xdb, 0xf8, 0x6b, 0x79, 0x60, 0x82, 0x9a, 0xa7, 0x08, 0x66, 0xca, 0x79, + 0x99, 0x9d, 0x71, 0x5e, 0x3e, 0x02, 0xa6, 0x21, 0x28, 0x9f, 0x6a, 0x2e, 0xf2, 0xa9, 0xd6, 0x63, + 0x5c, 0xe9, 0x52, 0x7d, 0x04, 0xcb, 0x52, 0xd8, 0x4e, 0x36, 0x95, 0x48, 0x83, 0x91, 0xd4, 0x9d, + 0x68, 0xaf, 0x72, 0x5c, 0x0a, 0xe5, 0x9d, 0x6c, 0x65, 0xe8, 0xb8, 0x54, 0x6a, 0xbb, 0x46, 0x80, + 0x73, 0x2f, 0x25, 0xc0, 0xf9, 0x19, 0x02, 0xd4, 0x4c, 0x37, 0xc5, 0xa4, 0xe9, 0xc6, 0x80, 0xaa, + 0x72, 0x4f, 0x52, 0x54, 0x06, 0x49, 0x96, 0x65, 0xe9, 0xa3, 0xc4, 0xc8, 0x8c, 0xfb, 0x50, 0x57, + 0xf6, 0x95, 0xc8, 0x38, 0x44, 0x11, 0x07, 0xd2, 0x3c, 0xb7, 0xa5, 0x4c, 0x44, 0x09, 0x53, 0x7c, + 0x79, 0xca, 0x14, 0xff, 0x2e, 0x2c, 0x06, 0x82, 0x7e, 0xad, 0x89, 0x2b, 0xc3, 0x83, 0xf8, 0x00, + 0xf5, 0xb0, 0xa2, 0x59, 0xc7, 0x84, 0xa3, 0x18, 0x3e, 0x6b, 0xf8, 0xa8, 0xa6, 0x18, 0x3e, 0x9e, + 0xc4, 0x9e, 0xbc, 0xe0, 0xcc, 0x19, 0xa1, 0x50, 0x11, 0x87, 0xd2, 0xc8, 0x01, 0xee, 0x9e, 0x39, + 0x23, 0x53, 0xb9, 0x8d, 0xc5, 0x87, 0xf1, 0xbf, 0x33, 0x50, 0x17, 0x74, 0x90, 0x58, 0x62, 0xdf, + 0x06, 0x64, 0x06, 0xaf, 0xb8, 0xc2, 0xca, 0x02, 0x57, 0x2d, 0xb0, 0x0f, 0x01, 0x57, 0x8c, 0x25, + 0x94, 0x4e, 0xb9, 0xbe, 0x1a, 0xc9, 0xf5, 0x15, 0xf3, 0xd0, 0xdd, 0x1b, 0xa4, 0x9c, 0x08, 0x08, + 0xfb, 0x36, 0x94, 0x04, 0x61, 0x22, 0x95, 0xc8, 0x08, 0x2e, 0x25, 0x9a, 0xa5, 0xac, 0x11, 0x91, + 0x75, 0x2c, 0x3f, 0xd3, 0x9c, 0xaf, 0xf9, 0x14, 0xe7, 0xab, 0xb6, 0x80, 0x77, 0x01, 0x9e, 0xf1, + 0xab, 0x3d, 0xaf, 0x8f, 0x2a, 0xf1, 0x6d, 0x00, 0x41, 0xcb, 0x27, 0xf6, 0xc8, 0x91, 0x16, 0x9d, + 0x82, 0x59, 0x7a, 0xce, 0xaf, 0x76, 0x10, 0x20, 0x26, 0x52, 0x24, 0xc7, 0xab, 0xb8, 0x60, 0x16, + 0x9f, 0xf3, 0x2b, 0x5a, 0xc2, 0x16, 0x54, 0x9f, 0xf1, 0xab, 0x6d, 0x4e, 0x52, 0xa8, 0xe7, 0x0b, + 0x22, 0xf2, 0xed, 0x0b, 0x21, 0x76, 0x26, 
0x1c, 0xa7, 0x65, 0xdf, 0xbe, 0x78, 0xc6, 0xaf, 0x94, + 0x13, 0x77, 0x5e, 0xa4, 0x0f, 0xbd, 0xbe, 0xdc, 0x37, 0x55, 0x08, 0x48, 0xdc, 0x28, 0x73, 0xee, + 0x39, 0xfe, 0x36, 0xfe, 0x32, 0x03, 0x55, 0xd1, 0x7e, 0x64, 0xcb, 0x62, 0xca, 0x54, 0x24, 0x51, + 0x26, 0x8e, 0x24, 0x7a, 0x2c, 0xb9, 0x1a, 0xf1, 0xf8, 0xec, 0xf5, 0x3c, 0x1e, 0xe7, 0x86, 0x18, + 0xfc, 0xfb, 0x50, 0xa2, 0x65, 0x29, 0xd6, 0x79, 0x2e, 0x31, 0xc1, 0x89, 0x0e, 0x99, 0x45, 0x44, + 0x7b, 0x46, 0x81, 0x0b, 0x9a, 0x75, 0x90, 0x86, 0xb8, 0xe4, 0x47, 0x36, 0xc1, 0x94, 0x69, 0x28, + 0x5c, 0x13, 0xb8, 0xa0, 0x9b, 0xde, 0xe6, 0x66, 0x4c, 0x6f, 0x07, 0x50, 0x14, 0x53, 0x8d, 0x9d, + 0x4d, 0x29, 0x34, 0x93, 0x56, 0xa8, 0x90, 0x04, 0x6c, 0xb1, 0x29, 0x08, 0x46, 0x97, 0x95, 0x92, + 0x80, 0x1d, 0xf0, 0x43, 0x64, 0x76, 0x19, 0x28, 0x6b, 0x2b, 0x00, 0xad, 0x97, 0xd1, 0x78, 0xd1, + 0x72, 0x49, 0x92, 0x78, 0x62, 0xc0, 0x77, 0x6f, 0x98, 0xd5, 0x7e, 0x62, 0x06, 0x36, 0x24, 0xad, + 0x62, 0xce, 0x6c, 0x22, 0xe8, 0x49, 0x35, 0x5c, 0x11, 0xa8, 0xf8, 0xbd, 0x39, 0x07, 0x79, 0x81, + 0x6a, 0x7c, 0x0c, 0x8b, 0x5a, 0x33, 0xc8, 0x0e, 0xf0, 0xaa, 0x3d, 0x34, 0x7e, 0x25, 0xca, 0x2c, + 0xea, 0x20, 0xff, 0x92, 0x0a, 0x02, 0xe1, 0x03, 0xea, 0xb8, 0x0c, 0x36, 0x21, 0x90, 0x40, 0x7b, + 0xe5, 0xc0, 0x84, 0x5f, 0x83, 0x25, 0xad, 0xf4, 0x1d, 0xc7, 0xb5, 0x87, 0xce, 0x0f, 0x71, 0xc3, + 0x0f, 0x9c, 0x53, 0x77, 0xaa, 0x7c, 0x02, 0x7d, 0xa5, 0xf2, 0xff, 0x7e, 0x16, 0x96, 0x65, 0x05, + 0x18, 0xd6, 0xe7, 0x08, 0x29, 0x6e, 0x3f, 0x38, 0x65, 0xdf, 0x86, 0xaa, 0x18, 0x1b, 0xcb, 0xe7, + 0xa7, 0x4e, 0x10, 0x72, 0xe5, 0xd7, 0x4a, 0x61, 0x5c, 0x62, 0x33, 0x17, 0xa8, 0xa6, 0xc4, 0x64, + 0x1f, 0x43, 0x19, 0xb3, 0x92, 0x9d, 0x45, 0x4e, 0x44, 0x63, 0x36, 0x23, 0x0d, 0xf4, 0xee, 0x0d, + 0x13, 0x82, 0x78, 0xd8, 0x3f, 0x86, 0x32, 0xce, 0xe1, 0x39, 0x0e, 0xe4, 0x14, 0xab, 0x9a, 0x19, + 0x68, 0x91, 0x79, 0x1c, 0x0f, 0x7b, 0x13, 0xaa, 0xc4, 0xac, 0xe4, 0x38, 0xc9, 0x70, 0xa1, 0xf5, + 0xd9, 0xec, 0x6a, 0x24, 0x45, 0xe3, 0xc7, 0xda, 0xf7, 0x66, 0x09, 0xe6, 0x43, 
0xdf, 0x39, 0x3d, + 0xe5, 0xbe, 0xb1, 0x1a, 0x0d, 0x8d, 0xe0, 0xc2, 0xbc, 0x1b, 0xf2, 0xb1, 0x90, 0xcd, 0x8d, 0x7f, + 0x9b, 0x81, 0xb2, 0xe4, 0xab, 0x3f, 0xb1, 0x33, 0x6d, 0x5d, 0x8b, 0x8b, 0x25, 0x93, 0x4e, 0x1c, + 0x06, 0x7b, 0x0f, 0x16, 0x46, 0x42, 0x4e, 0x17, 0x7a, 0x64, 0xc2, 0x93, 0x56, 0x53, 0x60, 0x29, + 0x26, 0x6f, 0xc0, 0x12, 0x4a, 0xcd, 0x81, 0x15, 0x3a, 0x43, 0x4b, 0x25, 0xca, 0x18, 0xd4, 0x45, + 0x4a, 0xea, 0x39, 0xc3, 0x7d, 0x99, 0x20, 0x84, 0xc7, 0x20, 0xb4, 0x4f, 0xb9, 0x5c, 0xdb, 0xf4, + 0x61, 0x34, 0x60, 0x75, 0x4a, 0x85, 0x54, 0xfa, 0xf1, 0xff, 0x59, 0x84, 0xb5, 0x99, 0x24, 0xa9, + 0x27, 0x47, 0x1e, 0xa4, 0xa1, 0x33, 0x3a, 0xf6, 0x22, 0xfb, 0x6a, 0x46, 0xf3, 0x20, 0xed, 0x89, + 0x14, 0x65, 0x5f, 0xe5, 0xb0, 0xa2, 0x08, 0x12, 0x0d, 0xa4, 0x91, 0x96, 0x99, 0x45, 0x1d, 0xe8, + 0xfd, 0xe4, 0x26, 0x36, 0x5d, 0x9d, 0x82, 0xeb, 0xa2, 0xd1, 0xd2, 0x78, 0x06, 0x16, 0xb0, 0x5f, + 0x87, 0x46, 0x44, 0xf7, 0x52, 0x6c, 0xd7, 0x54, 0x66, 0x51, 0xd3, 0x7b, 0x2f, 0xa9, 0x29, 0x61, + 0xdc, 0x43, 0xd9, 0x69, 0x55, 0x2d, 0x19, 0x2a, 0x30, 0xaa, 0xeb, 0x1c, 0x5e, 0x57, 0x75, 0xa1, + 0x18, 0x3e, 0x5b, 0x63, 0xfe, 0x95, 0xfa, 0x86, 0x86, 0xcb, 0x44, 0xb5, 0xe6, 0x2d, 0x59, 0x70, + 0x94, 0xa4, 0xd7, 0x7b, 0x06, 0xab, 0x17, 0xb6, 0x13, 0xaa, 0x3e, 0x6a, 0x1a, 0x7b, 0x01, 0xeb, + 0x7b, 0xfc, 0x92, 0xfa, 0x3e, 0xa3, 0xcc, 0x09, 0xc5, 0x64, 0xf9, 0x62, 0x16, 0x18, 0xac, 0xff, + 0xe3, 0x1c, 0xd4, 0x92, 0xa5, 0x08, 0xc6, 0x22, 0x37, 0x1b, 0x25, 0x6f, 0x4a, 0x21, 0x58, 0xda, + 0xfe, 0x3b, 0x24, 0x67, 0xce, 0x7a, 0x25, 0xb2, 0x29, 0x5e, 0x09, 0xdd, 0x19, 0x90, 0x7b, 0x99, + 0xf7, 0x35, 0xff, 0x4a, 0xde, 0xd7, 0x42, 0x9a, 0xf7, 0xf5, 0x7a, 0x97, 0xdd, 0xdc, 0x4f, 0xe4, + 0xb2, 0x9b, 0x7f, 0xa1, 0xcb, 0x4e, 0x73, 0x34, 0x16, 0xaf, 0x31, 0xe1, 0x6b, 0xae, 0xc7, 0x14, + 0x97, 0x5d, 0xe9, 0x2b, 0xb8, 0xec, 0xd6, 0xff, 0x32, 0x03, 0x6c, 0x76, 0x75, 0xb0, 0xa7, 0xe4, + 0xf0, 0x71, 0xf9, 0x50, 0x72, 0xee, 0xaf, 0xbf, 0xda, 0x0a, 0x53, 0x04, 0xa1, 0x72, 0xb3, 0x87, + 0xb0, 0xa4, 0x47, 
0xca, 0xeb, 0x5a, 0x7b, 0xd5, 0x64, 0x7a, 0x52, 0x6c, 0xdb, 0xd1, 0x5c, 0xdd, + 0xf9, 0x97, 0xba, 0xba, 0x0b, 0x2f, 0x75, 0x75, 0xcf, 0x25, 0x5d, 0xdd, 0xeb, 0xff, 0x31, 0x03, + 0x4b, 0x29, 0x44, 0xfc, 0xb3, 0xeb, 0xb3, 0xa0, 0xbd, 0x04, 0x5b, 0xcb, 0x4a, 0xda, 0xd3, 0x39, + 0xda, 0x9e, 0xb2, 0x07, 0x8a, 0xa9, 0x50, 0x27, 0x49, 0x1e, 0xbc, 0x8c, 0xbb, 0xc4, 0x39, 0x4c, + 0x3d, 0xfb, 0xfa, 0xef, 0x67, 0xa1, 0xac, 0x25, 0x8a, 0x51, 0x24, 0x92, 0xd5, 0x22, 0x8c, 0x48, + 0x32, 0x44, 0x9b, 0xc3, 0x1d, 0x90, 0x5e, 0x0f, 0x4a, 0xa7, 0xc5, 0x25, 0xc5, 0x40, 0x44, 0xd8, + 0x80, 0x25, 0xe5, 0x8c, 0xe3, 0x71, 0x20, 0xa1, 0xdc, 0x6b, 0x16, 0xa5, 0x4b, 0x8e, 0x47, 0x71, + 0x89, 0xec, 0xa1, 0x52, 0x07, 0xe3, 0xb9, 0x43, 0x52, 0x27, 0x97, 0xc2, 0x22, 0x2d, 0x10, 0x35, + 0x89, 0x82, 0xce, 0xdf, 0x87, 0x15, 0xb5, 0x3c, 0x92, 0x39, 0xc8, 0xcb, 0xc0, 0xe4, 0xe2, 0xd0, + 0xb3, 0x7c, 0x17, 0x6e, 0x4f, 0xb5, 0x69, 0x2a, 0x2b, 0x45, 0xbc, 0xde, 0x4c, 0xb4, 0x4e, 0x2f, + 0x61, 0xfd, 0x47, 0x50, 0x4d, 0x30, 0xca, 0x9f, 0xdd, 0x94, 0x4f, 0xdb, 0x79, 0x68, 0x44, 0x75, + 0x3b, 0xcf, 0xfa, 0xff, 0xca, 0x01, 0x9b, 0xe5, 0xd5, 0x3f, 0xcf, 0x26, 0xcc, 0x12, 0x66, 0x2e, + 0x85, 0x30, 0xff, 0x9f, 0xc9, 0x0f, 0xef, 0xc2, 0xa2, 0x3c, 0x51, 0xa5, 0x79, 0x54, 0x69, 0x71, + 0xd6, 0xa3, 0x04, 0xd5, 0x8a, 0x0f, 0xa7, 0x23, 0x3b, 0x8a, 0x89, 0x43, 0x24, 0x9a, 0x00, 0x35, + 0x15, 0xe0, 0x71, 0x04, 0x73, 0xb6, 0xdb, 0x3f, 0xf3, 0x7c, 0xc9, 0x07, 0x7f, 0xe1, 0x2b, 0x6f, + 0x9f, 0x1b, 0x4d, 0xcc, 0x8f, 0x52, 0x9b, 0x29, 0x0b, 0x33, 0xde, 0x87, 0xb2, 0x06, 0x66, 0x25, + 0x28, 0xec, 0xb5, 0xf7, 0x37, 0x0f, 0xea, 0x37, 0x58, 0x15, 0x4a, 0x66, 0x6b, 0xeb, 0xe0, 0xd3, + 0x96, 0xd9, 0xda, 0xae, 0x67, 0x58, 0x11, 0xf2, 0x7b, 0x07, 0xdd, 0x5e, 0x3d, 0x6b, 0xac, 0x43, + 0x43, 0x96, 0x38, 0xeb, 0xd4, 0xf8, 0xed, 0x7c, 0x64, 0x2e, 0xc4, 0x44, 0xa9, 0xa2, 0x7f, 0x03, + 0x2a, 0xba, 0x78, 0x23, 0x29, 0x62, 0xca, 0xa9, 0x2f, 0x94, 0x73, 0x4f, 0xe3, 0xd5, 0x5b, 0x40, + 0x2e, 0xdd, 0x41, 0x94, 0x2d, 0x9b, 0x90, 0x5b, 0x53, 
0xdc, 0x87, 0xa8, 0xfc, 0x24, 0xc8, 0xf0, + 0xff, 0x83, 0x5a, 0xd2, 0x80, 0x2f, 0x39, 0x52, 0x9a, 0xc2, 0x29, 0x72, 0x27, 0x2c, 0xfa, 0xec, + 0xbb, 0x50, 0x9f, 0x76, 0x00, 0x48, 0xe1, 0xf9, 0x9a, 0xfc, 0x0b, 0x4e, 0xd2, 0x27, 0xc0, 0x76, + 0x61, 0x39, 0x4d, 0xc0, 0x43, 0xfa, 0xb8, 0xde, 0x48, 0xc1, 0x66, 0x85, 0x38, 0xf6, 0x2d, 0xe9, + 0x08, 0x2a, 0xe0, 0xf4, 0xbf, 0x95, 0xac, 0x5f, 0x1b, 0xec, 0x0d, 0xfa, 0xa7, 0xb9, 0x84, 0xce, + 0x01, 0x62, 0x18, 0xab, 0x43, 0xe5, 0xe0, 0xb0, 0xd5, 0xb1, 0xb6, 0x76, 0x9b, 0x9d, 0x4e, 0x6b, + 0xaf, 0x7e, 0x83, 0x31, 0xa8, 0xa1, 0x33, 0x7b, 0x3b, 0x82, 0x65, 0x04, 0x4c, 0x3a, 0xe4, 0x14, + 0x2c, 0xcb, 0x96, 0xa1, 0xde, 0xee, 0x4c, 0x41, 0x73, 0xac, 0x01, 0xcb, 0x87, 0x2d, 0xf2, 0x7f, + 0x27, 0xca, 0xcd, 0x0b, 0xa5, 0x41, 0x76, 0x57, 0x28, 0x0d, 0x74, 0x32, 0x50, 0xae, 0x03, 0x25, + 0x4b, 0xff, 0x4e, 0x06, 0x56, 0xa6, 0x12, 0xe2, 0xf3, 0x1e, 0x24, 0x49, 0x27, 0x65, 0xe8, 0x0a, + 0x02, 0xd5, 0x6a, 0x7a, 0x17, 0x16, 0x23, 0xc3, 0xd3, 0xd4, 0xae, 0x54, 0x8f, 0x12, 0x14, 0xf2, + 0x43, 0x58, 0xd2, 0xec, 0x57, 0x53, 0xbc, 0x82, 0x69, 0x49, 0x32, 0x83, 0xb1, 0x16, 0xc5, 0xd5, + 0x4f, 0xb5, 0x7a, 0x40, 0xc7, 0x0d, 0xf5, 0x84, 0xd8, 0x4f, 0x96, 0x6c, 0xaf, 0xfa, 0x64, 0x8f, + 0xa6, 0x08, 0x21, 0xd9, 0x5a, 0x7d, 0xc2, 0x55, 0xf5, 0x7f, 0x30, 0x07, 0xec, 0x93, 0x09, 0xf7, + 0xaf, 0xf0, 0x3c, 0x47, 0xf0, 0xb2, 0x00, 0x47, 0x65, 0x69, 0xc9, 0xbe, 0xd2, 0x99, 0xad, 0xb4, + 0x33, 0x53, 0xf9, 0x97, 0x9f, 0x99, 0x2a, 0xbc, 0xec, 0xcc, 0xd4, 0x9b, 0x50, 0x75, 0x4e, 0x5d, + 0x4f, 0xb0, 0x42, 0x21, 0x09, 0x07, 0x8d, 0xb9, 0xbb, 0xb9, 0xfb, 0x15, 0xb3, 0x22, 0x81, 0x42, + 0x0e, 0x0e, 0xd8, 0xc7, 0x31, 0x12, 0x1f, 0x9c, 0xe2, 0xf9, 0x3e, 0x9d, 0x09, 0xb6, 0x06, 0xa7, + 0x5c, 0x1a, 0x96, 0x50, 0xd3, 0x50, 0x99, 0x05, 0x3c, 0x60, 0x6f, 0x41, 0x2d, 0xf0, 0x26, 0x42, + 0xb1, 0x50, 0xc3, 0x40, 0x8e, 0xb2, 0x0a, 0x41, 0x0f, 0x95, 0xdb, 0x74, 0x69, 0x12, 0x70, 0x6b, + 0xe4, 0x04, 0x81, 0x10, 0xcf, 0xfa, 0x9e, 0x1b, 0xfa, 0xde, 0x50, 0xfa, 0xbe, 0x16, 0x27, 
0x01, + 0xdf, 0xa7, 0x94, 0x2d, 0x4a, 0x60, 0xdf, 0x8c, 0x9b, 0x34, 0xb6, 0x1d, 0x3f, 0x68, 0x00, 0x36, + 0x49, 0xf5, 0x14, 0xe5, 0x77, 0xdb, 0xf1, 0xa3, 0xb6, 0x88, 0x8f, 0x60, 0xea, 0x2c, 0x57, 0x79, + 0xfa, 0x2c, 0xd7, 0x0f, 0xd2, 0xcf, 0x72, 0x55, 0xb1, 0xe8, 0x47, 0xb2, 0xe8, 0xd9, 0x29, 0xfe, + 0x4a, 0x47, 0xba, 0x66, 0x8f, 0xa8, 0xd5, 0xbe, 0xca, 0x11, 0xb5, 0x85, 0xb4, 0x23, 0x6a, 0xef, + 0x43, 0x19, 0x0f, 0x0f, 0x59, 0x67, 0x8e, 0x90, 0xe1, 0xc8, 0x97, 0x57, 0xd7, 0x4f, 0x17, 0xed, + 0x3a, 0x6e, 0x68, 0x82, 0xaf, 0x7e, 0x06, 0xb3, 0xa7, 0xc5, 0x16, 0x7f, 0x8e, 0xa7, 0xc5, 0xe4, + 0x21, 0xa7, 0x0d, 0x28, 0xaa, 0x79, 0x62, 0x0c, 0xf2, 0x27, 0xbe, 0x37, 0x52, 0x3e, 0x0e, 0xf1, + 0x9b, 0xd5, 0x20, 0x1b, 0x7a, 0x32, 0x73, 0x36, 0xf4, 0x8c, 0x5f, 0x85, 0xb2, 0x46, 0x6a, 0xec, + 0x0d, 0xb2, 0x4b, 0x0a, 0xdd, 0x4c, 0xca, 0x96, 0x34, 0x8a, 0x25, 0x09, 0x6d, 0x0f, 0x04, 0xbf, + 0x19, 0x38, 0x3e, 0xc7, 0x73, 0x9d, 0x96, 0xcf, 0xcf, 0xb9, 0x1f, 0x28, 0x9f, 0x53, 0x3d, 0x4a, + 0x30, 0x09, 0x6e, 0xfc, 0x1a, 0x2c, 0x25, 0xe6, 0x56, 0xb2, 0x88, 0xb7, 0x60, 0x0e, 0xc7, 0x4d, + 0x05, 0x0d, 0x24, 0x4f, 0x6d, 0xc9, 0x34, 0x3c, 0xb0, 0x4f, 0xee, 0x32, 0x6b, 0xec, 0x7b, 0xc7, + 0x58, 0x49, 0xc6, 0x2c, 0x4b, 0xd8, 0xa1, 0xef, 0x1d, 0x1b, 0x7f, 0x96, 0x83, 0xdc, 0xae, 0x37, + 0xd6, 0x83, 0xd8, 0x32, 0x33, 0x41, 0x6c, 0x52, 0xe1, 0xb4, 0x22, 0x85, 0x52, 0xca, 0xec, 0xe8, + 0x28, 0x52, 0x4a, 0xe5, 0x7d, 0xa8, 0x09, 0x3e, 0x11, 0x7a, 0x42, 0x63, 0xbf, 0xb0, 0x7d, 0x12, + 0x88, 0x73, 0xb4, 0xf8, 0xec, 0x51, 0xd8, 0xf3, 0x76, 0x08, 0xce, 0x96, 0x21, 0x17, 0xa9, 0x2f, + 0x98, 0x2c, 0x3e, 0xd9, 0x2a, 0xcc, 0x61, 0x34, 0xf3, 0x95, 0x74, 0x7a, 0xcb, 0x2f, 0xf6, 0x75, + 0x58, 0x4a, 0x96, 0x4b, 0xac, 0x48, 0xca, 0x46, 0x7a, 0xc1, 0xc8, 0x93, 0x6e, 0x82, 0xe0, 0x23, + 0x84, 0x23, 0x83, 0x6b, 0x4e, 0x38, 0xc7, 0x24, 0x8d, 0xe9, 0x15, 0x13, 0x4c, 0xef, 0x0e, 0x94, + 0xc3, 0xe1, 0xb9, 0x35, 0xb6, 0xaf, 0x86, 0x9e, 0x3d, 0x90, 0xeb, 0x1b, 0xc2, 0xe1, 0xf9, 0x21, + 0x41, 0xd8, 0x43, 0x80, 0xd1, 
0x78, 0x2c, 0xd7, 0x1e, 0x3a, 0x3f, 0x62, 0x52, 0xde, 0x3f, 0x3c, + 0x24, 0x92, 0x33, 0x4b, 0xa3, 0xf1, 0x98, 0x7e, 0xb2, 0x6d, 0xa8, 0xa5, 0x9e, 0xbd, 0xbc, 0xad, + 0x82, 0x6f, 0xbd, 0xf1, 0x46, 0xca, 0xe2, 0xac, 0xf6, 0x75, 0xd8, 0xfa, 0x77, 0x81, 0xfd, 0x94, + 0x27, 0x20, 0x7b, 0x50, 0x8a, 0xda, 0xa7, 0x1f, 0x20, 0xc4, 0x70, 0xfa, 0x72, 0xe2, 0x00, 0x61, + 0x73, 0x30, 0xf0, 0x05, 0x5f, 0xa4, 0x0d, 0x33, 0x62, 0xf9, 0xa0, 0xed, 0x98, 0x4d, 0xe2, 0xfb, + 0xc6, 0x7f, 0xcd, 0x40, 0x81, 0x4e, 0x33, 0xbe, 0x0d, 0x0b, 0x84, 0x1f, 0x05, 0x04, 0x4a, 0x57, + 0x39, 0xed, 0xbb, 0x3d, 0x19, 0x0b, 0x28, 0x96, 0x85, 0x76, 0x12, 0x3b, 0x1b, 0xcd, 0xbc, 0x76, + 0x1a, 0xfb, 0x0e, 0x94, 0xa2, 0xaa, 0x35, 0xd2, 0x29, 0xaa, 0x9a, 0xd9, 0xeb, 0x90, 0x3f, 0xf3, + 0xc6, 0xca, 0xf2, 0x03, 0xf1, 0x48, 0x9a, 0x08, 0x8f, 0xdb, 0x22, 0xea, 0xa0, 0xc6, 0x4b, 0x8b, + 0x45, 0x54, 0x09, 0x92, 0xc1, 0x6c, 0x1f, 0xe7, 0x52, 0xfa, 0x78, 0x04, 0x0b, 0x82, 0x0f, 0x68, + 0x31, 0x2d, 0xd7, 0x6f, 0x9a, 0xef, 0x08, 0x09, 0xaf, 0x3f, 0x9c, 0x0c, 0xb8, 0x6e, 0x7b, 0xc3, + 0x00, 0x38, 0x09, 0x57, 0x92, 0xb5, 0xf1, 0x07, 0x19, 0xe2, 0x2f, 0xa2, 0x5c, 0x76, 0x1f, 0xf2, + 0x62, 0x7f, 0x9b, 0xb2, 0xc4, 0x47, 0xe7, 0x1a, 0x04, 0x9e, 0x89, 0x18, 0x78, 0x7d, 0xc1, 0x64, + 0x94, 0x2c, 0xbd, 0x6a, 0x96, 0xdd, 0xc9, 0x28, 0x32, 0x5d, 0x7d, 0x4d, 0x75, 0x6b, 0xca, 0xec, + 0x43, 0xbd, 0x8f, 0x96, 0xe9, 0x86, 0x16, 0x49, 0x97, 0x4f, 0xec, 0x98, 0x4a, 0x0a, 0x1c, 0x9c, + 0x72, 0x2d, 0x82, 0xee, 0x8f, 0xb2, 0x50, 0x4d, 0xb4, 0x08, 0x43, 0x09, 0xc5, 0x06, 0x40, 0x8e, + 0x25, 0x39, 0xdf, 0x20, 0x40, 0x52, 0x50, 0xd7, 0xc6, 0x29, 0x9b, 0x18, 0xa7, 0x28, 0x38, 0x27, + 0xa7, 0x07, 0xe7, 0x3c, 0x82, 0x52, 0x7c, 0x02, 0x3f, 0xd9, 0x24, 0x51, 0x9f, 0x3a, 0xdd, 0x11, + 0x23, 0xc5, 0xe1, 0x3c, 0x05, 0x3d, 0x9c, 0xe7, 0x3b, 0x5a, 0xf4, 0xc7, 0x1c, 0x16, 0x63, 0xa4, + 0x8d, 0xe8, 0xcf, 0x25, 0xf6, 0xc3, 0xf8, 0x18, 0xca, 0x5a, 0xe3, 0xf5, 0x28, 0x8f, 0x4c, 0x22, + 0xca, 0x23, 0x3a, 0x87, 0x95, 0x8d, 0xcf, 0x61, 0x19, 0x7f, 0x3d, 
0x0b, 0x55, 0xb1, 0xbe, 0x1c, + 0xf7, 0xf4, 0xd0, 0x1b, 0x3a, 0x7d, 0x74, 0x34, 0x45, 0x2b, 0x4c, 0x0a, 0x5a, 0x6a, 0x9d, 0xc9, + 0x25, 0x46, 0x72, 0x96, 0x7e, 0xdc, 0x94, 0x98, 0x74, 0x74, 0xdc, 0xd4, 0x80, 0xaa, 0x60, 0x8c, + 0xe8, 0x32, 0x8a, 0xef, 0x07, 0x30, 0xcb, 0x27, 0x9c, 0x6f, 0xda, 0x01, 0x71, 0xc8, 0xaf, 0xc3, + 0x92, 0xc0, 0xc1, 0x93, 0x76, 0x23, 0x67, 0x38, 0x74, 0x08, 0x93, 0x0c, 0x4d, 0xf5, 0x13, 0xce, + 0x4d, 0x3b, 0xe4, 0xfb, 0x22, 0x41, 0x5e, 0x27, 0x50, 0x1c, 0x38, 0x81, 0x7d, 0x1c, 0x07, 0x7c, + 0x46, 0xdf, 0xe8, 0x59, 0xb6, 0x2f, 0x35, 0xcf, 0x32, 0x19, 0x20, 0xca, 0x23, 0xfb, 0x32, 0xf2, + 0x2c, 0x4f, 0x51, 0xd2, 0xfc, 0x34, 0x25, 0x19, 0xff, 0x26, 0x0b, 0x65, 0x8d, 0x2c, 0x5f, 0x65, + 0x77, 0xbd, 0x3d, 0xe3, 0x18, 0x2c, 0xe9, 0x3e, 0xc0, 0x37, 0x93, 0x55, 0x62, 0xec, 0x0b, 0x5d, + 0x5c, 0xa0, 0x11, 0xf0, 0x2d, 0x28, 0x89, 0x55, 0xf7, 0x3e, 0x9a, 0x60, 0xe5, 0xb5, 0x1b, 0x08, + 0x38, 0x9c, 0x1c, 0xab, 0xc4, 0xc7, 0x98, 0x58, 0x88, 0x13, 0x1f, 0x8b, 0xc4, 0x17, 0x85, 0x60, + 0x7f, 0x08, 0x15, 0x59, 0x2a, 0xce, 0x29, 0x76, 0x37, 0x5e, 0xf5, 0x89, 0xf9, 0x36, 0xcb, 0x54, + 0x1d, 0x4d, 0xbe, 0xcc, 0xf8, 0x58, 0x65, 0x2c, 0xbe, 0x2c, 0xe3, 0x63, 0xfa, 0x30, 0x76, 0xa2, + 0xa8, 0x76, 0x8c, 0xbb, 0x52, 0x7c, 0xec, 0x21, 0x2c, 0x29, 0x76, 0x35, 0x71, 0x6d, 0xd7, 0xf5, + 0x26, 0x6e, 0x9f, 0xab, 0x03, 0x5a, 0x4c, 0x26, 0x1d, 0xc5, 0x29, 0xc6, 0x20, 0x3a, 0xc1, 0x4b, + 0xf1, 0x5b, 0x0f, 0xa0, 0x40, 0x72, 0x39, 0x09, 0x1f, 0xe9, 0x8c, 0x8b, 0x50, 0xd8, 0x7d, 0x28, + 0x90, 0x78, 0x9e, 0xbd, 0x96, 0xd9, 0x10, 0x82, 0xd1, 0x04, 0x26, 0x32, 0xee, 0xf3, 0xd0, 0x77, + 0xfa, 0x41, 0x7c, 0xf6, 0xab, 0x20, 0xf4, 0x4f, 0xaa, 0x2b, 0xb6, 0xdc, 0xc6, 0x98, 0xa8, 0xa3, + 0x12, 0x8e, 0xd8, 0x98, 0x96, 0x12, 0x65, 0x48, 0x71, 0x69, 0x08, 0xab, 0xc7, 0x3c, 0xbc, 0xe0, + 0xdc, 0x75, 0x85, 0x30, 0xd4, 0xe7, 0x6e, 0xe8, 0xdb, 0x43, 0x31, 0x49, 0xd4, 0x83, 0x27, 0x33, + 0xa5, 0xc6, 0x36, 0x90, 0xcd, 0x38, 0xe3, 0x56, 0x94, 0x8f, 0x78, 0xc7, 0xca, 0x71, 0x5a, 0xda, + 0xfa, 
0xaf, 0xc0, 0xfa, 0xf5, 0x99, 0x52, 0x4e, 0x78, 0xde, 0x4f, 0x72, 0x95, 0xc8, 0x0f, 0x38, + 0xf4, 0xec, 0x90, 0x5a, 0xa3, 0x73, 0x96, 0x0e, 0x94, 0xb5, 0x94, 0x78, 0xef, 0xcf, 0xa0, 0x70, + 0x47, 0x1f, 0x62, 0x47, 0x72, 0x3d, 0x7f, 0x84, 0x7e, 0xb7, 0x81, 0x15, 0x97, 0x9e, 0x31, 0x17, + 0x62, 0x38, 0x1e, 0x69, 0x37, 0x36, 0x60, 0x01, 0x25, 0x7b, 0x6d, 0xa3, 0x7b, 0x91, 0x30, 0x68, + 0x2c, 0x03, 0xeb, 0x10, 0xef, 0xd2, 0xe3, 0x3d, 0xff, 0x53, 0x0e, 0xca, 0x1a, 0x58, 0xec, 0x46, + 0x18, 0x00, 0x68, 0x0d, 0x1c, 0x7b, 0xc4, 0x95, 0x93, 0xb3, 0x6a, 0x56, 0x11, 0xba, 0x2d, 0x81, + 0x62, 0x2f, 0xb6, 0xcf, 0x4f, 0x2d, 0x6f, 0x12, 0x5a, 0x03, 0x7e, 0xea, 0x73, 0xd5, 0xca, 0x8a, + 0x7d, 0x7e, 0x7a, 0x30, 0x09, 0xb7, 0x11, 0x26, 0xb0, 0x04, 0x2f, 0xd1, 0xb0, 0x64, 0xcc, 0xda, + 0xc8, 0xbe, 0x8c, 0xb1, 0x64, 0xe0, 0x24, 0x51, 0x66, 0x3e, 0x0a, 0x9c, 0x24, 0x6d, 0x71, 0x7a, + 0x03, 0x2d, 0xcc, 0x6e, 0xa0, 0xdf, 0x84, 0x55, 0xda, 0x40, 0x25, 0x6b, 0xb6, 0xa6, 0x56, 0xf2, + 0x32, 0xa6, 0xca, 0x4e, 0x6a, 0x62, 0x6f, 0x5d, 0xf4, 0x40, 0xb1, 0xa5, 0xc0, 0xf9, 0x21, 0x31, + 0xb2, 0x8c, 0x29, 0x7a, 0x26, 0x0b, 0xef, 0x3a, 0x3f, 0xe4, 0x02, 0x13, 0xa3, 0x63, 0x74, 0x4c, + 0x79, 0xc0, 0x62, 0xe4, 0xb8, 0xd3, 0x98, 0xf6, 0x65, 0x12, 0xb3, 0x24, 0x31, 0xed, 0x4b, 0x1d, + 0xf3, 0x09, 0xac, 0x8d, 0xf8, 0xc0, 0xb1, 0x93, 0xc5, 0x5a, 0xb1, 0xe0, 0xb6, 0x4c, 0xc9, 0x5a, + 0x9e, 0x2e, 0x29, 0xee, 0x62, 0x34, 0x7e, 0xe8, 0x8d, 0x8e, 0x1d, 0x92, 0x59, 0x28, 0x5e, 0x27, + 0x6f, 0xd6, 0xdc, 0xc9, 0xe8, 0xfb, 0x08, 0x16, 0x59, 0x02, 0xa3, 0x0a, 0xe5, 0x6e, 0xe8, 0x8d, + 0xd5, 0x34, 0xd7, 0xa0, 0x42, 0x9f, 0xf2, 0xd4, 0xe3, 0x2d, 0xb8, 0x89, 0x2c, 0xa1, 0xe7, 0x8d, + 0xbd, 0xa1, 0x77, 0x7a, 0x95, 0xb0, 0xe3, 0xfd, 0xbb, 0x0c, 0x2c, 0x25, 0x52, 0x25, 0x7b, 0xfd, + 0x26, 0xf1, 0xb3, 0xe8, 0xe8, 0x1a, 0xad, 0xc1, 0x45, 0x6d, 0x0d, 0x12, 0x22, 0x31, 0x33, 0x75, + 0x9c, 0xad, 0x19, 0x5f, 0xb9, 0xa0, 0x32, 0x12, 0x4b, 0x69, 0xcc, 0xb2, 0x14, 0x99, 0x5f, 0x5d, + 0xc6, 0xa0, 0x8a, 0xf8, 0x05, 0x79, 0x08, 
0x66, 0x20, 0xbb, 0x9c, 0x4b, 0x9e, 0x24, 0xd0, 0x6d, + 0x7e, 0xaa, 0x05, 0xb1, 0x21, 0x30, 0x30, 0xfe, 0x49, 0x06, 0x20, 0x6e, 0x1d, 0x9e, 0x65, 0x88, + 0xe4, 0x16, 0xba, 0xcd, 0x4c, 0x93, 0x51, 0xde, 0x80, 0x4a, 0x14, 0xb1, 0x1c, 0x4b, 0x42, 0x65, + 0x05, 0x13, 0xe2, 0xd0, 0x3d, 0x58, 0x38, 0x1d, 0x7a, 0xc7, 0x28, 0xb1, 0x4a, 0xb9, 0x85, 0xe2, + 0xd5, 0x6a, 0x04, 0x56, 0xd2, 0x48, 0x2c, 0x37, 0xe5, 0x53, 0x83, 0x9a, 0x75, 0x29, 0xc8, 0xf8, + 0xad, 0x6c, 0x14, 0xba, 0x19, 0x8f, 0xc4, 0x8b, 0xd5, 0xbb, 0x9f, 0x24, 0x96, 0xe6, 0x45, 0xee, + 0xc5, 0x8f, 0xa1, 0xe6, 0xd3, 0xa6, 0xa4, 0x76, 0xac, 0xfc, 0x0b, 0x76, 0xac, 0xaa, 0x9f, 0x90, + 0x74, 0xde, 0x81, 0xba, 0x3d, 0x38, 0xe7, 0x7e, 0xe8, 0xa0, 0xb5, 0x1e, 0xe5, 0x63, 0x19, 0x2c, + 0xa9, 0xc1, 0x51, 0x10, 0xbd, 0x07, 0x0b, 0xf2, 0x24, 0x6e, 0x84, 0x29, 0xef, 0xf6, 0x89, 0xc1, + 0x02, 0xd1, 0xf8, 0xe7, 0x2a, 0x56, 0x34, 0x39, 0xbb, 0x2f, 0x1e, 0x15, 0xbd, 0x87, 0xd9, 0x59, + 0x07, 0xaa, 0x24, 0x24, 0xe9, 0x04, 0x90, 0xfc, 0x88, 0x80, 0xd2, 0x05, 0x90, 0x1c, 0xd6, 0xfc, + 0xab, 0x0c, 0xab, 0xf1, 0x1f, 0x32, 0x30, 0xbf, 0xeb, 0x8d, 0x77, 0x1d, 0x8a, 0xe6, 0xc7, 0x65, + 0x12, 0xf9, 0xa8, 0xe6, 0xc4, 0x27, 0x06, 0xfe, 0xbc, 0xe0, 0xc0, 0x59, 0xaa, 0x98, 0x57, 0x4d, + 0x8a, 0x79, 0xdf, 0x81, 0x5b, 0xe8, 0x02, 0xf4, 0xbd, 0xb1, 0xe7, 0x8b, 0xa5, 0x6a, 0x0f, 0x49, + 0xdc, 0xf3, 0xdc, 0xf0, 0x4c, 0xf1, 0xce, 0x9b, 0x27, 0x9c, 0x1f, 0x6a, 0x18, 0xfb, 0x11, 0x02, + 0x1e, 0xe9, 0x1c, 0x86, 0xe7, 0x16, 0x69, 0xe8, 0x52, 0x1e, 0x25, 0x8e, 0xba, 0x20, 0x12, 0x5a, + 0x08, 0x47, 0x89, 0xd4, 0xf8, 0x16, 0x94, 0x22, 0x63, 0x0f, 0x7b, 0x17, 0x4a, 0x67, 0xde, 0x58, + 0x5a, 0x84, 0x32, 0x89, 0x43, 0x79, 0xb2, 0xd7, 0x66, 0xf1, 0x8c, 0x7e, 0x04, 0xc6, 0x9f, 0xcd, + 0xc3, 0x7c, 0xdb, 0x3d, 0xf7, 0x9c, 0x3e, 0x46, 0x9b, 0x8e, 0xf8, 0xc8, 0x53, 0xd7, 0x01, 0x88, + 0xdf, 0x18, 0x9b, 0x15, 0xdf, 0xd0, 0x93, 0x93, 0xb1, 0x59, 0xd1, 0xdd, 0x3c, 0x2b, 0x30, 0xe7, + 0xeb, 0x57, 0xec, 0x14, 0x7c, 0x8c, 0x7f, 0x8f, 0xf6, 0xcb, 0x82, 0x76, 0x9d, 
0x82, 0x28, 0x8b, + 0xae, 0x7e, 0xc1, 0x21, 0xa3, 0xe3, 0x99, 0x25, 0x84, 0xe0, 0x80, 0xbd, 0x06, 0xf3, 0xf2, 0x0c, + 0x1c, 0x1d, 0x5a, 0xa2, 0x80, 0x75, 0x09, 0x42, 0x6a, 0xf0, 0x39, 0xb9, 0x70, 0x23, 0x41, 0x36, + 0x67, 0x56, 0x14, 0x70, 0x5b, 0xd0, 0xda, 0x1d, 0x28, 0x13, 0x3e, 0xa1, 0x14, 0x65, 0x90, 0x26, + 0x82, 0x10, 0x21, 0xe5, 0xa6, 0xaa, 0x52, 0xea, 0x4d, 0x55, 0x18, 0x4e, 0x1c, 0x71, 0x59, 0xea, + 0x22, 0xd0, 0xfd, 0x44, 0x1a, 0x5c, 0x5d, 0xd3, 0x26, 0x6d, 0x2a, 0x74, 0x5a, 0x59, 0xd9, 0x54, + 0xde, 0x84, 0xea, 0x89, 0x3d, 0x1c, 0x1e, 0xdb, 0xfd, 0xe7, 0x64, 0x0a, 0xa8, 0x90, 0xf5, 0x53, + 0x01, 0xd1, 0x16, 0x70, 0x07, 0xca, 0xda, 0x2c, 0x63, 0x04, 0x66, 0xde, 0x84, 0x78, 0x7e, 0xa7, + 0x2d, 0x7c, 0xb5, 0x57, 0xb0, 0xf0, 0x69, 0x91, 0xa8, 0x0b, 0xc9, 0x48, 0xd4, 0x5b, 0xc8, 0x4d, + 0x65, 0xc8, 0x61, 0x9d, 0x2e, 0xc3, 0xb1, 0x07, 0x03, 0x0c, 0x39, 0xa4, 0x9b, 0x27, 0x71, 0xf0, + 0x28, 0x7d, 0x91, 0x74, 0x09, 0x82, 0x11, 0xca, 0x6d, 0x32, 0x53, 0x8f, 0x6d, 0x67, 0x80, 0x87, + 0x0e, 0xc8, 0x7a, 0x30, 0x6f, 0x8f, 0xc2, 0x43, 0xdb, 0x19, 0xb0, 0xbb, 0x50, 0x51, 0xc9, 0xb8, + 0x3b, 0x2e, 0xd1, 0xf8, 0xcb, 0x64, 0xb1, 0x27, 0x1a, 0x50, 0x8d, 0x30, 0x46, 0xf1, 0x91, 0xe3, + 0xb2, 0x44, 0x41, 0x3a, 0x78, 0x1f, 0xa3, 0x7c, 0x42, 0x8e, 0x07, 0x8b, 0x6b, 0x8f, 0x6f, 0x45, + 0xc1, 0x07, 0x48, 0xa5, 0xea, 0x3f, 0x39, 0xc7, 0x08, 0x53, 0x08, 0x77, 0xe4, 0xa3, 0x5b, 0x4d, + 0xc8, 0xbf, 0x12, 0x15, 0x7d, 0x74, 0x84, 0xc0, 0xbe, 0xa5, 0xe9, 0xaf, 0x0d, 0x44, 0x7e, 0x6d, + 0xaa, 0xfc, 0xeb, 0x0e, 0x65, 0xdd, 0x06, 0x70, 0x02, 0xb1, 0xcb, 0x04, 0xdc, 0x1d, 0xe0, 0x19, + 0xe1, 0xa2, 0x59, 0x72, 0x82, 0x67, 0x04, 0xf8, 0xd9, 0x2a, 0xb6, 0x4d, 0xa8, 0xe8, 0xdd, 0x64, + 0x45, 0xc8, 0x1f, 0x1c, 0xb6, 0x3a, 0xf5, 0x1b, 0xac, 0x0c, 0xf3, 0xdd, 0x56, 0xaf, 0xb7, 0x87, + 0x9e, 0xbe, 0x0a, 0x14, 0xa3, 0x83, 0x8c, 0x59, 0xf1, 0xd5, 0xdc, 0xda, 0x6a, 0x1d, 0xf6, 0x5a, + 0xdb, 0xf5, 0xdc, 0xf7, 0xf2, 0xc5, 0x6c, 0x3d, 0x67, 0xfc, 0x79, 0x0e, 0xca, 0xda, 0x28, 0xbc, + 0x98, 0x19, 0xdf, 
0x06, 0x40, 0x4d, 0x32, 0x8e, 0x48, 0xcd, 0x9b, 0x25, 0x01, 0xa1, 0xc9, 0xd7, + 0x7d, 0x14, 0x39, 0xba, 0x65, 0x49, 0xf9, 0x28, 0xde, 0x84, 0x2a, 0x5d, 0x58, 0xa4, 0xfb, 0x6b, + 0x0b, 0x66, 0x85, 0x80, 0x92, 0x55, 0xe3, 0x09, 0x67, 0x44, 0xc2, 0xe3, 0x75, 0xf2, 0xfa, 0x12, + 0x02, 0xe1, 0x01, 0x3b, 0x3c, 0x1d, 0x19, 0x78, 0xc3, 0x73, 0x4e, 0x18, 0x24, 0x11, 0x96, 0x25, + 0xac, 0x27, 0xcf, 0x6a, 0x4b, 0x7e, 0xa8, 0x1d, 0xb1, 0x2d, 0x98, 0x15, 0x02, 0xca, 0x8a, 0xbe, + 0xae, 0x08, 0x88, 0xa2, 0x57, 0xd6, 0x66, 0xa9, 0x21, 0x41, 0x3c, 0x7b, 0x33, 0x66, 0xc4, 0x12, + 0x12, 0xc6, 0xd7, 0x66, 0xf3, 0xbd, 0xdc, 0x9c, 0xc8, 0xde, 0x05, 0x36, 0x1a, 0x8f, 0xad, 0x14, + 0x03, 0x5f, 0xde, 0x5c, 0x18, 0x8d, 0xc7, 0x3d, 0xcd, 0xfe, 0xf5, 0x33, 0xb0, 0x3d, 0x7e, 0x01, + 0xac, 0x29, 0x16, 0x30, 0x36, 0x31, 0x52, 0xc5, 0x62, 0xb6, 0x9c, 0xd1, 0xd9, 0x72, 0x0a, 0xf7, + 0xcb, 0xa6, 0x72, 0xbf, 0x17, 0xf1, 0x09, 0x63, 0x07, 0xca, 0x87, 0xda, 0x75, 0x68, 0x77, 0xc5, + 0x0e, 0xa1, 0x2e, 0x42, 0xa3, 0xbd, 0x83, 0x6c, 0x8a, 0xbe, 0xbc, 0xff, 0x4c, 0x6b, 0x4d, 0x56, + 0x6b, 0x8d, 0xf1, 0x8f, 0x32, 0x74, 0xd5, 0x4c, 0xd4, 0xf8, 0xf8, 0x06, 0x36, 0xe5, 0x7e, 0x8b, + 0x4f, 0xc2, 0x97, 0x95, 0xdb, 0x4d, 0x1e, 0x62, 0xc7, 0xa6, 0x59, 0xde, 0xc9, 0x49, 0xc0, 0x55, + 0x8c, 0x47, 0x19, 0x61, 0x07, 0x08, 0x52, 0xc2, 0xb7, 0x90, 0xf0, 0x1d, 0x2a, 0x3f, 0x90, 0x81, + 0x1d, 0x42, 0xf8, 0xde, 0xb7, 0x2f, 0x65, 0xad, 0x81, 0x10, 0x41, 0xa4, 0x7f, 0x40, 0x1d, 0x96, + 0x8d, 0xbe, 0x8d, 0x7f, 0x20, 0x0f, 0xeb, 0x4f, 0x8f, 0xef, 0x03, 0x28, 0x46, 0xa5, 0x26, 0x77, + 0x58, 0x85, 0x19, 0xa5, 0x8b, 0x7d, 0x1c, 0x8d, 0x21, 0x89, 0x16, 0xd3, 0xe2, 0x42, 0x1f, 0x4f, + 0x5b, 0x6b, 0xf5, 0x7b, 0xc0, 0x4e, 0x1c, 0x7f, 0x1a, 0x99, 0x16, 0x5b, 0x1d, 0x53, 0x34, 0x6c, + 0xe3, 0x08, 0x96, 0x14, 0x97, 0xd0, 0x34, 0x82, 0xe4, 0xe4, 0x65, 0x5e, 0xc2, 0xe4, 0xb3, 0x33, + 0x4c, 0xde, 0xf8, 0xcd, 0x02, 0xcc, 0xab, 0xab, 0x05, 0xd3, 0xae, 0xc3, 0x2b, 0x25, 0xaf, 0xc3, + 0x6b, 0x24, 0xae, 0x4e, 0xc2, 0xa9, 0x97, 0xfb, 0xfd, 
0xbd, 0xe9, 0x2d, 0x5b, 0xf3, 0x55, 0x24, + 0xb6, 0x6d, 0xe9, 0xab, 0x28, 0x24, 0x7d, 0x15, 0x69, 0x57, 0x04, 0x92, 0xe8, 0x39, 0x73, 0x45, + 0xe0, 0x2d, 0x20, 0x39, 0x42, 0x0b, 0x6e, 0x2b, 0x22, 0x40, 0xec, 0x39, 0x49, 0xb1, 0xa3, 0x38, + 0x2d, 0x76, 0xbc, 0xb2, 0x48, 0xf0, 0x4d, 0x98, 0xa3, 0xeb, 0x35, 0xe4, 0xe1, 0x5f, 0xb5, 0x71, + 0xc8, 0xb1, 0x52, 0xff, 0xe9, 0xc4, 0x83, 0x29, 0x71, 0xf5, 0xfb, 0xb6, 0xca, 0x89, 0xfb, 0xb6, + 0x74, 0x1f, 0x4a, 0x25, 0xe9, 0x43, 0xb9, 0x0f, 0xf5, 0x68, 0xe0, 0xd0, 0x22, 0xe9, 0x06, 0xf2, + 0xe4, 0x60, 0x4d, 0xc1, 0x05, 0x37, 0xec, 0x04, 0xf1, 0xc6, 0x57, 0x4b, 0x6c, 0x7c, 0x82, 0x57, + 0x35, 0xc3, 0x90, 0x8f, 0xc6, 0xa1, 0xda, 0xf8, 0xb4, 0x5b, 0x19, 0x69, 0xe6, 0x17, 0x70, 0xe6, + 0xd5, 0xf4, 0x12, 0x75, 0x6c, 0x42, 0xed, 0xc4, 0x76, 0x86, 0x13, 0x9f, 0x5b, 0x3e, 0xb7, 0x03, + 0xcf, 0xc5, 0xc5, 0x1f, 0xef, 0xc1, 0xb2, 0x8b, 0x3b, 0x84, 0x63, 0x22, 0x8a, 0x59, 0x3d, 0xd1, + 0x3f, 0xf1, 0x10, 0x93, 0x3e, 0x12, 0x62, 0xcb, 0x92, 0x67, 0x88, 0x29, 0x56, 0xa5, 0xdd, 0xb1, + 0x76, 0xf6, 0xda, 0x4f, 0x77, 0x7b, 0xf5, 0x8c, 0xf8, 0xec, 0x1e, 0x6d, 0x6d, 0xb5, 0x5a, 0xdb, + 0xb8, 0x85, 0x01, 0xcc, 0xed, 0x34, 0xdb, 0x7b, 0x72, 0x03, 0xcb, 0xd7, 0x0b, 0xc6, 0xef, 0x64, + 0xa1, 0xac, 0xf5, 0x86, 0x3d, 0x89, 0x26, 0x81, 0x6e, 0x80, 0xba, 0x3d, 0xdb, 0xe3, 0x0d, 0xc5, + 0xe1, 0xb5, 0x59, 0x88, 0xee, 0x5f, 0xcc, 0x5e, 0x7b, 0xff, 0x22, 0x7b, 0x1b, 0x16, 0x6c, 0x2a, + 0x21, 0x1a, 0x74, 0x69, 0xdc, 0x97, 0x60, 0x39, 0xe6, 0x18, 0x41, 0x1a, 0x6f, 0x53, 0x02, 0x2f, + 0xaf, 0x82, 0x36, 0xa3, 0x9d, 0x0a, 0xe7, 0x66, 0x5e, 0x8e, 0x8c, 0x74, 0xc6, 0x47, 0x1b, 0xbe, + 0x1c, 0x2f, 0x95, 0x6c, 0x7c, 0x00, 0x10, 0xb7, 0x39, 0x39, 0x44, 0x37, 0x92, 0x43, 0x94, 0xd1, + 0x86, 0x28, 0x6b, 0xfc, 0x33, 0xc9, 0x9e, 0xe4, 0x78, 0x47, 0xe6, 0xbc, 0xaf, 0x83, 0x32, 0x30, + 0x5a, 0x18, 0xc8, 0x3d, 0x1e, 0xf2, 0x50, 0x5d, 0x58, 0xb0, 0x28, 0x53, 0xda, 0x51, 0xc2, 0x0c, + 0x3b, 0xcd, 0xce, 0xb2, 0xd3, 0x37, 0xa0, 0x22, 0x58, 0xa9, 0x24, 0x96, 0x40, 0xb2, 0xa4, 
0xf2, + 0xc8, 0xbe, 0x54, 0x75, 0x27, 0xf8, 0x68, 0x7e, 0x8a, 0x8f, 0xfe, 0x6e, 0x86, 0xee, 0x0f, 0x89, + 0x1b, 0x1a, 0x33, 0xd2, 0xa8, 0xcc, 0x24, 0x23, 0x95, 0xa8, 0x66, 0x94, 0x7e, 0x0d, 0x73, 0xcc, + 0xa6, 0x33, 0xc7, 0x74, 0xb6, 0x9b, 0x4b, 0x65, 0xbb, 0xc6, 0x3a, 0x34, 0xb6, 0xb9, 0x18, 0x8a, + 0xe6, 0x70, 0x38, 0x35, 0x96, 0xc6, 0x2d, 0xb8, 0x99, 0x92, 0x26, 0x2d, 0x33, 0x9f, 0xc0, 0x4a, + 0x93, 0x2e, 0x56, 0xf8, 0x59, 0x1d, 0x90, 0x34, 0x1a, 0xb0, 0x3a, 0x5d, 0xa4, 0xac, 0x6c, 0x07, + 0x16, 0xb7, 0xf9, 0xf1, 0xe4, 0x74, 0x8f, 0x9f, 0xc7, 0x15, 0x31, 0xc8, 0x07, 0x67, 0xde, 0x85, + 0x9c, 0x5c, 0xfc, 0x8d, 0xa1, 0x97, 0x02, 0xc7, 0x0a, 0xc6, 0xbc, 0xaf, 0xac, 0xf3, 0x08, 0xe9, + 0x8e, 0x79, 0xdf, 0x78, 0x02, 0x4c, 0x2f, 0x47, 0xce, 0x84, 0x50, 0x9d, 0x26, 0xc7, 0x56, 0x70, + 0x15, 0x84, 0x7c, 0xa4, 0x0e, 0x06, 0x42, 0x30, 0x39, 0xee, 0x12, 0xc4, 0xb8, 0x07, 0x95, 0x43, + 0xfb, 0xca, 0xe4, 0x5f, 0xc8, 0xf3, 0x77, 0x6b, 0x30, 0x3f, 0xb6, 0xaf, 0x04, 0xcf, 0x8c, 0x1c, + 0x75, 0x98, 0x6c, 0xfc, 0x61, 0x1e, 0xe6, 0x08, 0x93, 0xdd, 0xa5, 0x1b, 0x8c, 0x1d, 0x17, 0x79, + 0x96, 0xda, 0x3d, 0x34, 0xd0, 0xcc, 0x06, 0x93, 0x9d, 0xdd, 0x60, 0xa4, 0x55, 0x51, 0xdd, 0xdc, + 0xa4, 0x5c, 0x2a, 0xee, 0x64, 0xa4, 0xae, 0x6b, 0x4a, 0xde, 0x2e, 0x90, 0x8f, 0x6f, 0xa8, 0xa6, + 0x93, 0xd5, 0x49, 0xa7, 0x77, 0xac, 0xa0, 0x51, 0xeb, 0xd4, 0xbe, 0x29, 0xf7, 0x16, 0x1d, 0x94, + 0xaa, 0x05, 0xce, 0xab, 0x43, 0xa5, 0x49, 0x2d, 0x70, 0x46, 0xdb, 0x2b, 0xbe, 0x5c, 0xdb, 0x23, + 0x73, 0xe3, 0x0b, 0xb4, 0x3d, 0x78, 0x05, 0x6d, 0xef, 0x15, 0x1c, 0xce, 0x37, 0xa1, 0x88, 0xc2, + 0x90, 0xb6, 0xd5, 0x08, 0x21, 0x48, 0x6c, 0x35, 0x1f, 0x6a, 0xfa, 0x10, 0x45, 0xbb, 0x68, 0xbc, + 0xde, 0xe4, 0x5f, 0xfc, 0x7c, 0x1c, 0x79, 0x9f, 0xc3, 0xbc, 0x84, 0x0a, 0x82, 0x76, 0xed, 0x91, + 0xba, 0xfc, 0x0e, 0x7f, 0x8b, 0x61, 0xc3, 0x1b, 0xbb, 0xbe, 0x98, 0x38, 0x3e, 0x1f, 0xa8, 0x5b, + 0x8d, 0x1c, 0x5c, 0xa3, 0x02, 0x22, 0x3a, 0x28, 0x74, 0x33, 0xd7, 0xbb, 0x70, 0x25, 0xef, 0x99, + 0x77, 0x82, 0x67, 0xe2, 0xd3, 
0x60, 0x50, 0xc7, 0xeb, 0x2f, 0xc7, 0x9e, 0xaf, 0x76, 0x72, 0xe3, + 0xc7, 0x19, 0xa8, 0xcb, 0xd5, 0x15, 0xa5, 0xe9, 0xaa, 0x51, 0xe1, 0xba, 0xe0, 0x8c, 0x17, 0xdf, + 0x51, 0x64, 0x40, 0x15, 0x2d, 0x42, 0xd1, 0xb6, 0x4e, 0x16, 0xad, 0xb2, 0x00, 0xee, 0xc8, 0xad, + 0xfd, 0x75, 0x28, 0xab, 0xc0, 0xf0, 0x91, 0x33, 0x54, 0x97, 0xd1, 0x53, 0x64, 0xf8, 0xbe, 0x33, + 0x54, 0x52, 0x81, 0x6f, 0xcb, 0x43, 0xce, 0x19, 0x94, 0x0a, 0x4c, 0x3b, 0xe4, 0xc6, 0xbf, 0xca, + 0xc0, 0xa2, 0xd6, 0x15, 0xb9, 0x6e, 0x3f, 0x82, 0x4a, 0x74, 0xef, 0x2c, 0x8f, 0xc4, 0xd1, 0xb5, + 0x24, 0xa3, 0x89, 0xb3, 0x95, 0xfb, 0x11, 0x24, 0x10, 0x8d, 0x19, 0xd8, 0x57, 0x14, 0xbd, 0x3c, + 0x19, 0x29, 0x8d, 0x6f, 0x60, 0x5f, 0xed, 0x70, 0xde, 0x9d, 0x8c, 0x84, 0x3e, 0x7f, 0xc1, 0xf9, + 0xf3, 0x08, 0x81, 0xd8, 0x27, 0x08, 0x98, 0xc4, 0x30, 0xa0, 0x3a, 0xf2, 0xdc, 0xf0, 0x2c, 0x42, + 0x91, 0xa2, 0x38, 0x02, 0x09, 0xc7, 0xf8, 0xd3, 0x2c, 0x2c, 0x91, 0xdd, 0x51, 0xda, 0x7b, 0x25, + 0xeb, 0x6a, 0xc0, 0x1c, 0x99, 0x60, 0x89, 0x79, 0xed, 0xde, 0x30, 0xe5, 0x37, 0xfb, 0xe6, 0x2b, + 0xda, 0x4a, 0xd5, 0x39, 0xea, 0x6b, 0x86, 0x3f, 0x37, 0x3b, 0xfc, 0xd7, 0x0f, 0x6f, 0x9a, 0xf7, + 0xb7, 0x90, 0xe6, 0xfd, 0x7d, 0x15, 0x9f, 0xeb, 0xcc, 0x89, 0xdf, 0x79, 0x89, 0xa3, 0x9d, 0xf8, + 0x7d, 0x02, 0x6b, 0x09, 0x1c, 0xe4, 0xd6, 0xce, 0x89, 0xc3, 0xd5, 0xad, 0x34, 0xcb, 0x1a, 0x76, + 0x57, 0xa5, 0x6d, 0xce, 0x43, 0x21, 0xe8, 0x7b, 0x63, 0x6e, 0xac, 0xc2, 0x72, 0x72, 0x54, 0xe5, + 0x36, 0xf1, 0x7b, 0x19, 0x68, 0xc8, 0x58, 0x1d, 0xc7, 0x3d, 0xdd, 0x75, 0x82, 0xd0, 0xf3, 0xa3, + 0xfb, 0x59, 0x6f, 0x03, 0x04, 0xa1, 0xed, 0x4b, 0x15, 0x5c, 0xde, 0xc3, 0x82, 0x10, 0x54, 0xaf, + 0x6f, 0x42, 0x91, 0xbb, 0x03, 0x4a, 0x24, 0x6a, 0x98, 0xe7, 0xee, 0x40, 0x29, 0xe7, 0x33, 0x5b, + 0x69, 0x35, 0x29, 0x24, 0xc8, 0x5b, 0x0f, 0xc4, 0xe8, 0xf0, 0x73, 0xdc, 0xd2, 0xf3, 0xd1, 0xad, + 0x07, 0xfb, 0xf6, 0x25, 0x46, 0xbe, 0x06, 0xc6, 0xdf, 0xcd, 0xc2, 0x42, 0xdc, 0x3e, 0xba, 0x53, + 0xe5, 0xc5, 0xb7, 0xc3, 0xdc, 0x95, 0xe4, 0xe0, 0x08, 0xa5, 0x46, 
0xb3, 0xc6, 0x16, 0x69, 0x71, + 0xb6, 0x5d, 0x66, 0x40, 0x59, 0x61, 0x78, 0x93, 0x50, 0xbb, 0x26, 0xb1, 0x44, 0x28, 0x07, 0x93, + 0x50, 0x68, 0xa1, 0x42, 0x1d, 0x77, 0x5c, 0xa9, 0x07, 0x16, 0xec, 0x51, 0xd8, 0xc6, 0xd7, 0x17, + 0x04, 0x58, 0x64, 0xa3, 0x89, 0x14, 0x58, 0x02, 0xbf, 0x4e, 0x4a, 0x09, 0xcd, 0x1c, 0x2a, 0x24, + 0xba, 0xc4, 0x4e, 0x17, 0x51, 0x47, 0x12, 0xfb, 0xeb, 0x50, 0xa6, 0xc2, 0xe3, 0x03, 0xde, 0x79, + 0xb3, 0x84, 0x35, 0x60, 0xba, 0xb4, 0x8c, 0x79, 0x93, 0x84, 0x3d, 0x00, 0xa8, 0x2a, 0x0c, 0x85, + 0xf9, 0x9b, 0x19, 0xb8, 0x99, 0x32, 0x6d, 0x72, 0x95, 0x6f, 0xc1, 0xe2, 0x49, 0x94, 0xa8, 0x46, + 0x97, 0x96, 0xfa, 0xaa, 0x62, 0xab, 0xc9, 0x31, 0x35, 0xeb, 0x27, 0x49, 0x40, 0xac, 0x89, 0xd2, + 0x0c, 0x26, 0xae, 0x0f, 0x40, 0x91, 0x88, 0xa6, 0x91, 0x94, 0xc0, 0x43, 0x58, 0x6f, 0x5d, 0x0a, + 0x8e, 0xb1, 0xa5, 0x3f, 0x1f, 0xa2, 0xc8, 0x28, 0x69, 0x75, 0xcf, 0xbc, 0x92, 0xd5, 0x7d, 0x40, + 0xe7, 0x8d, 0xa3, 0xb2, 0x7e, 0x92, 0x42, 0x70, 0x03, 0x15, 0x79, 0xe8, 0xf9, 0x13, 0x75, 0x8f, + 0x40, 0x3f, 0x7a, 0xf6, 0xc4, 0x08, 0x60, 0x61, 0x7f, 0x32, 0x0c, 0x9d, 0xf8, 0x25, 0x14, 0xf6, + 0x4d, 0x99, 0x07, 0xeb, 0x51, 0xa3, 0x96, 0x5a, 0x11, 0x44, 0x15, 0xe1, 0x60, 0x8d, 0x44, 0x41, + 0xd6, 0x6c, 0x7d, 0x0b, 0xa3, 0x64, 0x0d, 0xc6, 0x4d, 0x58, 0x8b, 0xbf, 0x68, 0xd8, 0xd4, 0x56, + 0xf3, 0x0f, 0x33, 0x14, 0x66, 0x9f, 0x7c, 0x95, 0x85, 0xb5, 0x60, 0x29, 0x70, 0xdc, 0xd3, 0x21, + 0xd7, 0x8b, 0x0f, 0xe4, 0x20, 0xac, 0x24, 0xdb, 0x26, 0x5f, 0x6e, 0x31, 0x17, 0x29, 0x47, 0x5c, + 0x5a, 0xc0, 0x36, 0xaf, 0x6b, 0x64, 0x4c, 0x16, 0x53, 0xa3, 0x31, 0xdb, 0xf8, 0x36, 0xd4, 0x92, + 0x15, 0xb1, 0x0f, 0xe5, 0x31, 0xfd, 0xb8, 0x55, 0xb9, 0xa9, 0x33, 0xcc, 0x31, 0x41, 0x94, 0xe3, + 0xb1, 0x0f, 0x8c, 0xbf, 0x9d, 0x81, 0x86, 0xc9, 0x05, 0xe5, 0x6a, 0xad, 0x54, 0x34, 0xf3, 0xd1, + 0x4c, 0xa9, 0xd7, 0xf7, 0x55, 0x9d, 0xfe, 0x57, 0x2d, 0x7a, 0xef, 0xda, 0xc9, 0xd8, 0xbd, 0x31, + 0xd3, 0xa3, 0xcd, 0x22, 0xcc, 0x11, 0x8a, 0xb1, 0x06, 0x2b, 0xb2, 0x3d, 0xaa, 0x2d, 0xb1, 0x4b, + 0x35, 
0x51, 0x63, 0xc2, 0xa5, 0xba, 0x0e, 0x0d, 0x3a, 0x8f, 0xab, 0x77, 0x42, 0x66, 0xdc, 0x06, + 0xb6, 0x6f, 0xf7, 0x6d, 0xdf, 0xf3, 0xdc, 0x43, 0xee, 0xcb, 0xa0, 0x65, 0x94, 0x30, 0xd1, 0xe3, + 0xa8, 0x44, 0x61, 0xfa, 0x52, 0x97, 0xc3, 0x7a, 0xae, 0x8a, 0xd1, 0xa2, 0x2f, 0xc3, 0x84, 0xa5, + 0x4d, 0xfb, 0x39, 0x57, 0x25, 0xa9, 0x21, 0xfa, 0x18, 0xca, 0xe3, 0xa8, 0x50, 0x35, 0xee, 0xea, + 0x1a, 0x91, 0xd9, 0x6a, 0x4d, 0x1d, 0xdb, 0x78, 0x0c, 0xcb, 0xc9, 0x32, 0x25, 0xeb, 0x58, 0x87, + 0xe2, 0x48, 0xc2, 0x64, 0xeb, 0xa2, 0x6f, 0xe3, 0xb7, 0x8b, 0x30, 0x2f, 0xb5, 0x51, 0xb6, 0x01, + 0xf9, 0xbe, 0x8a, 0x93, 0x8b, 0xaf, 0xaf, 0x92, 0xa9, 0xea, 0xff, 0x16, 0x46, 0xcb, 0x09, 0x3c, + 0xf6, 0x31, 0xd4, 0x92, 0xae, 0xe2, 0xa9, 0xd3, 0xfe, 0x49, 0x1f, 0x6f, 0xb5, 0x3f, 0xe5, 0x14, + 0x2c, 0xc5, 0x9b, 0x23, 0xc9, 0x0c, 0xc5, 0x33, 0x6d, 0xf7, 0xf4, 0x5c, 0x21, 0x6f, 0x07, 0x67, + 0xb6, 0xf5, 0xf8, 0xc9, 0x07, 0xf2, 0xb8, 0x7f, 0x19, 0x81, 0xdd, 0x33, 0xfb, 0xf1, 0x93, 0x0f, + 0xa6, 0x25, 0x69, 0x79, 0xd8, 0x5f, 0x93, 0xa4, 0x97, 0xa1, 0x40, 0xb7, 0xa0, 0x52, 0xc0, 0x13, + 0x7d, 0xb0, 0x47, 0xb0, 0xac, 0x0c, 0x1c, 0x32, 0x34, 0x9d, 0xb8, 0x60, 0x91, 0x4e, 0x03, 0xca, + 0xb4, 0x2e, 0x26, 0x91, 0x49, 0x64, 0x15, 0xe6, 0xce, 0xe2, 0x2b, 0x6d, 0xab, 0xa6, 0xfc, 0x32, + 0xfe, 0xb4, 0x00, 0x65, 0x6d, 0x50, 0x58, 0x05, 0x8a, 0x66, 0xab, 0xdb, 0x32, 0x3f, 0x6d, 0x6d, + 0xd7, 0x6f, 0xb0, 0xfb, 0xf0, 0x56, 0xbb, 0xb3, 0x75, 0x60, 0x9a, 0xad, 0xad, 0x9e, 0x75, 0x60, + 0x5a, 0xea, 0x12, 0xb5, 0xc3, 0xe6, 0xe7, 0xfb, 0xad, 0x4e, 0xcf, 0xda, 0x6e, 0xf5, 0x9a, 0xed, + 0xbd, 0x6e, 0x3d, 0xc3, 0x5e, 0x83, 0x46, 0x8c, 0xa9, 0x92, 0x9b, 0xfb, 0x07, 0x47, 0x9d, 0x5e, + 0x3d, 0xcb, 0xee, 0xc0, 0xad, 0x9d, 0x76, 0xa7, 0xb9, 0x67, 0xc5, 0x38, 0x5b, 0x7b, 0xbd, 0x4f, + 0xad, 0xd6, 0x2f, 0x1d, 0xb6, 0xcd, 0xcf, 0xeb, 0xb9, 0x34, 0x84, 0xdd, 0xde, 0xde, 0x96, 0x2a, + 0x21, 0xcf, 0x6e, 0xc2, 0x0a, 0x21, 0x50, 0x16, 0xab, 0x77, 0x70, 0x60, 0x75, 0x0f, 0x0e, 0x3a, + 0xf5, 0x02, 0x5b, 0x84, 0x6a, 0xbb, 0xf3, 
0x69, 0x73, 0xaf, 0xbd, 0x6d, 0x99, 0xad, 0xe6, 0xde, + 0x7e, 0x7d, 0x8e, 0x2d, 0xc1, 0xc2, 0x34, 0xde, 0xbc, 0x28, 0x42, 0xe1, 0x1d, 0x74, 0xda, 0x07, + 0x1d, 0xeb, 0xd3, 0x96, 0xd9, 0x6d, 0x1f, 0x74, 0xea, 0x45, 0xb6, 0x0a, 0x2c, 0x99, 0xb4, 0xbb, + 0xdf, 0xdc, 0xaa, 0x97, 0xd8, 0x0a, 0x2c, 0x26, 0xe1, 0xcf, 0x5a, 0x9f, 0xd7, 0x81, 0x35, 0x60, + 0x99, 0x1a, 0x66, 0x6d, 0xb6, 0xf6, 0x0e, 0x3e, 0xb3, 0xf6, 0xdb, 0x9d, 0xf6, 0xfe, 0xd1, 0x7e, + 0xbd, 0x8c, 0x77, 0x33, 0xb6, 0x5a, 0x56, 0xbb, 0xd3, 0x3d, 0xda, 0xd9, 0x69, 0x6f, 0xb5, 0x5b, + 0x9d, 0x5e, 0xbd, 0x42, 0x35, 0xa7, 0x75, 0xbc, 0x2a, 0x32, 0xc8, 0xf3, 0x2b, 0xd6, 0x76, 0xbb, + 0xdb, 0xdc, 0xdc, 0x6b, 0x6d, 0xd7, 0x6b, 0xec, 0x36, 0xdc, 0xec, 0xb5, 0xf6, 0x0f, 0x0f, 0xcc, + 0xa6, 0xf9, 0xb9, 0x3a, 0xdf, 0x62, 0xed, 0x34, 0xdb, 0x7b, 0x47, 0x66, 0xab, 0xbe, 0xc0, 0xde, + 0x80, 0xdb, 0x66, 0xeb, 0x93, 0xa3, 0xb6, 0xd9, 0xda, 0xb6, 0x3a, 0x07, 0xdb, 0x2d, 0x6b, 0xa7, + 0xd5, 0xec, 0x1d, 0x99, 0x2d, 0x6b, 0xbf, 0xdd, 0xed, 0xb6, 0x3b, 0x4f, 0xeb, 0x75, 0xf6, 0x16, + 0xdc, 0x8d, 0x50, 0xa2, 0x02, 0xa6, 0xb0, 0x16, 0x45, 0xff, 0xd4, 0x94, 0x76, 0x5a, 0xbf, 0xd4, + 0xb3, 0x0e, 0x5b, 0x2d, 0xb3, 0xce, 0xd8, 0x3a, 0xac, 0xc6, 0xd5, 0x53, 0x05, 0xb2, 0xee, 0x25, + 0x91, 0x76, 0xd8, 0x32, 0xf7, 0x9b, 0x1d, 0x31, 0xc1, 0x89, 0xb4, 0x65, 0xd1, 0xec, 0x38, 0x6d, + 0xba, 0xd9, 0x2b, 0x8c, 0x41, 0x4d, 0x9b, 0x95, 0x9d, 0xa6, 0x59, 0x5f, 0x65, 0x0b, 0x50, 0xde, + 0x3f, 0x3c, 0xb4, 0x7a, 0xed, 0xfd, 0xd6, 0xc1, 0x51, 0xaf, 0xbe, 0xc6, 0x56, 0xa0, 0xde, 0xee, + 0xf4, 0x5a, 0xa6, 0x98, 0x6b, 0x95, 0xf5, 0x7f, 0xcc, 0xb3, 0x65, 0x58, 0x50, 0x2d, 0x55, 0xd0, + 0xbf, 0x98, 0x67, 0x6b, 0xc0, 0x8e, 0x3a, 0x66, 0xab, 0xb9, 0x2d, 0x06, 0x2e, 0x4a, 0xf8, 0x9f, + 0xf3, 0xd2, 0x6d, 0xf4, 0xe3, 0x5c, 0xb4, 0x59, 0xc7, 0x71, 0x18, 0xc9, 0x0b, 0xce, 0x2b, 0xda, + 0xc5, 0xe4, 0x2f, 0x7b, 0x7a, 0x44, 0x53, 0xad, 0x72, 0x33, 0xaa, 0xd5, 0x8c, 0xee, 0x5e, 0xd5, + 0x65, 0xbf, 0x37, 0xa1, 0x3a, 0xa2, 0xcb, 0xce, 0xe5, 0xa5, 0xc6, 0x20, 0x83, 
0x92, 0x08, 0x48, + 0x37, 0x1a, 0xcf, 0xbc, 0xbd, 0x51, 0x98, 0x7d, 0x7b, 0x23, 0x4d, 0xbe, 0x9f, 0x4b, 0x93, 0xef, + 0x1f, 0xc0, 0x22, 0xb1, 0x26, 0xc7, 0x75, 0x46, 0x4a, 0x6b, 0x26, 0x29, 0x70, 0x01, 0x59, 0x14, + 0xc1, 0x95, 0x3a, 0xa1, 0x54, 0x0e, 0xc9, 0x42, 0xe6, 0xa5, 0xb6, 0x91, 0xd0, 0x34, 0x88, 0x73, + 0x44, 0x9a, 0x46, 0x54, 0x83, 0x7d, 0x19, 0xd7, 0x50, 0xd6, 0x6a, 0x20, 0x38, 0xd6, 0xf0, 0x00, + 0x16, 0xf9, 0x65, 0xe8, 0xdb, 0x96, 0x37, 0xb6, 0xbf, 0x98, 0xa0, 0x5f, 0xdb, 0x46, 0x1d, 0xbe, + 0x62, 0x2e, 0x60, 0xc2, 0x01, 0xc2, 0xb7, 0xed, 0xd0, 0x7e, 0xf0, 0x25, 0x94, 0xb5, 0x8b, 0xf0, + 0xd9, 0x1a, 0x2c, 0x7d, 0xd6, 0xee, 0x75, 0x5a, 0xdd, 0xae, 0x75, 0x78, 0xb4, 0xf9, 0xac, 0xf5, + 0xb9, 0xb5, 0xdb, 0xec, 0xee, 0xd6, 0x6f, 0x88, 0x45, 0xdb, 0x69, 0x75, 0x7b, 0xad, 0xed, 0x04, + 0x3c, 0xc3, 0x5e, 0x87, 0xf5, 0xa3, 0xce, 0x51, 0xb7, 0xb5, 0x6d, 0xa5, 0xe5, 0xcb, 0x0a, 0x2a, + 0x95, 0xe9, 0x29, 0xd9, 0x73, 0x0f, 0x7e, 0x0d, 0x6a, 0xc9, 0xa3, 0xde, 0x0c, 0x60, 0x6e, 0xaf, + 0xf5, 0xb4, 0xb9, 0xf5, 0x39, 0xdd, 0xde, 0xda, 0xed, 0x35, 0x7b, 0xed, 0x2d, 0x4b, 0xde, 0xd6, + 0x2a, 0x38, 0x42, 0x86, 0x95, 0x61, 0xbe, 0xd9, 0xd9, 0xda, 0x3d, 0x30, 0xbb, 0xf5, 0x2c, 0x7b, + 0x0d, 0xd6, 0x14, 0xad, 0x6e, 0x1d, 0xec, 0xef, 0xb7, 0x7b, 0xc8, 0x0c, 0x7b, 0x9f, 0x1f, 0x0a, + 0xd2, 0x7c, 0x60, 0x43, 0x29, 0xbe, 0x6e, 0x16, 0x19, 0x4c, 0xbb, 0xd7, 0x6e, 0xf6, 0x62, 0xee, + 0x5a, 0xbf, 0x21, 0xf8, 0x57, 0x0c, 0xc6, 0xdb, 0x62, 0xeb, 0x19, 0x3a, 0x0d, 0xa7, 0x80, 0x54, + 0x7b, 0x3d, 0x2b, 0x16, 0x55, 0x0c, 0xdd, 0x3c, 0xe8, 0x89, 0x2e, 0x7c, 0x1b, 0x6a, 0xc9, 0x98, + 0xc7, 0xa4, 0xf1, 0x7a, 0x1d, 0x56, 0x37, 0x5b, 0xbd, 0xcf, 0x5a, 0xad, 0x0e, 0x8e, 0xce, 0x56, + 0xab, 0xd3, 0x33, 0x9b, 0x7b, 0xed, 0xde, 0xe7, 0xf5, 0xcc, 0x83, 0x8f, 0xa1, 0x3e, 0xed, 0x60, + 0x4c, 0x78, 0x64, 0x5f, 0xe4, 0xba, 0x7d, 0xf0, 0x9f, 0x33, 0xb0, 0x9c, 0x66, 0x5b, 0x17, 0x73, + 0x28, 0x17, 0xa7, 0x60, 0xd1, 0xdd, 0x83, 0x8e, 0xd5, 0x39, 0xc0, 0xeb, 0x21, 0xd7, 0x61, 0x75, + 0x2a, 0x41, 0x71, 
0x82, 0x0c, 0xbb, 0x05, 0x6b, 0x33, 0x99, 0x2c, 0xf3, 0xe0, 0x08, 0xbb, 0xdd, + 0x80, 0xe5, 0xa9, 0xc4, 0x96, 0x69, 0x1e, 0x98, 0xf5, 0x1c, 0x7b, 0x0f, 0xee, 0x4f, 0xa5, 0xcc, + 0x6e, 0x4c, 0x6a, 0xdf, 0xca, 0xb3, 0x7b, 0xf0, 0xe6, 0x0c, 0x76, 0xcc, 0xbb, 0xad, 0xcd, 0xe6, + 0x9e, 0xe8, 0x5e, 0xbd, 0xf0, 0xe0, 0x9f, 0xe6, 0x00, 0xe2, 0x43, 0x45, 0xa2, 0xfe, 0xed, 0x66, + 0xaf, 0xb9, 0x77, 0x20, 0xc8, 0xcb, 0x3c, 0xe8, 0x89, 0xd2, 0xcd, 0xd6, 0x27, 0xf5, 0x1b, 0xa9, + 0x29, 0x07, 0x87, 0xa2, 0x43, 0x6b, 0xb0, 0x44, 0x53, 0xb5, 0x27, 0xba, 0xd1, 0xee, 0x3c, 0xa5, + 0x9b, 0x46, 0x71, 0xf7, 0x3b, 0x3a, 0xdc, 0x31, 0x0f, 0x3a, 0x3d, 0xab, 0xbb, 0x7b, 0xd4, 0xdb, + 0xc6, 0x7b, 0x4a, 0xb7, 0xcc, 0xf6, 0x21, 0x95, 0x99, 0x7f, 0x11, 0x82, 0x28, 0xba, 0x20, 0xd6, + 0xc2, 0xd3, 0x83, 0x6e, 0xb7, 0x7d, 0x68, 0x7d, 0x72, 0xd4, 0x32, 0xdb, 0xad, 0x2e, 0x66, 0x9c, + 0x4b, 0x81, 0x0b, 0xfc, 0x79, 0xb1, 0x67, 0xf6, 0xf6, 0x3e, 0x95, 0x9b, 0x9a, 0x40, 0x2d, 0x26, + 0x41, 0x02, 0xab, 0x24, 0x66, 0x47, 0xec, 0x0a, 0x29, 0x25, 0xc3, 0x35, 0x69, 0x22, 0x5f, 0x59, + 0xec, 0x77, 0x33, 0x8b, 0x04, 0xb3, 0x55, 0xd2, 0x93, 0x44, 0x2e, 0xdc, 0x0a, 0x23, 0xc1, 0x61, + 0x7b, 0xdb, 0xc4, 0x0c, 0xb5, 0x19, 0xa8, 0xc0, 0x5d, 0x10, 0x44, 0x28, 0xb6, 0x0d, 0x81, 0x52, + 0x57, 0x1f, 0x22, 0x65, 0xf1, 0xf1, 0x6f, 0xe5, 0xa0, 0x46, 0x07, 0x3c, 0xe9, 0x7d, 0x48, 0xee, + 0xb3, 0x7d, 0x98, 0x97, 0x0f, 0x8d, 0xb2, 0x95, 0xe8, 0x8e, 0x47, 0xfd, 0x69, 0xd3, 0xf5, 0xd5, + 0x69, 0xb0, 0x14, 0x93, 0x97, 0xfe, 0xca, 0x9f, 0xfc, 0xf7, 0xbf, 0x93, 0xad, 0xb2, 0xf2, 0xc3, + 0xf3, 0xf7, 0x1f, 0x9e, 0x72, 0x37, 0x10, 0x65, 0xfc, 0x0a, 0x40, 0xfc, 0x7c, 0x26, 0x6b, 0x68, + 0x97, 0x4a, 0x24, 0x1e, 0xce, 0x5c, 0xbf, 0x99, 0x92, 0x22, 0xcb, 0xbd, 0x89, 0xe5, 0x2e, 0x19, + 0x35, 0x51, 0xae, 0xe3, 0x3a, 0x21, 0x3d, 0xa5, 0xf9, 0x51, 0xe6, 0x01, 0x1b, 0x40, 0x45, 0x7f, + 0xd8, 0x92, 0x29, 0x09, 0x36, 0xe5, 0x69, 0xce, 0xf5, 0x5b, 0xa9, 0x69, 0x4a, 0x37, 0xc0, 0x3a, + 0x56, 0x8c, 0xba, 0xa8, 0x63, 0x82, 0x18, 0x71, 0x2d, 
0x43, 0xd2, 0x96, 0xe2, 0xf7, 0x2b, 0xd9, + 0x6b, 0x9a, 0xbc, 0x3b, 0xf3, 0x7a, 0xe6, 0xfa, 0xed, 0x6b, 0x52, 0x65, 0x5d, 0xb7, 0xb1, 0xae, + 0x35, 0x83, 0x89, 0xba, 0xfa, 0x88, 0xa3, 0x5e, 0xcf, 0xfc, 0x28, 0xf3, 0xe0, 0xf1, 0xbf, 0x7f, + 0x07, 0x4a, 0x51, 0xc0, 0x37, 0xfb, 0x75, 0xa8, 0x26, 0x4e, 0xe0, 0x32, 0xd5, 0x8d, 0xb4, 0x03, + 0xbb, 0xeb, 0xaf, 0xa5, 0x27, 0xca, 0x8a, 0x5f, 0xc7, 0x8a, 0x1b, 0x6c, 0x55, 0x54, 0x2c, 0x4f, + 0xb8, 0x3e, 0xc4, 0x13, 0xf3, 0x74, 0x63, 0xe6, 0x73, 0x4d, 0x2b, 0xa4, 0xca, 0x5e, 0x9b, 0xd6, + 0xd4, 0x12, 0xb5, 0xdd, 0xbe, 0x26, 0x55, 0x56, 0xf7, 0x1a, 0x56, 0xb7, 0xca, 0x96, 0xf5, 0xea, + 0x54, 0x9c, 0x30, 0xe3, 0x78, 0x8d, 0xad, 0xfe, 0xbc, 0x23, 0xbb, 0x1d, 0xdf, 0x29, 0x9a, 0xf2, + 0xec, 0x63, 0x44, 0x22, 0xb3, 0x6f, 0x3f, 0x1a, 0x0d, 0xac, 0x8a, 0x31, 0x9c, 0x3e, 0xfd, 0x75, + 0x47, 0x76, 0x0c, 0x65, 0xed, 0x45, 0x24, 0x76, 0xf3, 0xda, 0xd7, 0x9b, 0xd6, 0xd7, 0xd3, 0x92, + 0xd2, 0xba, 0xa2, 0x97, 0xff, 0xf0, 0x84, 0x73, 0xf6, 0xcb, 0x50, 0x8a, 0xde, 0xd9, 0x61, 0x6b, + 0xda, 0xbb, 0x47, 0xfa, 0xbb, 0x40, 0xeb, 0x8d, 0xd9, 0x84, 0x34, 0xe2, 0xd3, 0x4b, 0x17, 0xc4, + 0xf7, 0x19, 0x94, 0xb5, 0xb7, 0x74, 0xa2, 0x0e, 0xcc, 0xbe, 0xd7, 0x13, 0x75, 0x20, 0xe5, 0xe9, + 0x1d, 0x63, 0x11, 0xab, 0x28, 0xb3, 0x12, 0xd2, 0x77, 0x78, 0xe9, 0x05, 0x6c, 0x0f, 0x56, 0xa4, + 0x06, 0x7c, 0xcc, 0xbf, 0xca, 0x34, 0xa4, 0xbc, 0xa8, 0xf9, 0x28, 0xc3, 0x3e, 0x86, 0xa2, 0x7a, + 0x32, 0x89, 0xad, 0xa6, 0x3f, 0xfd, 0xb4, 0xbe, 0x36, 0x03, 0x97, 0xea, 0xea, 0xe7, 0x00, 0xf1, + 0xc3, 0x3d, 0x11, 0x93, 0x98, 0x79, 0x08, 0x28, 0xa2, 0x80, 0xd9, 0x57, 0x7e, 0x8c, 0x55, 0xec, + 0x60, 0x9d, 0x21, 0x93, 0x70, 0xf9, 0x85, 0xba, 0x6b, 0xfc, 0x07, 0x50, 0xd6, 0xde, 0xee, 0x89, + 0x86, 0x6f, 0xf6, 0xdd, 0x9f, 0x68, 0xf8, 0x52, 0x9e, 0xfa, 0x31, 0xd6, 0xb1, 0xf4, 0x65, 0x63, + 0x41, 0x94, 0x2e, 0x44, 0x60, 0x29, 0x8a, 0x8a, 0x09, 0x3a, 0x83, 0x6a, 0xe2, 0x81, 0x9e, 0x68, + 0x85, 0xa6, 0x3d, 0xff, 0x13, 0xad, 0xd0, 0xd4, 0x37, 0x7d, 0x14, 0x9d, 0x19, 0x8b, 0xa2, 
0x1e, + 0xba, 0x4d, 0x4c, 0xab, 0xe9, 0xfb, 0x50, 0xd6, 0x1e, 0xdb, 0x89, 0xfa, 0x32, 0xfb, 0xae, 0x4f, + 0xd4, 0x97, 0xb4, 0xb7, 0x79, 0x96, 0xb1, 0x8e, 0x9a, 0x81, 0xa4, 0x80, 0x97, 0x21, 0x8b, 0xb2, + 0x7f, 0x1d, 0x6a, 0xc9, 0xf7, 0x77, 0xa2, 0xb5, 0x9f, 0xfa, 0x90, 0x4f, 0xb4, 0xf6, 0xaf, 0x79, + 0xb4, 0x47, 0x92, 0xf4, 0x83, 0xa5, 0xa8, 0x92, 0x87, 0x3f, 0x92, 0x47, 0xd7, 0xbe, 0x64, 0x9f, + 0x08, 0x06, 0x27, 0x2f, 0xeb, 0x66, 0x6b, 0x1a, 0xd5, 0xea, 0xb7, 0x7e, 0x47, 0xeb, 0x65, 0xe6, + 0x5e, 0xef, 0x24, 0x31, 0x63, 0xe1, 0xec, 0x29, 0x2c, 0x45, 0xc4, 0x1c, 0xdd, 0xbe, 0x1d, 0x44, + 0x7d, 0x48, 0xbd, 0xe3, 0x7b, 0xbd, 0x3e, 0x9d, 0xfa, 0x28, 0x43, 0xdb, 0x1f, 0x5e, 0x69, 0xac, + 0x6d, 0x7f, 0xfa, 0x05, 0xdc, 0xda, 0xf6, 0x97, 0xb8, 0xf9, 0x78, 0x7a, 0xfb, 0x0b, 0x1d, 0x51, + 0x86, 0x0b, 0x0b, 0xd3, 0x57, 0x5d, 0xdf, 0xbe, 0xee, 0x6a, 0x10, 0x2a, 0xfe, 0xf5, 0x17, 0xdf, + 0x1c, 0x92, 0x64, 0x45, 0x8a, 0x9b, 0x3e, 0x94, 0x91, 0x52, 0xec, 0x57, 0xa1, 0xa2, 0xbf, 0xd9, + 0xc1, 0x74, 0x9e, 0x30, 0x5d, 0xd3, 0xad, 0xd4, 0xb4, 0x24, 0x95, 0xb0, 0x8a, 0x5e, 0x0d, 0xfb, + 0x14, 0x56, 0xa3, 0x61, 0xd6, 0xef, 0xb6, 0x08, 0xd8, 0x9d, 0x94, 0x1b, 0x2f, 0x12, 0x83, 0x7d, + 0xf3, 0xda, 0x2b, 0x31, 0x1e, 0x65, 0x04, 0xf5, 0x25, 0xdf, 0x27, 0x88, 0x77, 0x9e, 0xb4, 0x67, + 0x19, 0xe2, 0x9d, 0x27, 0xf5, 0x51, 0x03, 0x45, 0x7d, 0x6c, 0x29, 0x31, 0x46, 0x14, 0x43, 0xce, + 0xbe, 0x0f, 0x0b, 0xda, 0xc5, 0x1d, 0xdd, 0x2b, 0xb7, 0x1f, 0xad, 0xa4, 0xd9, 0x4b, 0x6c, 0xd7, + 0xd3, 0x6c, 0xc6, 0xc6, 0x1a, 0x96, 0xbf, 0x68, 0x24, 0x06, 0x47, 0xac, 0xa2, 0x2d, 0x28, 0xeb, + 0x97, 0x82, 0xbc, 0xa0, 0xdc, 0x35, 0x2d, 0x49, 0xbf, 0x2f, 0xf5, 0x51, 0x86, 0xed, 0x41, 0x7d, + 0xfa, 0x0a, 0xbf, 0x88, 0xa7, 0xa4, 0x5d, 0x7b, 0xb8, 0x3e, 0x95, 0x98, 0xb8, 0xf8, 0x8f, 0x1d, + 0xd2, 0x29, 0xa4, 0xe8, 0xf9, 0x49, 0xcf, 0x9f, 0xde, 0xd5, 0x93, 0xcf, 0x52, 0x46, 0xa5, 0xa5, + 0x3d, 0x48, 0x7a, 0x3f, 0xf3, 0x28, 0xc3, 0x7e, 0x27, 0x03, 0x95, 0xc4, 0x15, 0x56, 0x89, 0x73, + 0x1e, 0x53, 0xfd, 0x6c, 0xe8, 
0x69, 0x7a, 0x47, 0x0d, 0x13, 0x07, 0x71, 0xef, 0xc1, 0xf7, 0x12, + 0x93, 0xf4, 0xa3, 0x84, 0xcb, 0x75, 0x63, 0xfa, 0x7d, 0xca, 0x2f, 0xa7, 0x11, 0xf4, 0x6b, 0x91, + 0xbf, 0x7c, 0x94, 0x61, 0xff, 0x22, 0x03, 0xb5, 0x64, 0x2c, 0x45, 0xd4, 0xdd, 0xd4, 0xa8, 0x8d, + 0x88, 0x94, 0xae, 0x09, 0xc0, 0xf8, 0x3e, 0xb6, 0xb2, 0xf7, 0xc0, 0x4c, 0xb4, 0x52, 0xbe, 0xac, + 0xf1, 0xd3, 0xb5, 0x96, 0xfd, 0x22, 0x3d, 0x07, 0xad, 0xe2, 0xee, 0xd8, 0xec, 0xf3, 0xc1, 0x11, + 0xf9, 0xe9, 0x8f, 0xed, 0x1a, 0xb9, 0xdf, 0xc8, 0x66, 0x70, 0x26, 0x7e, 0x40, 0x8f, 0x31, 0xaa, + 0xd0, 0x2b, 0x41, 0xca, 0xaf, 0x5c, 0xc8, 0x5b, 0xd8, 0xb1, 0xd7, 0x8d, 0x9b, 0x89, 0x8e, 0x4d, + 0x4b, 0x1f, 0x4d, 0x6a, 0xa2, 0x7c, 0x30, 0x37, 0xde, 0x3e, 0x67, 0x1e, 0xd1, 0x4d, 0xad, 0x04, + 0x1b, 0x39, 0xa2, 0x46, 0x4a, 0xf4, 0xc4, 0x7a, 0x7b, 0xc5, 0x62, 0x8c, 0x07, 0xd8, 0xd6, 0xb7, + 0x8c, 0x3b, 0xd7, 0xb6, 0xf5, 0x21, 0x06, 0x47, 0x88, 0x16, 0x1f, 0x02, 0xc4, 0xc1, 0xb1, 0x6c, + 0x2a, 0x44, 0x33, 0xe2, 0x42, 0xb3, 0xf1, 0xb3, 0xc9, 0x45, 0xad, 0x22, 0x39, 0x45, 0x89, 0xbf, + 0x4c, 0x3c, 0x35, 0x0a, 0x1e, 0xd5, 0x45, 0xb0, 0x64, 0x1c, 0x6b, 0x42, 0x04, 0x9b, 0x2e, 0x3f, + 0xc1, 0x51, 0xa3, 0x48, 0xd1, 0x23, 0xa8, 0xee, 0x79, 0xde, 0xf3, 0xc9, 0x38, 0x3a, 0x90, 0x91, + 0x8c, 0x85, 0xda, 0xb5, 0x83, 0xb3, 0xf5, 0xa9, 0x5e, 0x18, 0x77, 0xb1, 0xa8, 0x75, 0xd6, 0xd0, + 0x8a, 0x7a, 0xf8, 0xa3, 0x38, 0x22, 0xf7, 0x4b, 0x66, 0xc3, 0x62, 0xc4, 0xa8, 0xe3, 0xa8, 0xd7, + 0x64, 0x31, 0x09, 0xf6, 0x3c, 0x5d, 0x45, 0x42, 0x57, 0x50, 0xad, 0x7d, 0x18, 0xa8, 0x32, 0x1f, + 0x65, 0xd8, 0x21, 0x54, 0xb6, 0x79, 0x1f, 0x6f, 0xf4, 0xc0, 0xb8, 0x9f, 0xa5, 0x44, 0x0c, 0x09, + 0x05, 0x0c, 0xad, 0x57, 0x13, 0xc0, 0xe4, 0xe6, 0x35, 0xb6, 0xaf, 0x7c, 0xfe, 0xc5, 0xc3, 0x1f, + 0xc9, 0x88, 0xa2, 0x2f, 0xd5, 0xe6, 0x15, 0x47, 0x97, 0xe9, 0x12, 0x40, 0x32, 0x44, 0x2b, 0xb1, + 0x79, 0xcd, 0x84, 0x68, 0x25, 0x86, 0x3a, 0x8a, 0x25, 0x1b, 0xc2, 0xe2, 0x4c, 0x54, 0x57, 0xb4, + 0x6f, 0x5d, 0x17, 0x0b, 0xb6, 0x7e, 0xf7, 0x7a, 0x84, 0x64, 0x6d, 
0x0f, 0x92, 0xb5, 0x75, 0xa1, + 0x4a, 0x17, 0x1f, 0x1f, 0x73, 0x3a, 0xdb, 0x3b, 0x75, 0x31, 0x96, 0x7e, 0x70, 0x78, 0x7a, 0x97, + 0xc1, 0xb4, 0xa4, 0x98, 0x83, 0xa7, 0x3b, 0xd9, 0x09, 0x3e, 0x07, 0xa2, 0x1d, 0xa6, 0x8d, 0x88, + 0x71, 0xf6, 0x80, 0x6f, 0x44, 0x8c, 0x29, 0x67, 0x6f, 0x95, 0x0e, 0xca, 0x56, 0xa2, 0xb2, 0x1f, + 0xba, 0xde, 0x80, 0x8f, 0x64, 0xa9, 0xbf, 0x0c, 0xe5, 0xa7, 0x3c, 0x54, 0xa7, 0x57, 0x23, 0x81, + 0x7e, 0xea, 0x38, 0xeb, 0x7a, 0xca, 0x99, 0xe3, 0x24, 0x6d, 0x52, 0xc9, 0x7c, 0x70, 0xca, 0x89, + 0x13, 0x5a, 0xce, 0xe0, 0x4b, 0xf6, 0x4b, 0x58, 0x78, 0x74, 0x57, 0xc3, 0xaa, 0xd6, 0x4c, 0xbd, + 0xf0, 0x85, 0x29, 0x78, 0x5a, 0xc9, 0xa2, 0xcd, 0x9a, 0x60, 0xe9, 0x42, 0x59, 0xbb, 0xd3, 0x25, + 0x1a, 0x9b, 0xd9, 0x3b, 0x7c, 0xa2, 0xb1, 0x49, 0xb9, 0x02, 0xc6, 0xb8, 0x8f, 0xf5, 0x18, 0xec, + 0x6e, 0x5c, 0x0f, 0x5d, 0xfb, 0x12, 0xd7, 0xf4, 0xf0, 0x47, 0xf6, 0x28, 0xfc, 0x92, 0x7d, 0x46, + 0xd3, 0xa1, 0x9d, 0xce, 0x8d, 0x35, 0x94, 0xe9, 0x83, 0xbc, 0xd1, 0x60, 0x69, 0x49, 0x49, 0xad, + 0x85, 0xaa, 0x42, 0xb1, 0xf1, 0x09, 0x40, 0x37, 0xf4, 0xc6, 0xdb, 0x36, 0x1f, 0x79, 0x6e, 0xcc, + 0xd3, 0xe3, 0xf3, 0xa2, 0x31, 0x9f, 0xd4, 0x0e, 0x8d, 0xb2, 0xcf, 0x34, 0x95, 0x2e, 0x71, 0xae, + 0x5c, 0x11, 0xf1, 0xb5, 0x47, 0x4a, 0xa3, 0x01, 0x49, 0x39, 0x56, 0xfa, 0x28, 0xc3, 0x9a, 0x00, + 0x71, 0xf8, 0x60, 0xa4, 0xa0, 0xcd, 0x44, 0x26, 0x46, 0xec, 0x35, 0x25, 0xd6, 0xf0, 0x10, 0x4a, + 0x71, 0xdc, 0xd5, 0x5a, 0x7c, 0x45, 0x55, 0x22, 0x4a, 0x2b, 0x12, 0x17, 0x66, 0x62, 0x9e, 0x8c, + 0x3a, 0x0e, 0x15, 0xb0, 0xa2, 0x18, 0xaa, 0x13, 0xce, 0x03, 0xe6, 0xc0, 0x12, 0x35, 0x30, 0x92, + 0xcd, 0xf0, 0x9c, 0x63, 0xf4, 0x08, 0xcf, 0x6c, 0xf8, 0x51, 0xc4, 0x35, 0x52, 0x83, 0x68, 0x12, + 0x76, 0x26, 0x41, 0xad, 0x74, 0xc6, 0x52, 0x6c, 0x01, 0x23, 0x58, 0x9c, 0x89, 0xd3, 0x88, 0x58, + 0xc7, 0x75, 0x81, 0x37, 0x11, 0xeb, 0xb8, 0x36, 0xc4, 0xc3, 0x58, 0xc1, 0x2a, 0x17, 0x0c, 0x40, + 0xbd, 0xf2, 0xc2, 0x09, 0xfb, 0x67, 0xa2, 0xba, 0xdf, 0xcf, 0xc0, 0x52, 0x4a, 0x24, 0x06, 0x7b, + 0x43, 
0x99, 0x28, 0xae, 0x8d, 0xd2, 0x58, 0x4f, 0xf5, 0xd8, 0x1b, 0x5d, 0xac, 0x67, 0x9f, 0x3d, + 0x4b, 0x6c, 0xa0, 0xe4, 0x30, 0x97, 0x2b, 0xf3, 0x85, 0x12, 0x4c, 0xaa, 0xf8, 0xf2, 0x05, 0xac, + 0x51, 0x43, 0x9a, 0xc3, 0xe1, 0x54, 0x34, 0xc1, 0xeb, 0x5a, 0x2b, 0x52, 0x22, 0x24, 0x12, 0xca, + 0x40, 0x32, 0x4a, 0xe2, 0x1a, 0xd9, 0x9d, 0x9a, 0xca, 0x26, 0x50, 0x9f, 0xf6, 0xd2, 0xb3, 0xeb, + 0xcb, 0x5a, 0xbf, 0x93, 0x50, 0xb6, 0x53, 0x3c, 0xfb, 0x5f, 0xc3, 0xca, 0xee, 0x18, 0xeb, 0x69, + 0xe3, 0x42, 0xfa, 0xb7, 0x98, 0x8f, 0xff, 0x3f, 0x0a, 0x29, 0x98, 0xea, 0xe7, 0x9d, 0xe8, 0x55, + 0x81, 0xf4, 0x00, 0x88, 0x48, 0xdd, 0x4f, 0x8f, 0x48, 0x78, 0x1b, 0xab, 0xbf, 0x6b, 0xdc, 0x4a, + 0xab, 0xde, 0xa7, 0x2c, 0xa4, 0xf8, 0xaf, 0x4d, 0xaf, 0x6b, 0xd5, 0x82, 0xbb, 0x69, 0xf3, 0x7d, + 0xad, 0xe2, 0x35, 0x35, 0xd6, 0x37, 0x50, 0x86, 0xac, 0xe8, 0x21, 0x04, 0xd1, 0xf2, 0x49, 0x89, + 0x55, 0x88, 0x96, 0x4f, 0x5a, 0xcc, 0x41, 0x52, 0x7e, 0x52, 0xd1, 0x06, 0x1f, 0x65, 0x1e, 0x6c, + 0xde, 0xfb, 0xfe, 0xd7, 0x4e, 0x9d, 0xf0, 0x6c, 0x72, 0xbc, 0xd1, 0xf7, 0x46, 0x0f, 0x87, 0xca, + 0xb4, 0x29, 0x2f, 0x03, 0x78, 0x38, 0x74, 0x07, 0x0f, 0xb1, 0xd8, 0xe3, 0xb9, 0xb1, 0xef, 0x85, + 0xde, 0x37, 0xfe, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0x6c, 0x5d, 0x30, 0x7e, 0x8b, 0x00, + 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -9987,6 +13061,11 @@ type LightningClient interface { //* lncli: `listpeers` //ListPeers returns a verbose listing of all currently active peers. ListPeers(ctx context.Context, in *ListPeersRequest, opts ...grpc.CallOption) (*ListPeersResponse, error) + //* + //SubscribePeerEvents creates a uni-directional stream from the server to + //the client in which any events relevant to the state of peers are sent + //over. Events include peers going online and offline. 
+ SubscribePeerEvents(ctx context.Context, in *PeerEventSubscription, opts ...grpc.CallOption) (Lightning_SubscribePeerEventsClient, error) //* lncli: `getinfo` //GetInfo returns general information concerning the lightning node including //it's identity pubkey, alias, the chains it is connected to, and information @@ -10023,9 +13102,22 @@ type LightningClient interface { //request to a remote peer. Users are able to specify a target number of //blocks that the funding transaction should be confirmed in, or a manual fee //rate to us for the funding transaction. If neither are specified, then a - //lax block confirmation target is used. + //lax block confirmation target is used. Each OpenStatusUpdate will return + //the pending channel ID of the in-progress channel. Depending on the + //arguments specified in the OpenChannelRequest, this pending channel ID can + //then be used to manually progress the channel funding flow. OpenChannel(ctx context.Context, in *OpenChannelRequest, opts ...grpc.CallOption) (Lightning_OpenChannelClient, error) //* + //FundingStateStep is an advanced funding related call that allows the caller + //to either execute some preparatory steps for a funding workflow, or + //manually progress a funding workflow. The primary way a funding flow is + //identified is via its pending channel ID. As an example, this method can be + //used to specify that we're expecting a funding flow for a particular + //pending channel ID, for which we need to use specific parameters. + //Alternatively, this can be used to interactively drive PSBT signing for + //funding for partially complete funding transactions. + FundingStateStep(ctx context.Context, in *FundingTransitionMsg, opts ...grpc.CallOption) (*FundingStateStepResp, error) + //* //ChannelAcceptor dispatches a bi-directional streaming RPC in which //OpenChannel requests are sent to the client and the client responds with //a boolean that tells LND whether or not to accept the channel. 
This allows @@ -10048,10 +13140,11 @@ type LightningClient interface { //when in debug builds of lnd. AbandonChannel(ctx context.Context, in *AbandonChannelRequest, opts ...grpc.CallOption) (*AbandonChannelResponse, error) //* lncli: `sendpayment` - //SendPayment dispatches a bi-directional streaming RPC for sending payments - //through the Lightning Network. A single RPC invocation creates a persistent - //bi-directional stream allowing clients to rapidly send payments through the - //Lightning Network with a single persistent connection. + //Deprecated, use routerrpc.SendPayment. SendPayment dispatches a + //bi-directional streaming RPC for sending payments through the Lightning + //Network. A single RPC invocation creates a persistent bi-directional + //stream allowing clients to rapidly send payments through the Lightning + //Network with a single persistent connection. SendPayment(ctx context.Context, opts ...grpc.CallOption) (Lightning_SendPaymentClient, error) //* //SendPaymentSync is the synchronous non-streaming version of SendPayment. @@ -10093,9 +13186,9 @@ type LightningClient interface { //notifying the client of newly added/settled invoices. The caller can //optionally specify the add_index and/or the settle_index. If the add_index //is specified, then we'll first start by sending add invoice events for all - //invoices with an add_index greater than the specified value. If the + //invoices with an add_index greater than the specified value. If the //settle_index is specified, the next, we'll send out all settle events for - //invoices with a settle_index greater than the specified value. One or both + //invoices with a settle_index greater than the specified value. One or both //of these fields can be set. If no fields are set, then we'll only send out //the latest add/settle events. 
SubscribeInvoices(ctx context.Context, in *InvoiceSubscription, opts ...grpc.CallOption) (Lightning_SubscribeInvoicesClient, error) @@ -10114,10 +13207,14 @@ type LightningClient interface { //DescribeGraph returns a description of the latest graph state from the //point of view of the node. The graph information is partitioned into two //components: all the nodes/vertexes, and all the edges that connect the - //vertexes themselves. As this is a directed graph, the edges also contain + //vertexes themselves. As this is a directed graph, the edges also contain //the node directional specific routing policy which includes: the time lock //delta, fee information, etc. DescribeGraph(ctx context.Context, in *ChannelGraphRequest, opts ...grpc.CallOption) (*ChannelGraph, error) + //* lncli: `getnodemetrics` + //GetNodeMetrics returns node metrics calculated from the graph. Currently + //the only supported metric is betweenness centrality of individual nodes. + GetNodeMetrics(ctx context.Context, in *NodeMetricsRequest, opts ...grpc.CallOption) (*NodeMetricsResponse, error) //* lncli: `getchaninfo` //GetChanInfo returns the latest authenticated network announcement for the //given channel identified by its channel ID: an 8-byte integer which @@ -10173,7 +13270,7 @@ type LightningClient interface { // //A list of forwarding events are returned. The size of each forwarding event //is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB. - //As a result each message can only contain 50k entries. Each response has + //As a result each message can only contain 50k entries. Each response has //the index offset of the last entry. The index offset can be provided to the //request to allow the caller to skip a series of records. 
ForwardingHistory(ctx context.Context, in *ForwardingHistoryRequest, opts ...grpc.CallOption) (*ForwardingHistoryResponse, error) @@ -10212,6 +13309,11 @@ type LightningClient interface { //ups, but the updated set of encrypted multi-chan backups with the closed //channel(s) removed. SubscribeChannelBackups(ctx context.Context, in *ChannelBackupSubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelBackupsClient, error) + //* lncli: `bakemacaroon` + //BakeMacaroon allows the creation of a new macaroon with custom read and + //write permissions. No first-party caveats are added since this can be done + //offline. + BakeMacaroon(ctx context.Context, in *BakeMacaroonRequest, opts ...grpc.CallOption) (*BakeMacaroonResponse, error) } type lightningClient struct { @@ -10371,6 +13473,38 @@ func (c *lightningClient) ListPeers(ctx context.Context, in *ListPeersRequest, o return out, nil } +func (c *lightningClient) SubscribePeerEvents(ctx context.Context, in *PeerEventSubscription, opts ...grpc.CallOption) (Lightning_SubscribePeerEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[1], "/lnrpc.Lightning/SubscribePeerEvents", opts...) 
+ if err != nil { + return nil, err + } + x := &lightningSubscribePeerEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Lightning_SubscribePeerEventsClient interface { + Recv() (*PeerEvent, error) + grpc.ClientStream +} + +type lightningSubscribePeerEventsClient struct { + grpc.ClientStream +} + +func (x *lightningSubscribePeerEventsClient) Recv() (*PeerEvent, error) { + m := new(PeerEvent) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *lightningClient) GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) { out := new(GetInfoResponse) err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetInfo", in, out, opts...) @@ -10399,7 +13533,7 @@ func (c *lightningClient) ListChannels(ctx context.Context, in *ListChannelsRequ } func (c *lightningClient) SubscribeChannelEvents(ctx context.Context, in *ChannelEventSubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[1], "/lnrpc.Lightning/SubscribeChannelEvents", opts...) + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[2], "/lnrpc.Lightning/SubscribeChannelEvents", opts...) if err != nil { return nil, err } @@ -10449,7 +13583,7 @@ func (c *lightningClient) OpenChannelSync(ctx context.Context, in *OpenChannelRe } func (c *lightningClient) OpenChannel(ctx context.Context, in *OpenChannelRequest, opts ...grpc.CallOption) (Lightning_OpenChannelClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[2], "/lnrpc.Lightning/OpenChannel", opts...) + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[3], "/lnrpc.Lightning/OpenChannel", opts...) 
if err != nil { return nil, err } @@ -10480,8 +13614,17 @@ func (x *lightningOpenChannelClient) Recv() (*OpenStatusUpdate, error) { return m, nil } +func (c *lightningClient) FundingStateStep(ctx context.Context, in *FundingTransitionMsg, opts ...grpc.CallOption) (*FundingStateStepResp, error) { + out := new(FundingStateStepResp) + err := c.cc.Invoke(ctx, "/lnrpc.Lightning/FundingStateStep", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *lightningClient) ChannelAcceptor(ctx context.Context, opts ...grpc.CallOption) (Lightning_ChannelAcceptorClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[3], "/lnrpc.Lightning/ChannelAcceptor", opts...) + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[4], "/lnrpc.Lightning/ChannelAcceptor", opts...) if err != nil { return nil, err } @@ -10512,7 +13655,7 @@ func (x *lightningChannelAcceptorClient) Recv() (*ChannelAcceptRequest, error) { } func (c *lightningClient) CloseChannel(ctx context.Context, in *CloseChannelRequest, opts ...grpc.CallOption) (Lightning_CloseChannelClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[4], "/lnrpc.Lightning/CloseChannel", opts...) + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[5], "/lnrpc.Lightning/CloseChannel", opts...) if err != nil { return nil, err } @@ -10552,8 +13695,9 @@ func (c *lightningClient) AbandonChannel(ctx context.Context, in *AbandonChannel return out, nil } +// Deprecated: Do not use. func (c *lightningClient) SendPayment(ctx context.Context, opts ...grpc.CallOption) (Lightning_SendPaymentClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[5], "/lnrpc.Lightning/SendPayment", opts...) + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[6], "/lnrpc.Lightning/SendPayment", opts...) 
if err != nil { return nil, err } @@ -10593,7 +13737,7 @@ func (c *lightningClient) SendPaymentSync(ctx context.Context, in *SendRequest, } func (c *lightningClient) SendToRoute(ctx context.Context, opts ...grpc.CallOption) (Lightning_SendToRouteClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[6], "/lnrpc.Lightning/SendToRoute", opts...) + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[7], "/lnrpc.Lightning/SendToRoute", opts...) if err != nil { return nil, err } @@ -10660,7 +13804,7 @@ func (c *lightningClient) LookupInvoice(ctx context.Context, in *PaymentHash, op } func (c *lightningClient) SubscribeInvoices(ctx context.Context, in *InvoiceSubscription, opts ...grpc.CallOption) (Lightning_SubscribeInvoicesClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[7], "/lnrpc.Lightning/SubscribeInvoices", opts...) + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[8], "/lnrpc.Lightning/SubscribeInvoices", opts...) if err != nil { return nil, err } @@ -10727,6 +13871,15 @@ func (c *lightningClient) DescribeGraph(ctx context.Context, in *ChannelGraphReq return out, nil } +func (c *lightningClient) GetNodeMetrics(ctx context.Context, in *NodeMetricsRequest, opts ...grpc.CallOption) (*NodeMetricsResponse, error) { + out := new(NodeMetricsResponse) + err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetNodeMetrics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *lightningClient) GetChanInfo(ctx context.Context, in *ChanInfoRequest, opts ...grpc.CallOption) (*ChannelEdge, error) { out := new(ChannelEdge) err := c.cc.Invoke(ctx, "/lnrpc.Lightning/GetChanInfo", in, out, opts...) 
@@ -10773,7 +13926,7 @@ func (c *lightningClient) StopDaemon(ctx context.Context, in *StopRequest, opts } func (c *lightningClient) SubscribeChannelGraph(ctx context.Context, in *GraphTopologySubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelGraphClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[8], "/lnrpc.Lightning/SubscribeChannelGraph", opts...) + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[9], "/lnrpc.Lightning/SubscribeChannelGraph", opts...) if err != nil { return nil, err } @@ -10877,7 +14030,7 @@ func (c *lightningClient) RestoreChannelBackups(ctx context.Context, in *Restore } func (c *lightningClient) SubscribeChannelBackups(ctx context.Context, in *ChannelBackupSubscription, opts ...grpc.CallOption) (Lightning_SubscribeChannelBackupsClient, error) { - stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[9], "/lnrpc.Lightning/SubscribeChannelBackups", opts...) + stream, err := c.cc.NewStream(ctx, &_Lightning_serviceDesc.Streams[10], "/lnrpc.Lightning/SubscribeChannelBackups", opts...) if err != nil { return nil, err } @@ -10908,6 +14061,15 @@ func (x *lightningSubscribeChannelBackupsClient) Recv() (*ChanBackupSnapshot, er return m, nil } +func (c *lightningClient) BakeMacaroon(ctx context.Context, in *BakeMacaroonRequest, opts ...grpc.CallOption) (*BakeMacaroonResponse, error) { + out := new(BakeMacaroonResponse) + err := c.cc.Invoke(ctx, "/lnrpc.Lightning/BakeMacaroon", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // LightningServer is the server API for Lightning service. type LightningServer interface { //* lncli: `walletbalance` @@ -10976,6 +14138,11 @@ type LightningServer interface { //* lncli: `listpeers` //ListPeers returns a verbose listing of all currently active peers. 
ListPeers(context.Context, *ListPeersRequest) (*ListPeersResponse, error) + //* + //SubscribePeerEvents creates a uni-directional stream from the server to + //the client in which any events relevant to the state of peers are sent + //over. Events include peers going online and offline. + SubscribePeerEvents(*PeerEventSubscription, Lightning_SubscribePeerEventsServer) error //* lncli: `getinfo` //GetInfo returns general information concerning the lightning node including //it's identity pubkey, alias, the chains it is connected to, and information @@ -11012,9 +14179,22 @@ type LightningServer interface { //request to a remote peer. Users are able to specify a target number of //blocks that the funding transaction should be confirmed in, or a manual fee //rate to us for the funding transaction. If neither are specified, then a - //lax block confirmation target is used. + //lax block confirmation target is used. Each OpenStatusUpdate will return + //the pending channel ID of the in-progress channel. Depending on the + //arguments specified in the OpenChannelRequest, this pending channel ID can + //then be used to manually progress the channel funding flow. OpenChannel(*OpenChannelRequest, Lightning_OpenChannelServer) error //* + //FundingStateStep is an advanced funding related call that allows the caller + //to either execute some preparatory steps for a funding workflow, or + //manually progress a funding workflow. The primary way a funding flow is + //identified is via its pending channel ID. As an example, this method can be + //used to specify that we're expecting a funding flow for a particular + //pending channel ID, for which we need to use specific parameters. + //Alternatively, this can be used to interactively drive PSBT signing for + //funding for partially complete funding transactions. 
+ FundingStateStep(context.Context, *FundingTransitionMsg) (*FundingStateStepResp, error) + //* //ChannelAcceptor dispatches a bi-directional streaming RPC in which //OpenChannel requests are sent to the client and the client responds with //a boolean that tells LND whether or not to accept the channel. This allows @@ -11037,10 +14217,11 @@ type LightningServer interface { //when in debug builds of lnd. AbandonChannel(context.Context, *AbandonChannelRequest) (*AbandonChannelResponse, error) //* lncli: `sendpayment` - //SendPayment dispatches a bi-directional streaming RPC for sending payments - //through the Lightning Network. A single RPC invocation creates a persistent - //bi-directional stream allowing clients to rapidly send payments through the - //Lightning Network with a single persistent connection. + //Deprecated, use routerrpc.SendPayment. SendPayment dispatches a + //bi-directional streaming RPC for sending payments through the Lightning + //Network. A single RPC invocation creates a persistent bi-directional + //stream allowing clients to rapidly send payments through the Lightning + //Network with a single persistent connection. SendPayment(Lightning_SendPaymentServer) error //* //SendPaymentSync is the synchronous non-streaming version of SendPayment. @@ -11082,9 +14263,9 @@ type LightningServer interface { //notifying the client of newly added/settled invoices. The caller can //optionally specify the add_index and/or the settle_index. If the add_index //is specified, then we'll first start by sending add invoice events for all - //invoices with an add_index greater than the specified value. If the + //invoices with an add_index greater than the specified value. If the //settle_index is specified, the next, we'll send out all settle events for - //invoices with a settle_index greater than the specified value. One or both + //invoices with a settle_index greater than the specified value. One or both //of these fields can be set. 
If no fields are set, then we'll only send out //the latest add/settle events. SubscribeInvoices(*InvoiceSubscription, Lightning_SubscribeInvoicesServer) error @@ -11103,10 +14284,14 @@ type LightningServer interface { //DescribeGraph returns a description of the latest graph state from the //point of view of the node. The graph information is partitioned into two //components: all the nodes/vertexes, and all the edges that connect the - //vertexes themselves. As this is a directed graph, the edges also contain + //vertexes themselves. As this is a directed graph, the edges also contain //the node directional specific routing policy which includes: the time lock //delta, fee information, etc. DescribeGraph(context.Context, *ChannelGraphRequest) (*ChannelGraph, error) + //* lncli: `getnodemetrics` + //GetNodeMetrics returns node metrics calculated from the graph. Currently + //the only supported metric is betweenness centrality of individual nodes. + GetNodeMetrics(context.Context, *NodeMetricsRequest) (*NodeMetricsResponse, error) //* lncli: `getchaninfo` //GetChanInfo returns the latest authenticated network announcement for the //given channel identified by its channel ID: an 8-byte integer which @@ -11162,7 +14347,7 @@ type LightningServer interface { // //A list of forwarding events are returned. The size of each forwarding event //is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB. - //As a result each message can only contain 50k entries. Each response has + //As a result each message can only contain 50k entries. Each response has //the index offset of the last entry. The index offset can be provided to the //request to allow the caller to skip a series of records. ForwardingHistory(context.Context, *ForwardingHistoryRequest) (*ForwardingHistoryResponse, error) @@ -11201,6 +14386,11 @@ type LightningServer interface { //ups, but the updated set of encrypted multi-chan backups with the closed //channel(s) removed. 
SubscribeChannelBackups(*ChannelBackupSubscription, Lightning_SubscribeChannelBackupsServer) error + //* lncli: `bakemacaroon` + //BakeMacaroon allows the creation of a new macaroon with custom read and + //write permissions. No first-party caveats are added since this can be done + //offline. + BakeMacaroon(context.Context, *BakeMacaroonRequest) (*BakeMacaroonResponse, error) } func RegisterLightningServer(s *grpc.Server, srv LightningServer) { @@ -11462,6 +14652,27 @@ func _Lightning_ListPeers_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } +func _Lightning_SubscribePeerEvents_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(PeerEventSubscription) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(LightningServer).SubscribePeerEvents(m, &lightningSubscribePeerEventsServer{stream}) +} + +type Lightning_SubscribePeerEventsServer interface { + Send(*PeerEvent) error + grpc.ServerStream +} + +type lightningSubscribePeerEventsServer struct { + grpc.ServerStream +} + +func (x *lightningSubscribePeerEventsServer) Send(m *PeerEvent) error { + return x.ServerStream.SendMsg(m) +} + func _Lightning_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetInfoRequest) if err := dec(in); err != nil { @@ -11594,6 +14805,24 @@ func (x *lightningOpenChannelServer) Send(m *OpenStatusUpdate) error { return x.ServerStream.SendMsg(m) } +func _Lightning_FundingStateStep_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FundingTransitionMsg) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LightningServer).FundingStateStep(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/lnrpc.Lightning/FundingStateStep", + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LightningServer).FundingStateStep(ctx, req.(*FundingTransitionMsg)) + } + return interceptor(ctx, in, info, handler) +} + func _Lightning_ChannelAcceptor_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(LightningServer).ChannelAcceptor(&lightningChannelAcceptorServer{stream}) } @@ -11894,6 +15123,24 @@ func _Lightning_DescribeGraph_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _Lightning_GetNodeMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeMetricsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LightningServer).GetNodeMetrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/lnrpc.Lightning/GetNodeMetrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LightningServer).GetNodeMetrics(ctx, req.(*NodeMetricsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Lightning_GetChanInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ChanInfoRequest) if err := dec(in); err != nil { @@ -12170,6 +15417,24 @@ func (x *lightningSubscribeChannelBackupsServer) Send(m *ChanBackupSnapshot) err return x.ServerStream.SendMsg(m) } +func _Lightning_BakeMacaroon_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BakeMacaroonRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LightningServer).BakeMacaroon(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/lnrpc.Lightning/BakeMacaroon", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LightningServer).BakeMacaroon(ctx, req.(*BakeMacaroonRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Lightning_serviceDesc = grpc.ServiceDesc{ ServiceName: "lnrpc.Lightning", HandlerType: (*LightningServer)(nil), @@ -12246,6 +15511,10 @@ var _Lightning_serviceDesc = grpc.ServiceDesc{ MethodName: "OpenChannelSync", Handler: _Lightning_OpenChannelSync_Handler, }, + { + MethodName: "FundingStateStep", + Handler: _Lightning_FundingStateStep_Handler, + }, { MethodName: "AbandonChannel", Handler: _Lightning_AbandonChannel_Handler, @@ -12286,6 +15555,10 @@ var _Lightning_serviceDesc = grpc.ServiceDesc{ MethodName: "DescribeGraph", Handler: _Lightning_DescribeGraph_Handler, }, + { + MethodName: "GetNodeMetrics", + Handler: _Lightning_GetNodeMetrics_Handler, + }, { MethodName: "GetChanInfo", Handler: _Lightning_GetChanInfo_Handler, @@ -12338,6 +15611,10 @@ var _Lightning_serviceDesc = grpc.ServiceDesc{ MethodName: "RestoreChannelBackups", Handler: _Lightning_RestoreChannelBackups_Handler, }, + { + MethodName: "BakeMacaroon", + Handler: _Lightning_BakeMacaroon_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -12345,6 +15622,11 @@ var _Lightning_serviceDesc = grpc.ServiceDesc{ Handler: _Lightning_SubscribeTransactions_Handler, ServerStreams: true, }, + { + StreamName: "SubscribePeerEvents", + Handler: _Lightning_SubscribePeerEvents_Handler, + ServerStreams: true, + }, { StreamName: "SubscribeChannelEvents", Handler: _Lightning_SubscribeChannelEvents_Handler, diff --git a/lnrpc/rpc.pb.gw.go b/lnrpc/rpc.pb.gw.go index 270ba306e1..78aae3c181 100644 --- a/lnrpc/rpc.pb.gw.go +++ b/lnrpc/rpc.pb.gw.go @@ -9,13 +9,13 @@ It translates gRPC into RESTful JSON APIs. 
package lnrpc import ( + "context" "io" "net/http" "github.com/golang/protobuf/proto" "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/grpc-ecosystem/grpc-gateway/utilities" - "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" @@ -49,7 +49,11 @@ func request_WalletUnlocker_InitWallet_0(ctx context.Context, marshaler runtime. var protoReq InitWalletRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -62,7 +66,11 @@ func request_WalletUnlocker_UnlockWallet_0(ctx context.Context, marshaler runtim var protoReq UnlockWalletRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -75,7 +83,11 @@ func request_WalletUnlocker_ChangePassword_0(ctx context.Context, marshaler runt var protoReq ChangePasswordRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ 
-132,7 +144,11 @@ func request_Lightning_SendCoins_0(ctx context.Context, marshaler runtime.Marsha var protoReq SendCoinsRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -179,7 +195,11 @@ func request_Lightning_SignMessage_0(ctx context.Context, marshaler runtime.Mars var protoReq SignMessageRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -192,7 +212,11 @@ func request_Lightning_VerifyMessage_0(ctx context.Context, marshaler runtime.Ma var protoReq VerifyMessageRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -205,7 +229,11 @@ func request_Lightning_ConnectPeer_0(ctx context.Context, marshaler runtime.Mars var protoReq ConnectPeerRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if 
berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -241,10 +269,18 @@ func request_Lightning_DisconnectPeer_0(ctx context.Context, marshaler runtime.M } +var ( + filter_Lightning_ListPeers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + func request_Lightning_ListPeers_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ListPeersRequest var metadata runtime.ServerMetadata + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_ListPeers_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.ListPeers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err @@ -306,7 +342,11 @@ func request_Lightning_OpenChannelSync_0(ctx context.Context, marshaler runtime. var protoReq OpenChannelRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -419,7 +459,11 @@ func request_Lightning_SendPaymentSync_0(ctx context.Context, marshaler runtime. 
var protoReq SendRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -432,7 +476,11 @@ func request_Lightning_SendToRouteSync_0(ctx context.Context, marshaler runtime. var protoReq SendToRouteRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -445,7 +493,11 @@ func request_Lightning_AddInvoice_0(ctx context.Context, marshaler runtime.Marsh var protoReq Invoice var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -601,6 +653,23 @@ func request_Lightning_DescribeGraph_0(ctx context.Context, marshaler runtime.Ma } +var ( + filter_Lightning_GetNodeMetrics_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Lightning_GetNodeMetrics_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var protoReq NodeMetricsRequest + var metadata runtime.ServerMetadata + + if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_Lightning_GetNodeMetrics_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.GetNodeMetrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + func request_Lightning_GetChanInfo_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { var protoReq ChanInfoRequest var metadata runtime.ServerMetadata @@ -731,7 +800,11 @@ func request_Lightning_UpdateChannelPolicy_0(ctx context.Context, marshaler runt var protoReq PolicyUpdateRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -744,7 +817,11 @@ func request_Lightning_ForwardingHistory_0(ctx context.Context, marshaler runtim var protoReq ForwardingHistoryRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -812,7 +889,11 @@ func request_Lightning_VerifyChanBackup_0(ctx context.Context, marshaler runtime var protoReq 
ChanBackupSnapshot var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -825,7 +906,11 @@ func request_Lightning_RestoreChannelBackups_0(ctx context.Context, marshaler ru var protoReq RestoreChanBackupRequest var metadata runtime.ServerMetadata - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } @@ -834,6 +919,23 @@ func request_Lightning_RestoreChannelBackups_0(ctx context.Context, marshaler ru } +func request_Lightning_BakeMacaroon_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq BakeMacaroonRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.BakeMacaroon(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + // RegisterWalletUnlockerHandlerFromEndpoint is same as RegisterWalletUnlockerHandler but // automatically 
dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterWalletUnlockerHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { @@ -844,14 +946,14 @@ func RegisterWalletUnlockerHandlerFromEndpoint(ctx context.Context, mux *runtime defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } }() }() @@ -862,20 +964,19 @@ func RegisterWalletUnlockerHandlerFromEndpoint(ctx context.Context, mux *runtime // RegisterWalletUnlockerHandler registers the http handlers for service WalletUnlocker to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterWalletUnlockerHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewWalletUnlockerClient(conn) + return RegisterWalletUnlockerHandlerClient(ctx, mux, NewWalletUnlockerClient(conn)) +} + +// RegisterWalletUnlockerHandlerClient registers the http handlers for service WalletUnlocker +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "WalletUnlockerClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WalletUnlockerClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "WalletUnlockerClient" to call the correct interceptors. 
+func RegisterWalletUnlockerHandlerClient(ctx context.Context, mux *runtime.ServeMux, client WalletUnlockerClient) error { mux.Handle("GET", pattern_WalletUnlocker_GenSeed_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -894,17 +995,8 @@ func RegisterWalletUnlockerHandler(ctx context.Context, mux *runtime.ServeMux, c }) mux.Handle("POST", pattern_WalletUnlocker_InitWallet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -923,17 +1015,8 @@ func RegisterWalletUnlockerHandler(ctx context.Context, mux *runtime.ServeMux, c }) mux.Handle("POST", pattern_WalletUnlocker_UnlockWallet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := 
runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -952,17 +1035,8 @@ func RegisterWalletUnlockerHandler(ctx context.Context, mux *runtime.ServeMux, c }) mux.Handle("POST", pattern_WalletUnlocker_ChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1013,14 +1087,14 @@ func RegisterLightningHandlerFromEndpoint(ctx context.Context, mux *runtime.Serv defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) } }() }() @@ -1031,20 +1105,19 @@ func RegisterLightningHandlerFromEndpoint(ctx context.Context, mux *runtime.Serv // RegisterLightningHandler registers the http handlers for service Lightning to "mux". // The handlers forward requests to the grpc endpoint over "conn". func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewLightningClient(conn) + return RegisterLightningHandlerClient(ctx, mux, NewLightningClient(conn)) +} + +// RegisterLightningHandlerClient registers the http handlers for service Lightning +// to "mux". 
The handlers forward requests to the grpc endpoint over the given implementation of "LightningClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LightningClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LightningClient" to call the correct interceptors. +func RegisterLightningHandlerClient(ctx context.Context, mux *runtime.ServeMux, client LightningClient) error { mux.Handle("GET", pattern_Lightning_WalletBalance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1063,17 +1136,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_ChannelBalance_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1092,17 +1156,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_GetTransactions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - 
ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1121,17 +1176,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_EstimateFee_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1150,17 +1196,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_SendCoins_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1179,17 +1216,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_ListUnspent_0, func(w http.ResponseWriter, req 
*http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1208,17 +1236,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_NewAddress_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1237,17 +1256,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_SignMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1266,17 +1276,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", 
pattern_Lightning_VerifyMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1295,17 +1296,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_ConnectPeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1324,17 +1316,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("DELETE", pattern_Lightning_DisconnectPeer_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1353,17 +1336,8 @@ func RegisterLightningHandler(ctx 
context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_ListPeers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1382,17 +1356,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_GetInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1411,17 +1376,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_PendingChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ 
-1440,17 +1396,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_ListChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1469,17 +1416,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_ClosedChannels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1498,17 +1436,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_OpenChannelSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := 
runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1527,17 +1456,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("DELETE", pattern_Lightning_CloseChannel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1556,17 +1476,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("DELETE", pattern_Lightning_AbandonChannel_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1585,17 +1496,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_SendPaymentSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, 
outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1614,17 +1516,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_SendToRouteSync_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1643,17 +1536,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_AddInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1672,17 +1556,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_ListInvoices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } 
- }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1701,17 +1576,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_LookupInvoice_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1730,17 +1596,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_SubscribeInvoices_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1759,17 +1616,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_DecodePayReq_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { 
- select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1788,17 +1636,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_ListPayments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1817,17 +1656,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("DELETE", pattern_Lightning_DeleteAllPayments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1846,17 +1676,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_DescribeGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := 
w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1874,18 +1695,29 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) - mux.Handle("GET", pattern_Lightning_GetChanInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + mux.Handle("GET", pattern_Lightning_GetNodeMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lightning_GetNodeMetrics_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return } + + forward_Lightning_GetNodeMetrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Lightning_GetChanInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1904,17 +1736,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_GetNodeInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1933,17 +1756,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_QueryRoutes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1962,17 +1776,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_GetNetworkInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := 
context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -1991,17 +1796,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_FeeReport_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -2020,17 +1816,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_UpdateChannelPolicy_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -2049,17 +1836,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_ForwardingHistory_0, func(w http.ResponseWriter, req 
*http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -2078,17 +1856,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_ExportChannelBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -2107,17 +1876,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("GET", pattern_Lightning_ExportAllChannelBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -2136,17 +1896,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) 
mux.Handle("POST", pattern_Lightning_VerifyChanBackup_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -2165,17 +1916,8 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) mux.Handle("POST", pattern_Lightning_RestoreChannelBackups_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(req.Context()) defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) rctx, err := runtime.AnnotateContext(ctx, mux, req) if err != nil { @@ -2193,6 +1935,26 @@ func RegisterLightningHandler(ctx context.Context, mux *runtime.ServeMux, conn * }) + mux.Handle("POST", pattern_Lightning_BakeMacaroon_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lightning_BakeMacaroon_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + 
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lightning_BakeMacaroon_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -2255,6 +2017,8 @@ var ( pattern_Lightning_DescribeGraph_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "graph"}, "")) + pattern_Lightning_GetNodeMetrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "graph", "nodemetrics"}, "")) + pattern_Lightning_GetChanInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "graph", "edge", "chan_id"}, "")) pattern_Lightning_GetNodeInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "graph", "node", "pub_key"}, "")) @@ -2276,6 +2040,8 @@ var ( pattern_Lightning_VerifyChanBackup_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "channels", "backup", "verify"}, "")) pattern_Lightning_RestoreChannelBackups_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "channels", "backup", "restore"}, "")) + + pattern_Lightning_BakeMacaroon_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "macaroon"}, "")) ) var ( @@ -2337,6 +2103,8 @@ var ( forward_Lightning_DescribeGraph_0 = runtime.ForwardResponseMessage + forward_Lightning_GetNodeMetrics_0 = runtime.ForwardResponseMessage + forward_Lightning_GetChanInfo_0 = runtime.ForwardResponseMessage forward_Lightning_GetNodeInfo_0 = runtime.ForwardResponseMessage @@ -2358,4 +2126,6 @@ var ( forward_Lightning_VerifyChanBackup_0 = runtime.ForwardResponseMessage forward_Lightning_RestoreChannelBackups_0 = runtime.ForwardResponseMessage + + forward_Lightning_BakeMacaroon_0 = runtime.ForwardResponseMessage ) diff --git a/lnrpc/rpc.proto b/lnrpc/rpc.proto index 0a985863ba..7f58e59e99 100644 --- 
a/lnrpc/rpc.proto +++ b/lnrpc/rpc.proto @@ -10,19 +10,19 @@ option go_package = "github.com/lightningnetwork/lnd/lnrpc"; * Comments in this file will be directly parsed into the API * Documentation as descriptions of the associated method, message, or field. * These descriptions should go right above the definition of the object, and - * can be in either block or /// comment format. - * + * can be in either block or /// comment format. + * * One edge case exists where a // comment followed by a /// comment in the * next line will cause the description not to show up in the documentation. In * that instance, simply separate the two comments with a blank line. - * + * * An RPC method can be matched to an lncli command by placing a line in the * beginning of the description in exactly the following format: * lncli: `methodname` - * + * * Failure to specify the exact name of the command will cause documentation * generation to fail. - * + * * More information on how exactly the gRPC documentation is generated from * this proto file can be found here: * https://github.com/lightninglabs/lightning-api @@ -41,13 +41,13 @@ service WalletUnlocker { method should be used to commit the newly generated seed, and create the wallet. */ - rpc GenSeed(GenSeedRequest) returns (GenSeedResponse) { + rpc GenSeed (GenSeedRequest) returns (GenSeedResponse) { option (google.api.http) = { get: "/v1/genseed" }; } - /** + /** InitWallet is used when lnd is starting up for the first time to fully initialize the daemon and its internal wallet. At the very least a wallet password must be provided. This will be used to encrypt sensitive material @@ -61,7 +61,7 @@ service WalletUnlocker { seed, then present it to the user. Once it has been verified by the user, the seed can be fed into this RPC in order to commit the new wallet. 
*/ - rpc InitWallet(InitWalletRequest) returns (InitWalletResponse) { + rpc InitWallet (InitWalletRequest) returns (InitWalletResponse) { option (google.api.http) = { post: "/v1/initwallet" body: "*" @@ -72,7 +72,7 @@ service WalletUnlocker { UnlockWallet is used at startup of lnd to provide a password to unlock the wallet database. */ - rpc UnlockWallet(UnlockWalletRequest) returns (UnlockWalletResponse) { + rpc UnlockWallet (UnlockWalletRequest) returns (UnlockWalletResponse) { option (google.api.http) = { post: "/v1/unlockwallet" body: "*" @@ -83,7 +83,8 @@ service WalletUnlocker { ChangePassword changes the password of the encrypted wallet. This will automatically unlock the wallet database if successful. */ - rpc ChangePassword (ChangePasswordRequest) returns (ChangePasswordResponse) { + rpc ChangePassword (ChangePasswordRequest) + returns (ChangePasswordResponse) { option (google.api.http) = { post: "/v1/changepassword" body: "*" @@ -94,13 +95,15 @@ service WalletUnlocker { message GenSeedRequest { /** aezeed_passphrase is an optional user provided passphrase that will be used - to encrypt the generated aezeed cipher seed. + to encrypt the generated aezeed cipher seed. When using REST, this field + must be encoded as base64. */ bytes aezeed_passphrase = 1; /** seed_entropy is an optional 16-bytes generated via CSPRNG. If not specified, then a fresh set of randomness will be used to create the seed. + When using REST, this field must be encoded as base64. */ bytes seed_entropy = 2; } @@ -125,7 +128,8 @@ message InitWalletRequest { /** wallet_password is the passphrase that should be used to encrypt the wallet. This MUST be at least 8 chars in length. After creation, this - password is required to unlock the daemon. + password is required to unlock the daemon. When using REST, this field + must be encoded as base64. 
*/ bytes wallet_password = 1; @@ -138,7 +142,8 @@ message InitWalletRequest { /** aezeed_passphrase is an optional user provided passphrase that will be used - to encrypt the generated aezeed cipher seed. + to encrypt the generated aezeed cipher seed. When using REST, this field + must be encoded as base64. */ bytes aezeed_passphrase = 3; @@ -168,7 +173,7 @@ message UnlockWalletRequest { /** wallet_password should be the current valid passphrase for the daemon. This will be required to decrypt on-disk material that the daemon requires to - function properly. + function properly. When using REST, this field must be encoded as base64. */ bytes wallet_password = 1; @@ -191,22 +196,24 @@ message UnlockWalletRequest { */ ChanBackupSnapshot channel_backups = 3; } -message UnlockWalletResponse {} +message UnlockWalletResponse { +} message ChangePasswordRequest { /** current_password should be the current valid passphrase used to unlock the - daemon. + daemon. When using REST, this field must be encoded as base64. */ bytes current_password = 1; /** new_password should be the new passphrase that will be needed to unlock the - daemon. + daemon. When using REST, this field must be encoded as base64. */ bytes new_password = 2; } -message ChangePasswordResponse {} +message ChangePasswordResponse { +} service Lightning { /** lncli: `walletbalance` @@ -224,7 +231,8 @@ service Lightning { ChannelBalance returns the total funds available across all open channels in satoshis. */ - rpc ChannelBalance (ChannelBalanceRequest) returns (ChannelBalanceResponse) { + rpc ChannelBalance (ChannelBalanceRequest) + returns (ChannelBalanceResponse) { option (google.api.http) = { get: "/v1/balance/channels" }; @@ -266,7 +274,7 @@ service Lightning { /** lncli: `listunspent` ListUnspent returns a list of all utxos spendable by the wallet with a - number of confirmations between the specified minimum and maximum. + number of confirmations between the specified minimum and maximum. 
*/ rpc ListUnspent (ListUnspentRequest) returns (ListUnspentResponse) { option (google.api.http) = { @@ -279,7 +287,8 @@ service Lightning { the client in which any newly discovered transactions relevant to the wallet are sent over. */ - rpc SubscribeTransactions (GetTransactionsRequest) returns (stream Transaction); + rpc SubscribeTransactions (GetTransactionsRequest) + returns (stream Transaction); /** lncli: `sendmany` SendMany handles a request for a transaction that creates multiple specified @@ -340,7 +349,8 @@ service Lightning { given pubKey. In the case that we currently have a pending or active channel with the target peer, then this action will be not be allowed. */ - rpc DisconnectPeer (DisconnectPeerRequest) returns (DisconnectPeerResponse) { + rpc DisconnectPeer (DisconnectPeerRequest) + returns (DisconnectPeerResponse) { option (google.api.http) = { delete: "/v1/peers/{pub_key}" }; @@ -355,6 +365,13 @@ service Lightning { }; } + /** + SubscribePeerEvents creates a uni-directional stream from the server to + the client in which any events relevant to the state of peers are sent + over. Events include peers going online and offline. + */ + rpc SubscribePeerEvents (PeerEventSubscription) returns (stream PeerEvent); + /** lncli: `getinfo` GetInfo returns general information concerning the lightning node including it's identity pubkey, alias, the chains it is connected to, and information @@ -373,9 +390,10 @@ service Lightning { workflow and is waiting for confirmations for the funding txn, or is in the process of closure, either initiated cooperatively or non-cooperatively. */ - rpc PendingChannels (PendingChannelsRequest) returns (PendingChannelsResponse) { + rpc PendingChannels (PendingChannelsRequest) + returns (PendingChannelsResponse) { option (google.api.http) = { - get: "/v1/channels/pending" + get: "/v1/channels/pending" }; } @@ -395,19 +413,20 @@ service Lightning { sent over. 
Events include new active channels, inactive channels, and closed channels. */ - rpc SubscribeChannelEvents (ChannelEventSubscription) returns (stream ChannelEventUpdate); + rpc SubscribeChannelEvents (ChannelEventSubscription) + returns (stream ChannelEventUpdate); /** lncli: `closedchannels` ClosedChannels returns a description of all the closed channels that this node was a participant in. */ - rpc ClosedChannels (ClosedChannelsRequest) returns (ClosedChannelsResponse) { + rpc ClosedChannels (ClosedChannelsRequest) + returns (ClosedChannelsResponse) { option (google.api.http) = { get: "/v1/channels/closed" }; } - /** OpenChannelSync is a synchronous version of the OpenChannel RPC call. This call is meant to be consumed by clients to the REST proxy. As with all @@ -426,10 +445,25 @@ service Lightning { request to a remote peer. Users are able to specify a target number of blocks that the funding transaction should be confirmed in, or a manual fee rate to us for the funding transaction. If neither are specified, then a - lax block confirmation target is used. + lax block confirmation target is used. Each OpenStatusUpdate will return + the pending channel ID of the in-progress channel. Depending on the + arguments specified in the OpenChannelRequest, this pending channel ID can + then be used to manually progress the channel funding flow. */ rpc OpenChannel (OpenChannelRequest) returns (stream OpenStatusUpdate); + /** + FundingStateStep is an advanced funding related call that allows the caller + to either execute some preparatory steps for a funding workflow, or + manually progress a funding workflow. The primary way a funding flow is + identified is via its pending channel ID. As an example, this method can be + used to specify that we're expecting a funding flow for a particular + pending channel ID, for which we need to use specific parameters. 
+ Alternatively, this can be used to interactively drive PSBT signing for + funding for partially complete funding transactions. + */ + rpc FundingStateStep (FundingTransitionMsg) returns (FundingStateStepResp); + /** ChannelAcceptor dispatches a bi-directional streaming RPC in which OpenChannel requests are sent to the client and the client responds with @@ -437,7 +471,8 @@ service Lightning { node operators to specify their own criteria for accepting inbound channels through a single persistent connection. */ - rpc ChannelAcceptor (stream ChannelAcceptResponse) returns (stream ChannelAcceptRequest); + rpc ChannelAcceptor (stream ChannelAcceptResponse) + returns (stream ChannelAcceptRequest); /** lncli: `closechannel` CloseChannel attempts to close an active channel identified by its channel @@ -460,20 +495,23 @@ service Lightning { channels due to bugs fixed in newer versions of lnd. Only available when in debug builds of lnd. */ - rpc AbandonChannel (AbandonChannelRequest) returns (AbandonChannelResponse) { + rpc AbandonChannel (AbandonChannelRequest) + returns (AbandonChannelResponse) { option (google.api.http) = { delete: "/v1/channels/abandon/{channel_point.funding_txid_str}/{channel_point.output_index}" }; } - /** lncli: `sendpayment` - SendPayment dispatches a bi-directional streaming RPC for sending payments - through the Lightning Network. A single RPC invocation creates a persistent - bi-directional stream allowing clients to rapidly send payments through the - Lightning Network with a single persistent connection. - */ - rpc SendPayment (stream SendRequest) returns (stream SendResponse); + Deprecated, use routerrpc.SendPayment. SendPayment dispatches a + bi-directional streaming RPC for sending payments through the Lightning + Network. A single RPC invocation creates a persistent bi-directional + stream allowing clients to rapidly send payments through the Lightning + Network with a single persistent connection. 
+ */ + rpc SendPayment (stream SendRequest) returns (stream SendResponse) { + option deprecated = true; + } /** SendPaymentSync is the synchronous non-streaming version of SendPayment. @@ -494,7 +532,7 @@ service Lightning { allows users to specify a full route manually. This can be used for things like rebalancing, and atomic swaps. */ - rpc SendToRoute(stream SendToRouteRequest) returns (stream SendResponse); + rpc SendToRoute (stream SendToRouteRequest) returns (stream SendResponse); /** SendToRouteSync is a synchronous version of SendToRoute. It Will block @@ -550,9 +588,9 @@ service Lightning { notifying the client of newly added/settled invoices. The caller can optionally specify the add_index and/or the settle_index. If the add_index is specified, then we'll first start by sending add invoice events for all - invoices with an add_index greater than the specified value. If the + invoices with an add_index greater than the specified value. If the settle_index is specified, the next, we'll send out all settle events for - invoices with a settle_index greater than the specified value. One or both + invoices with a settle_index greater than the specified value. One or both of these fields can be set. If no fields are set, then we'll only send out the latest add/settle events. */ @@ -585,7 +623,8 @@ service Lightning { /** DeleteAllPayments deletes all outgoing payments from DB. */ - rpc DeleteAllPayments (DeleteAllPaymentsRequest) returns (DeleteAllPaymentsResponse) { + rpc DeleteAllPayments (DeleteAllPaymentsRequest) + returns (DeleteAllPaymentsResponse) { option (google.api.http) = { delete: "/v1/payments" }; @@ -595,7 +634,7 @@ service Lightning { DescribeGraph returns a description of the latest graph state from the point of view of the node. The graph information is partitioned into two components: all the nodes/vertexes, and all the edges that connect the - vertexes themselves. As this is a directed graph, the edges also contain + vertexes themselves. 
As this is a directed graph, the edges also contain the node directional specific routing policy which includes: the time lock delta, fee information, etc. */ @@ -605,6 +644,16 @@ service Lightning { }; } + /** lncli: `getnodemetrics` + GetNodeMetrics returns node metrics calculated from the graph. Currently + the only supported metric is betweenness centrality of individual nodes. + */ + rpc GetNodeMetrics (NodeMetricsRequest) returns (NodeMetricsResponse) { + option (google.api.http) = { + get: "/v1/graph/nodemetrics" + }; + } + /** lncli: `getchaninfo` GetChanInfo returns the latest authenticated network announcement for the given channel identified by its channel ID: an 8-byte integer which @@ -634,7 +683,7 @@ service Lightning { send an HTLC, also including the necessary information that should be present within the Sphinx packet encapsulated within the HTLC. */ - rpc QueryRoutes(QueryRoutesRequest) returns (QueryRoutesResponse) { + rpc QueryRoutes (QueryRoutesRequest) returns (QueryRoutesResponse) { option (google.api.http) = { get: "/v1/graph/routes/{pub_key}/{amt}" }; @@ -654,7 +703,7 @@ service Lightning { StopDaemon will send a shutdown request to the interrupt handler, triggering a graceful shutdown of the daemon. */ - rpc StopDaemon(StopRequest) returns (StopResponse); + rpc StopDaemon (StopRequest) returns (StopResponse); /** SubscribeChannelGraph launches a streaming RPC that allows the caller to @@ -664,7 +713,8 @@ service Lightning { channels being advertised, updates in the routing policy for a directional channel edge, and when channels are closed on-chain. 
*/ - rpc SubscribeChannelGraph(GraphTopologySubscription) returns (stream GraphTopologyUpdate); + rpc SubscribeChannelGraph (GraphTopologySubscription) + returns (stream GraphTopologyUpdate); /** lncli: `debuglevel` DebugLevel allows a caller to programmatically set the logging verbosity of @@ -678,7 +728,7 @@ service Lightning { FeeReport allows the caller to obtain a report detailing the current fee schedule enforced by the node globally for each channel. */ - rpc FeeReport(FeeReportRequest) returns (FeeReportResponse) { + rpc FeeReport (FeeReportRequest) returns (FeeReportResponse) { option (google.api.http) = { get: "/v1/fees" }; @@ -688,7 +738,8 @@ service Lightning { UpdateChannelPolicy allows the caller to update the fee schedule and channel policies for all channels globally, or a particular channel. */ - rpc UpdateChannelPolicy(PolicyUpdateRequest) returns (PolicyUpdateResponse) { + rpc UpdateChannelPolicy (PolicyUpdateRequest) + returns (PolicyUpdateResponse) { option (google.api.http) = { post: "/v1/chanpolicy" body: "*" @@ -703,11 +754,12 @@ service Lightning { A list of forwarding events are returned. The size of each forwarding event is 40 bytes, and the max message size able to be returned in gRPC is 4 MiB. - As a result each message can only contain 50k entries. Each response has + As a result each message can only contain 50k entries. Each response has the index offset of the last entry. The index offset can be provided to the request to allow the caller to skip a series of records. */ - rpc ForwardingHistory(ForwardingHistoryRequest) returns (ForwardingHistoryResponse) { + rpc ForwardingHistory (ForwardingHistoryRequest) + returns (ForwardingHistoryResponse) { option (google.api.http) = { post: "/v1/switch" body: "*" @@ -722,7 +774,8 @@ service Lightning { method once lnd is running, or via the InitWallet and UnlockWallet methods from the WalletUnlocker service. 
*/ - rpc ExportChannelBackup(ExportChannelBackupRequest) returns (ChannelBackup) { + rpc ExportChannelBackup (ExportChannelBackupRequest) + returns (ChannelBackup) { option (google.api.http) = { get: "/v1/channels/backup/{chan_point.funding_txid_str}/{chan_point.output_index}" }; @@ -735,7 +788,8 @@ service Lightning { as well, which contains a single encrypted blob containing the backups of each channel. */ - rpc ExportAllChannelBackups(ChanBackupExportRequest) returns (ChanBackupSnapshot) { + rpc ExportAllChannelBackups (ChanBackupExportRequest) + returns (ChanBackupSnapshot) { option (google.api.http) = { get: "/v1/channels/backup" }; @@ -746,7 +800,8 @@ service Lightning { snapshot. This method will accept either a packed Single or a packed Multi. Specifying both will result in an error. */ - rpc VerifyChanBackup(ChanBackupSnapshot) returns (VerifyChanBackupResponse) { + rpc VerifyChanBackup (ChanBackupSnapshot) + returns (VerifyChanBackupResponse) { option (google.api.http) = { post: "/v1/channels/backup/verify" body: "*" @@ -759,7 +814,8 @@ service Lightning { remaining within the channel. If we are able to unpack the backup, then the new channel will be shown under listchannels, as well as pending channels. */ - rpc RestoreChannelBackups(RestoreChanBackupRequest) returns (RestoreBackupResponse) { + rpc RestoreChannelBackups (RestoreChanBackupRequest) + returns (RestoreBackupResponse) { option (google.api.http) = { post: "/v1/channels/backup/restore" body: "*" @@ -775,93 +831,140 @@ service Lightning { ups, but the updated set of encrypted multi-chan backups with the closed channel(s) removed. */ - rpc SubscribeChannelBackups(ChannelBackupSubscription) returns (stream ChanBackupSnapshot) { + rpc SubscribeChannelBackups (ChannelBackupSubscription) + returns (stream ChanBackupSnapshot) { + }; + + /** lncli: `bakemacaroon` + BakeMacaroon allows the creation of a new macaroon with custom read and + write permissions. 
No first-party caveats are added since this can be done + offline. + */ + rpc BakeMacaroon (BakeMacaroonRequest) returns (BakeMacaroonResponse) { + option (google.api.http) = { + post: "/v1/macaroon" + body: "*" + }; }; } message Utxo { /// The type of address - AddressType type = 1 [json_name = "address_type"]; + AddressType address_type = 1; /// The address - string address = 2 [json_name = "address"]; + string address = 2; /// The value of the unspent coin in satoshis - int64 amount_sat = 3 [json_name = "amount_sat"]; + int64 amount_sat = 3; /// The pkscript in hex - string pk_script = 4 [json_name = "pk_script"]; + string pk_script = 4; /// The outpoint in format txid:n - OutPoint outpoint = 5 [json_name = "outpoint"]; + OutPoint outpoint = 5; /// The number of confirmations for the Utxo - int64 confirmations = 6 [json_name = "confirmations"]; + int64 confirmations = 6; } message Transaction { /// The transaction hash - string tx_hash = 1 [ json_name = "tx_hash" ]; + string tx_hash = 1; /// The transaction amount, denominated in satoshis - int64 amount = 2 [ json_name = "amount" ]; + int64 amount = 2; /// The number of confirmations - int32 num_confirmations = 3 [ json_name = "num_confirmations" ]; + int32 num_confirmations = 3; /// The hash of the block this transaction was included in - string block_hash = 4 [ json_name = "block_hash" ]; + string block_hash = 4; /// The height of the block this transaction was included in - int32 block_height = 5 [ json_name = "block_height" ]; + int32 block_height = 5; - /// Timestamp of this transaction - int64 time_stamp = 6 [ json_name = "time_stamp" ]; + /// Timestamp of this transaction + int64 time_stamp = 6; /// Fees paid for this transaction - int64 total_fees = 7 [ json_name = "total_fees" ]; + int64 total_fees = 7; /// Addresses that received funds for this transaction - repeated string dest_addresses = 8 [ json_name = "dest_addresses" ]; + repeated string dest_addresses = 8; /// The raw transaction hex. 
- string raw_tx_hex = 9 [ json_name = "raw_tx_hex" ]; + string raw_tx_hex = 9; } message GetTransactionsRequest { } message TransactionDetails { /// The list of transactions relevant to the wallet. - repeated Transaction transactions = 1 [json_name = "transactions"]; + repeated Transaction transactions = 1; } message FeeLimit { oneof limit { - /// The fee limit expressed as a fixed amount of satoshis. + /** + The fee limit expressed as a fixed amount of satoshis. + + The fields fixed and fixed_msat are mutually exclusive. + */ int64 fixed = 1; + /** + The fee limit expressed as a fixed amount of millisatoshis. + + The fields fixed and fixed_msat are mutually exclusive. + */ + int64 fixed_msat = 3; + /// The fee limit expressed as a percentage of the payment amount. int64 percent = 2; } } message SendRequest { - /// The identity pubkey of the payment recipient + /** + The identity pubkey of the payment recipient. When using REST, this field + must be encoded as base64. + */ bytes dest = 1; - /// The hex-encoded identity pubkey of the payment recipient - string dest_string = 2; + /** + The hex-encoded identity pubkey of the payment recipient. Deprecated now + that the REST gateway supports base64 encoding of bytes fields. + */ + string dest_string = 2 [deprecated = true]; + + /** + The amount to send expressed in satoshis. - /// Number of satoshis to send. + The fields amt and amt_msat are mutually exclusive. + */ int64 amt = 3; - /// The hash to use within the payment's HTLC + /** + The amount to send expressed in millisatoshis. + + The fields amt and amt_msat are mutually exclusive. + */ + int64 amt_msat = 12; + + /** + The hash to use within the payment's HTLC. When using REST, this field + must be encoded as base64. + */ bytes payment_hash = 4; - /// The hex-encoded hash to use within the payment's HTLC - string payment_hash_string = 5; + /** + The hex-encoded hash to use within the payment's HTLC. 
Deprecated now + that the REST gateway supports base64 encoding of bytes fields. + */ + string payment_hash_string = 5 [deprecated = true]; /** - A bare-bones invoice for a payment within the Lightning Network. With the + A bare-bones invoice for a payment within the Lightning Network. With the details of the invoice, the sender has all the data necessary to send a payment to the recipient. */ @@ -885,36 +988,61 @@ message SendRequest { The channel id of the channel that must be taken to the first hop. If zero, any channel may be used. */ - uint64 outgoing_chan_id = 9; + uint64 outgoing_chan_id = 9 [jstype = JS_STRING]; - /** + /** + The pubkey of the last hop of the route. If empty, any hop may be used. + */ + bytes last_hop_pubkey = 13; + + /** An optional maximum total time lock for the route. This should not exceed lnd's `--max-cltv-expiry` setting. If zero, then the value of `--max-cltv-expiry` is enforced. */ uint32 cltv_limit = 10; - /** + /** An optional field that can be used to pass an arbitrary set of TLV records to a peer which understands the new records. This can be used to pass - application specific data during the payment attempt. + application specific data during the payment attempt. Record types are + required to be in the custom range >= 65536. When using REST, the values + must be encoded as base64. + */ + map dest_custom_records = 11; + + /// If set, circular payments to self are permitted. + bool allow_self_payment = 14; + + /** + Features assumed to be supported by the final node. All transitive feature + dependencies must also be set properly. For a given feature bit pair, either + optional or remote may be set, but not both. If this field is nil or empty, + the router will try to load destination features from the graph as a + fallback. 
*/ - map dest_tlv = 11; + repeated FeatureBit dest_features = 15; } message SendResponse { - string payment_error = 1 [json_name = "payment_error"]; - bytes payment_preimage = 2 [json_name = "payment_preimage"]; - Route payment_route = 3 [json_name = "payment_route"]; - bytes payment_hash = 4 [json_name = "payment_hash"]; + string payment_error = 1; + bytes payment_preimage = 2; + Route payment_route = 3; + bytes payment_hash = 4; } message SendToRouteRequest { - /// The payment hash to use for the HTLC. + /** + The payment hash to use for the HTLC. When using REST, this field must be + encoded as base64. + */ bytes payment_hash = 1; - /// An optional hex-encoded payment hash to be used for the HTLC. - string payment_hash_string = 2; + /** + An optional hex-encoded payment hash to be used for the HTLC. Deprecated now + that the REST gateway supports base64 encoding of bytes fields. + */ + string payment_hash_string = 2 [deprecated = true]; reserved 3; @@ -932,7 +1060,8 @@ message ChannelAcceptRequest { /// The pending channel id. bytes pending_chan_id = 3; - /// The funding amount in satoshis that initiator wishes to use in the channel. + /// The funding amount in satoshis that initiator wishes to use in the + /// channel. uint64 funding_amt = 4; /// The push amount of the proposed channel in millisatoshis. @@ -941,28 +1070,32 @@ message ChannelAcceptRequest { /// The dust limit of the initiator's commitment tx. uint64 dust_limit = 6; - /// The maximum amount of coins in millisatoshis that can be pending in this channel. + /// The maximum amount of coins in millisatoshis that can be pending in this + /// channel. uint64 max_value_in_flight = 7; - /// The minimum amount of satoshis the initiator requires us to have at all times. + /// The minimum amount of satoshis the initiator requires us to have at all + /// times. uint64 channel_reserve = 8; /// The smallest HTLC in millisatoshis that the initiator will accept. 
uint64 min_htlc = 9; - /// The initial fee rate that the initiator suggests for both commitment transactions. + /// The initial fee rate that the initiator suggests for both commitment + /// transactions. uint64 fee_per_kw = 10; /** - The number of blocks to use for the relative time lock in the pay-to-self output - of both commitment transactions. + The number of blocks to use for the relative time lock in the pay-to-self + output of both commitment transactions. */ uint32 csv_delay = 11; /// The total number of incoming HTLC's that the initiator will accept. uint32 max_accepted_htlcs = 12; - /// A bit-field which the initiator uses to specify proposed channel behavior. + /// A bit-field which the initiator uses to specify proposed channel + /// behavior. uint32 channel_flags = 13; } @@ -976,78 +1109,90 @@ message ChannelAcceptResponse { message ChannelPoint { oneof funding_txid { - /// Txid of the funding transaction - bytes funding_txid_bytes = 1 [json_name = "funding_txid_bytes"]; + /** + Txid of the funding transaction. When using REST, this field must be + encoded as base64. + */ + bytes funding_txid_bytes = 1; - /// Hex-encoded string representing the funding transaction - string funding_txid_str = 2 [json_name = "funding_txid_str"]; + /** + Hex-encoded string representing the byte-reversed hash of the funding + transaction. + */ + string funding_txid_str = 2; } /// The index of the output of the funding transaction - uint32 output_index = 3 [json_name = "output_index"]; + uint32 output_index = 3; } message OutPoint { /// Raw bytes representing the transaction id. - bytes txid_bytes = 1 [json_name = "txid_bytes"]; + bytes txid_bytes = 1; /// Reversed, hex-encoded string representing the transaction id. - string txid_str = 2 [json_name = "txid_str"]; + string txid_str = 2; /// The index of the output on the transaction. 
- uint32 output_index = 3 [json_name = "output_index"]; + uint32 output_index = 3; } message LightningAddress { /// The identity pubkey of the Lightning node - string pubkey = 1 [json_name = "pubkey"]; + string pubkey = 1; - /// The network location of the lightning node, e.g. `69.69.69.69:1337` or `localhost:10011` - string host = 2 [json_name = "host"]; + /// The network location of the lightning node, e.g. `69.69.69.69:1337` or + /// `localhost:10011` + string host = 2; } message EstimateFeeRequest { /// The map from addresses to amounts for the transaction. map AddrToAmount = 1; - /// The target number of blocks that this transaction should be confirmed by. + /// The target number of blocks that this transaction should be confirmed + /// by. int32 target_conf = 2; } message EstimateFeeResponse { /// The total fee in satoshis. - int64 fee_sat = 1 [json_name = "fee_sat"]; + int64 fee_sat = 1; /// The fee rate in satoshi/byte. - int64 feerate_sat_per_byte = 2 [json_name = "feerate_sat_per_byte"]; + int64 feerate_sat_per_byte = 2; } message SendManyRequest { /// The map from addresses to amounts map AddrToAmount = 1; - /// The target number of blocks that this transaction should be confirmed by. + /// The target number of blocks that this transaction should be confirmed + /// by. int32 target_conf = 3; - /// A manual fee rate set in sat/byte that should be used when crafting the transaction. + /// A manual fee rate set in sat/byte that should be used when crafting the + /// transaction. int64 sat_per_byte = 5; } message SendManyResponse { /// The id of the transaction - string txid = 1 [json_name = "txid"]; + string txid = 1; } message SendCoinsRequest { - /// The address to send coins to + /// The address to send coins to string addr = 1; /// The amount in satoshis to send int64 amount = 2; - /// The target number of blocks that this transaction should be confirmed by. + /// The target number of blocks that this transaction should be confirmed + /// by. 
int32 target_conf = 3; - /// A manual fee rate set in sat/byte that should be used when crafting the transaction. + /// A manual fee rate set in sat/byte that should be used when crafting the + /// transaction. int64 sat_per_byte = 5; /** @@ -1055,11 +1200,11 @@ message SendCoinsRequest { send all the coins under control of the internal wallet to the specified address. */ - bool send_all = 6; + bool send_all = 6; } message SendCoinsResponse { /// The transaction ID of the transaction - string txid = 1 [json_name = "txid"]; + string txid = 1; } message ListUnspentRequest { @@ -1071,20 +1216,20 @@ message ListUnspentRequest { } message ListUnspentResponse { /// A list of utxos - repeated Utxo utxos = 1 [json_name = "utxos"]; + repeated Utxo utxos = 1; } -/** +/** `AddressType` has to be one of: - `p2wkh`: Pay to witness key hash (`WITNESS_PUBKEY_HASH` = 0) - `np2wkh`: Pay to nested witness key hash (`NESTED_PUBKEY_HASH` = 1) */ enum AddressType { - WITNESS_PUBKEY_HASH = 0; - NESTED_PUBKEY_HASH = 1; - UNUSED_WITNESS_PUBKEY_HASH = 2; - UNUSED_NESTED_PUBKEY_HASH = 3; + WITNESS_PUBKEY_HASH = 0; + NESTED_PUBKEY_HASH = 1; + UNUSED_WITNESS_PUBKEY_HASH = 2; + UNUSED_NESTED_PUBKEY_HASH = 3; } message NewAddressRequest { @@ -1093,31 +1238,37 @@ message NewAddressRequest { } message NewAddressResponse { /// The newly generated wallet address - string address = 1 [json_name = "address"]; + string address = 1; } message SignMessageRequest { - /// The message to be signed - bytes msg = 1 [ json_name = "msg" ]; + /** + The message to be signed. When using REST, this field must be encoded as + base64. + */ + bytes msg = 1; } message SignMessageResponse { /// The signature for the given message - string signature = 1 [ json_name = "signature" ]; + string signature = 1; } message VerifyMessageRequest { - /// The message over which the signature is to be verified - bytes msg = 1 [ json_name = "msg" ]; + /** + The message over which the signature is to be verified. 
When using REST, + this field must be encoded as base64. + */ + bytes msg = 1; /// The signature to be verified over the given message - string signature = 2 [ json_name = "signature" ]; + string signature = 2; } message VerifyMessageResponse { /// Whether the signature was valid over the given message - bool valid = 1 [ json_name = "valid" ]; + bool valid = 1; /// The pubkey recovered from the signature - string pubkey = 2 [ json_name = "pubkey" ]; + string pubkey = 2; } message ConnectPeerRequest { @@ -1125,7 +1276,7 @@ message ConnectPeerRequest { LightningAddress addr = 1; /** If set, the daemon will attempt to persistently connect to the target - * peer. Otherwise, the call will be synchronous. */ + * peer. Otherwise, the call will be synchronous. */ bool perm = 2; } message ConnectPeerResponse { @@ -1133,47 +1284,75 @@ message ConnectPeerResponse { message DisconnectPeerRequest { /// The pubkey of the node to disconnect from - string pub_key = 1 [json_name = "pub_key"]; + string pub_key = 1; } message DisconnectPeerResponse { } message HTLC { - bool incoming = 1 [json_name = "incoming"]; - int64 amount = 2 [json_name = "amount"]; - bytes hash_lock = 3 [json_name = "hash_lock"]; - uint32 expiration_height = 4 [json_name = "expiration_height"]; + bool incoming = 1; + int64 amount = 2; + bytes hash_lock = 3; + uint32 expiration_height = 4; +} + +enum CommitmentType { + /** + A channel using the legacy commitment format having tweaked to_remote + keys. + */ + LEGACY = 0; + + /** + A channel that uses the modern commitment format where the key in the + output of the remote party does not change each state. This makes back + up and recovery easier as when the channel is closed, the funds go + directly to that key. + */ + STATIC_REMOTE_KEY = 1; + + /** + A channel that uses a commitment format that has anchor outputs on the + commitments, allowing fee bumping after a force close transaction has + been broadcast. 
+ */ + ANCHORS = 2; + + /** + Returned when the commitment type isn't known or unavailable. + */ + UNKNOWN_COMMITMENT_TYPE = 999; } message Channel { /// Whether this channel is active or not - bool active = 1 [json_name = "active"]; + bool active = 1; /// The identity pubkey of the remote node - string remote_pubkey = 2 [json_name = "remote_pubkey"]; + string remote_pubkey = 2; /** The outpoint (txid:index) of the funding transaction. With this value, Bob will be able to generate a signature for Alice's version of the commitment transaction. */ - string channel_point = 3 [json_name = "channel_point"]; + string channel_point = 3; /** The unique channel ID for the channel. The first 3 bytes are the block height, the next 3 the index within the block, and the last 2 bytes are the output index for the channel. */ - uint64 chan_id = 4 [json_name = "chan_id"]; + uint64 chan_id = 4 [jstype = JS_STRING]; /// The total amount of funds held in this channel - int64 capacity = 5 [json_name = "capacity"]; + int64 capacity = 5; /// This node's current balance in this channel - int64 local_balance = 6 [json_name = "local_balance"]; + int64 local_balance = 6; /// The counterparty's current balance in this channel - int64 remote_balance = 7 [json_name = "remote_balance"]; + int64 remote_balance = 7; /** The amount calculated to be paid in fees for the current set of commitment @@ -1181,112 +1360,163 @@ message Channel { allow the fee amount to be removed and recalculated with each channel state update, including updates that happen after a system restart. */ - int64 commit_fee = 8 [json_name = "commit_fee"]; + int64 commit_fee = 8; /// The weight of the commitment transaction - int64 commit_weight = 9 [json_name = "commit_weight"]; + int64 commit_weight = 9; /** The required number of satoshis per kilo-weight that the requester will pay at all times, for both the funding transaction and commitment transaction. This value can later be updated once the channel is open. 
*/ - int64 fee_per_kw = 10 [json_name = "fee_per_kw"]; + int64 fee_per_kw = 10; /// The unsettled balance in this channel - int64 unsettled_balance = 11 [json_name = "unsettled_balance"]; + int64 unsettled_balance = 11; /** The total number of satoshis we've sent within this channel. */ - int64 total_satoshis_sent = 12 [json_name = "total_satoshis_sent"]; + int64 total_satoshis_sent = 12; /** The total number of satoshis we've received within this channel. */ - int64 total_satoshis_received = 13 [json_name = "total_satoshis_received"]; + int64 total_satoshis_received = 13; /** The total number of updates conducted within this channel. */ - uint64 num_updates = 14 [json_name = "num_updates"]; + uint64 num_updates = 14; /** The list of active, uncleared HTLCs currently pending within the channel. */ - repeated HTLC pending_htlcs = 15 [json_name = "pending_htlcs"]; + repeated HTLC pending_htlcs = 15; /** The CSV delay expressed in relative blocks. If the channel is force closed, we will need to wait for this many blocks before we can regain our funds. */ - uint32 csv_delay = 16 [json_name = "csv_delay"]; + uint32 csv_delay = 16; /// Whether this channel is advertised to the network or not. - bool private = 17 [json_name = "private"]; + bool private = 17; /// True if we were the ones that created the channel. - bool initiator = 18 [json_name = "initiator"]; + bool initiator = 18; /// A set of flags showing the current state of the channel. - string chan_status_flags = 19 [json_name = "chan_status_flags"]; + string chan_status_flags = 19; /// The minimum satoshis this node is required to reserve in its balance. - int64 local_chan_reserve_sat = 20 [json_name = "local_chan_reserve_sat"]; + int64 local_chan_reserve_sat = 20; /** The minimum satoshis the other node is required to reserve in its balance. */ - int64 remote_chan_reserve_sat = 21 [json_name = "remote_chan_reserve_sat"]; + int64 remote_chan_reserve_sat = 21; + + /// Deprecated. Use commitment_type. 
+ bool static_remote_key = 22 [deprecated = true]; + + /// The commitment type used by this channel. + CommitmentType commitment_type = 26; /** - If true, then this channel uses the modern commitment format where the key - in the output of the remote party does not change each state. This makes - back up and recovery easier as when the channel is closed, the funds go - directly to that key. + The number of seconds that the channel has been monitored by the channel + scoring system. Scores are currently not persisted, so this value may be + less than the lifetime of the channel [EXPERIMENTAL]. */ - bool static_remote_key = 22 [json_name = "static_remote_key"]; -} + int64 lifetime = 23; + + /** + The number of seconds that the remote peer has been observed as being online + by the channel scoring system over the lifetime of the channel + [EXPERIMENTAL]. + */ + int64 uptime = 24; + + /** + Close address is the address that we will enforce payout to on cooperative + close if the channel was opened utilizing option upfront shutdown. This + value can be set on channel open by setting close_address in an open channel + request. If this value is not set, you can still choose a payout address by + cooperatively closing with the delivery_address field set. + */ + string close_address = 25; + /* + The amount that the initiator of the channel optionally pushed to the remote + party on channel open. This amount will be zero if the channel initiator did + not push any funds to the remote peer. If the initiator field is true, we + pushed this amount to our peer, if it is false, the remote peer pushed this + amount to us. + */ + uint64 push_amount_sat = 27; + + /** + This uint32 indicates if this channel is to be considered 'frozen'. A + frozen channel does not allow a cooperative channel close by the + initiator. The thaw_height is the height that this restriction stops + applying to the channel. 
This field is optional, not setting it or using a + value of zero will mean the channel has no additional restrictions. + */ + uint32 thaw_height = 28; +} message ListChannelsRequest { bool active_only = 1; bool inactive_only = 2; bool public_only = 3; bool private_only = 4; + + /** + Filters the response for channels with a target peer's pubkey. If peer is + empty, all channels will be returned. + */ + bytes peer = 5; } message ListChannelsResponse { /// The list of active channels - repeated Channel channels = 11 [json_name = "channels"]; + repeated Channel channels = 11; +} + +enum Initiator { + INITIATOR_UNKNOWN = 0; + INITIATOR_LOCAL = 1; + INITIATOR_REMOTE = 2; + INITIATOR_BOTH = 3; } message ChannelCloseSummary { - /// The outpoint (txid:index) of the funding transaction. - string channel_point = 1 [json_name = "channel_point"]; + /// The outpoint (txid:index) of the funding transaction. + string channel_point = 1; - /// The unique channel ID for the channel. - uint64 chan_id = 2 [json_name = "chan_id"]; + /// The unique channel ID for the channel. + uint64 chan_id = 2 [jstype = JS_STRING]; /// The hash of the genesis block that this channel resides within. - string chain_hash = 3 [json_name = "chain_hash"]; + string chain_hash = 3; /// The txid of the transaction which ultimately closed this channel. - string closing_tx_hash = 4 [json_name = "closing_tx_hash"]; + string closing_tx_hash = 4; /// Public key of the remote peer that we formerly had a channel with. - string remote_pubkey = 5 [json_name = "remote_pubkey"]; + string remote_pubkey = 5; /// Total capacity of the channel. - int64 capacity = 6 [json_name = "capacity"]; + int64 capacity = 6; /// Height at which the funding transaction was spent. 
- uint32 close_height = 7 [json_name = "close_height"]; + uint32 close_height = 7; /// Settled balance at the time of channel closure - int64 settled_balance = 8 [json_name = "settled_balance"]; + int64 settled_balance = 8; /// The sum of all the time-locked outputs at the time of channel closure - int64 time_locked_balance = 9 [json_name = "time_locked_balance"]; + int64 time_locked_balance = 9; enum ClosureType { COOPERATIVE_CLOSE = 0; @@ -1298,7 +1528,23 @@ message ChannelCloseSummary { } /// Details on how the channel was closed. - ClosureType close_type = 10 [json_name = "close_type"]; + ClosureType close_type = 10; + + /** + Open initiator is the party that initiated opening the channel. Note that + this value may be unknown if the channel was closed before we migrated to + store open channel information after close. + */ + Initiator open_initiator = 11; + + /** + Close initiator indicates which party initiated the close. This value will + be unknown for channels that were cooperatively closed before we started + tracking cooperative close initiators. Note that this indicates which party + initiated a close, and it is possible for both to initiate cooperative or + force closes, although only one party's close will be confirmed on chain. 
+ */ + Initiator close_initiator = 12; } message ClosedChannelsRequest { @@ -1310,34 +1556,34 @@ message ClosedChannelsRequest { bool abandoned = 6; } -message ClosedChannelsResponse { - repeated ChannelCloseSummary channels = 1 [json_name = "channels"]; +message ClosedChannelsResponse { + repeated ChannelCloseSummary channels = 1; } message Peer { /// The identity pubkey of the peer - string pub_key = 1 [json_name = "pub_key"]; + string pub_key = 1; /// Network address of the peer; eg `127.0.0.1:10011` - string address = 3 [json_name = "address"]; + string address = 3; /// Bytes of data transmitted to this peer - uint64 bytes_sent = 4 [json_name = "bytes_sent"]; + uint64 bytes_sent = 4; /// Bytes of data transmitted from this peer - uint64 bytes_recv = 5 [json_name = "bytes_recv"]; + uint64 bytes_recv = 5; /// Satoshis sent to this peer - int64 sat_sent = 6 [json_name = "sat_sent"]; + int64 sat_sent = 6; /// Satoshis received from this peer - int64 sat_recv = 7 [json_name = "sat_recv"]; + int64 sat_recv = 7; /// A channel is inbound if the counterparty initiated the channel - bool inbound = 8 [json_name = "inbound"]; + bool inbound = 8; /// Ping time to this peer - int64 ping_time = 9 [json_name = "ping_time"]; + int64 ping_time = 9; enum SyncType { /** @@ -1357,80 +1603,129 @@ message Peer { } // The type of sync we are currently performing with this peer. - SyncType sync_type = 10 [json_name = "sync_type"]; + SyncType sync_type = 10; + + /// Features advertised by the remote peer in their init message. + map features = 11; + + /* + The latest errors received from our peer with timestamps, limited to the 10 + most recent errors. These errors are tracked across peer connections, but + are not persisted across lnd restarts. Note that these errors are only + stored for peers that we have channels open with, to prevent peers from + spamming us with errors at no cost. 
+ */ + repeated TimestampedError errors = 12; +} + +message TimestampedError { + // The unix timestamp in seconds when the error occurred. + uint64 timestamp = 1; + + // The string representation of the error sent by our peer. + string error = 2; } message ListPeersRequest { + /* + If true, only the last error that our peer sent us will be returned with + the peer's information, rather than the full set of historic errors we have + stored. + */ + bool latest_error = 1; } message ListPeersResponse { /// The list of currently connected peers - repeated Peer peers = 1 [json_name = "peers"]; + repeated Peer peers = 1; +} + +message PeerEventSubscription { +} + +message PeerEvent { + /// The identity pubkey of the peer. + string pub_key = 1; + + enum EventType { + PEER_ONLINE = 0; + PEER_OFFLINE = 1; + } + + EventType type = 2; } message GetInfoRequest { } message GetInfoResponse { + /// The version of the LND software that the node is running. + string version = 14; + + /// The SHA1 commit hash that the daemon is compiled with. + string commit_hash = 20; /// The identity pubkey of the current node. - string identity_pubkey = 1 [json_name = "identity_pubkey"]; + string identity_pubkey = 1; /// If applicable, the alias of the current node, e.g. 
"bob" - string alias = 2 [json_name = "alias"]; + string alias = 2; + + /// The color of the current node in hex code format + string color = 17; /// Number of pending channels - uint32 num_pending_channels = 3 [json_name = "num_pending_channels"]; + uint32 num_pending_channels = 3; /// Number of active channels - uint32 num_active_channels = 4 [json_name = "num_active_channels"]; + uint32 num_active_channels = 4; + + /// Number of inactive channels + uint32 num_inactive_channels = 15; /// Number of peers - uint32 num_peers = 5 [json_name = "num_peers"]; + uint32 num_peers = 5; /// The node's current view of the height of the best block - uint32 block_height = 6 [json_name = "block_height"]; + uint32 block_height = 6; /// The node's current view of the hash of the best block - string block_hash = 8 [json_name = "block_hash"]; + string block_hash = 8; + + /// Timestamp of the block best known to the wallet + int64 best_header_timestamp = 13; /// Whether the wallet's view is synced to the main chain - bool synced_to_chain = 9 [json_name = "synced_to_chain"]; + bool synced_to_chain = 9; + + // Whether we consider ourselves synced with the public channel graph. + bool synced_to_graph = 18; - /** - Whether the current node is connected to testnet. This field is - deprecated and the network field should be used instead + /** + Whether the current node is connected to testnet. This field is + deprecated and the network field should be used instead **/ - bool testnet = 10 [json_name = "testnet", deprecated = true]; + bool testnet = 10 [deprecated = true]; reserved 11; - /// The URIs of the current node. - repeated string uris = 12 [json_name = "uris"]; - - /// Timestamp of the block best known to the wallet - int64 best_header_timestamp = 13 [ json_name = "best_header_timestamp" ]; - - /// The version of the LND software that the node is running. 
- string version = 14 [ json_name = "version" ]; - - /// Number of inactive channels - uint32 num_inactive_channels = 15 [json_name = "num_inactive_channels"]; - /// A list of active chains the node is connected to - repeated Chain chains = 16 [json_name = "chains"]; + repeated Chain chains = 16; - /// The color of the current node in hex code format - string color = 17 [json_name = "color"]; + /// The URIs of the current node. + repeated string uris = 12; - // Whether we consider ourselves synced with the public channel graph. - bool synced_to_graph = 18 [json_name = "synced_to_graph"]; + /* + Features that our node has advertised in our init message, node + announcements and invoices. + */ + map features = 19; } message Chain { /// The blockchain the node is on (eg bitcoin, litecoin) - string chain = 1 [json_name = "chain"]; + string chain = 1; /// The network the node is on (eg regtest, testnet, mainnet) - string network = 2 [json_name = "network"]; + string network = 2; } message ConfirmationUpdate { @@ -1441,13 +1736,13 @@ message ConfirmationUpdate { } message ChannelOpenUpdate { - ChannelPoint channel_point = 1 [json_name = "channel_point"]; + ChannelPoint channel_point = 1; } message ChannelCloseUpdate { - bytes closing_txid = 1 [json_name = "closing_txid"]; + bytes closing_txid = 1; - bool success = 2 [json_name = "success"]; + bool success = 2; } message CloseChannelRequest { @@ -1458,121 +1753,362 @@ message CloseChannelRequest { */ ChannelPoint channel_point = 1; - /// If true, then the channel will be closed forcibly. This means the current commitment transaction will be signed and broadcast. + /// If true, then the channel will be closed forcibly. This means the + /// current commitment transaction will be signed and broadcast. bool force = 2; - /// The target number of blocks that the closure transaction should be confirmed by. + /// The target number of blocks that the closure transaction should be + /// confirmed by. 
int32 target_conf = 3; - /// A manual fee rate set in sat/byte that should be used when crafting the closure transaction. + /// A manual fee rate set in sat/byte that should be used when crafting the + /// closure transaction. int64 sat_per_byte = 4; + + /* + An optional address to send funds to in the case of a cooperative close. + If the channel was opened with an upfront shutdown script and this field + is set, the request to close will fail because the channel must pay out + to the upfront shutdown address. + */ + string delivery_address = 5; } message CloseStatusUpdate { oneof update { - PendingUpdate close_pending = 1 [json_name = "close_pending"]; - ChannelCloseUpdate chan_close = 3 [json_name = "chan_close"]; + PendingUpdate close_pending = 1; + ChannelCloseUpdate chan_close = 3; } } message PendingUpdate { - bytes txid = 1 [json_name = "txid"]; - uint32 output_index = 2 [json_name = "output_index"]; + bytes txid = 1; + uint32 output_index = 2; +} + +message ReadyForPsbtFunding { + /** + The P2WSH address of the channel funding multisig address that the below + specified amount in satoshis needs to be sent to. + */ + string funding_address = 1; + + /** + The exact amount in satoshis that needs to be sent to the above address to + fund the pending channel. + */ + int64 funding_amount = 2; + + /** + A raw PSBT that contains the pending channel output. If a base PSBT was + provided in the PsbtShim, this is the base PSBT with one additional output. + If no base PSBT was specified, this is an otherwise empty PSBT with exactly + one output. + */ + bytes psbt = 3; } message OpenChannelRequest { - /// The pubkey of the node to open a channel with - bytes node_pubkey = 2 [json_name = "node_pubkey"]; + /** + The pubkey of the node to open a channel with. When using REST, this field + must be encoded as base64.
+ */ + bytes node_pubkey = 2; - /// The hex encoded pubkey of the node to open a channel with - string node_pubkey_string = 3 [json_name = "node_pubkey_string"]; + /** + The hex encoded pubkey of the node to open a channel with. Deprecated now + that the REST gateway supports base64 encoding of bytes fields. + */ + string node_pubkey_string = 3 [deprecated = true]; /// The number of satoshis the wallet should commit to the channel - int64 local_funding_amount = 4 [json_name = "local_funding_amount"]; + int64 local_funding_amount = 4; - /// The number of satoshis to push to the remote side as part of the initial commitment state - int64 push_sat = 5 [json_name = "push_sat"]; + /// The number of satoshis to push to the remote side as part of the initial + /// commitment state + int64 push_sat = 5; - /// The target number of blocks that the funding transaction should be confirmed by. + /// The target number of blocks that the funding transaction should be + /// confirmed by. int32 target_conf = 6; - /// A manual fee rate set in sat/byte that should be used when crafting the funding transaction. + /// A manual fee rate set in sat/byte that should be used when crafting the + /// funding transaction. int64 sat_per_byte = 7; - /// Whether this channel should be private, not announced to the greater network. - bool private = 8 [json_name = "private"]; + /// Whether this channel should be private, not announced to the greater + /// network. + bool private = 8; + + /// The minimum value in millisatoshi we will require for incoming HTLCs on + /// the channel. + int64 min_htlc_msat = 9; + + /// The delay we require on the remote's commitment transaction. If this is + /// not set, it will be scaled automatically with the channel size. + uint32 remote_csv_delay = 10; - /// The minimum value in millisatoshi we will require for incoming HTLCs on the channel. 
- int64 min_htlc_msat = 9 [json_name = "min_htlc_msat"]; + /// The minimum number of confirmations each one of your outputs used for + /// the funding transaction must satisfy. + int32 min_confs = 11; - /// The delay we require on the remote's commitment transaction. If this is not set, it will be scaled automatically with the channel size. - uint32 remote_csv_delay = 10 [json_name = "remote_csv_delay"]; + /// Whether unconfirmed outputs should be used as inputs for the funding + /// transaction. + bool spend_unconfirmed = 12; - /// The minimum number of confirmations each one of your outputs used for the funding transaction must satisfy. - int32 min_confs = 11 [json_name = "min_confs"]; + /* + Close address is an optional address which specifies the address to which + funds should be paid out to upon cooperative close. This field may only be + set if the peer supports the option upfront feature bit (call listpeers + to check). The remote peer will only accept cooperative closes to this + address if it is set. - /// Whether unconfirmed outputs should be used as inputs for the funding transaction. - bool spend_unconfirmed = 12 [json_name = "spend_unconfirmed"]; + Note: If this value is set on channel creation, you will *not* be able to + cooperatively close out to a different address. + */ + string close_address = 13; + + /** + Funding shims are an optional argument that allow the caller to intercept + certain funding functionality. For example, a shim can be provided to use a + particular key for the commitment key (ideally cold) rather than use one + that is generated by the wallet as normal, or signal that signing will be + carried out in an interactive manner (PSBT based). 
+ */ + FundingShim funding_shim = 14; } message OpenStatusUpdate { oneof update { - PendingUpdate chan_pending = 1 [json_name = "chan_pending"]; - ChannelOpenUpdate chan_open = 3 [json_name = "chan_open"]; + /** + Signals that the channel is now fully negotiated and the funding + transaction published. + */ + PendingUpdate chan_pending = 1; + + /** + Signals that the channel's funding transaction has now reached the + required number of confirmations on chain and can be used. + */ + ChannelOpenUpdate chan_open = 3; + + /** + Signals that the funding process has been suspended and the construction + of a PSBT that funds the channel PK script is now required. + */ + ReadyForPsbtFunding psbt_fund = 5; } + + /** + The pending channel ID of the created channel. This value may be used to + further the funding flow manually via the FundingStateStep method. + */ + bytes pending_chan_id = 4; } -message PendingHTLC { +message KeyLocator { + /// The family of key being identified. + int32 key_family = 1; + + /// The precise index of the key being identified. + int32 key_index = 2; +} + +message KeyDescriptor { + /** + The raw bytes of the key being identified. + */ + bytes raw_key_bytes = 1; + + /** + The key locator that identifies which key to use for signing. + */ + KeyLocator key_loc = 2; +} + +message ChanPointShim { + /** + The size of the pre-crafted output to be used as the channel point for this + channel funding. + */ + int64 amt = 1; + + /// The target channel point to reference in created commitment transactions. + ChannelPoint chan_point = 2; + + /// Our local key to use when creating the multi-sig output. + KeyDescriptor local_key = 3; + + /// The key of the remote party to use when creating the multi-sig output. + bytes remote_key = 4; + + /** + If non-zero, then this will be used as the pending channel ID on the wire + protocol to initiate the funding request.
This is an optional field, and + should only be set if the responder is already expecting a specific pending + channel ID. + */ + bytes pending_chan_id = 5; + + /** + This uint32 indicates if this channel is to be considered 'frozen'. A + frozen channel does not allow a cooperative channel close by the + initiator. The thaw_height is the height that this restriction stops + applying to the channel. + */ + uint32 thaw_height = 6; +} + +message PsbtShim { + /** + A unique identifier of 32 random bytes that will be used as the pending + channel ID to identify the PSBT state machine when interacting with it and + on the wire protocol to initiate the funding request. + */ + bytes pending_chan_id = 1; + + /** + An optional base PSBT the new channel output will be added to. If this is + non-empty, it must be a binary serialized PSBT. + */ + bytes base_psbt = 2; +} + +message FundingShim { + oneof shim { + /** + A channel shim where the channel point was fully constructed outside + of lnd's wallet and the transaction might already be published. + */ + ChanPointShim chan_point_shim = 1; + + /** + A channel shim that uses a PSBT to fund and sign the channel funding + transaction. + */ + PsbtShim psbt_shim = 2; + } +} + +message FundingShimCancel { + /// The pending channel ID of the channel to cancel the funding shim for. + bytes pending_chan_id = 1; +} + +message FundingPsbtVerify { + /** + The funded but not yet signed PSBT that sends the exact channel capacity + amount to the PK script returned in the open channel message in a previous + step. + */ + bytes funded_psbt = 1; + + /// The pending channel ID of the channel to get the PSBT for. + bytes pending_chan_id = 2; +} + +message FundingPsbtFinalize { + /** + The funded PSBT that contains all witness data to send the exact channel + capacity amount to the PK script returned in the open channel message in a + previous step. + */ + bytes signed_psbt = 1; + + /// The pending channel ID of the channel to get the PSBT for. 
+ bytes pending_chan_id = 2; +} + +message FundingTransitionMsg { + oneof trigger { + /** + The funding shim to register. This should be used before any + channel funding has begun by the remote party, as it is intended as a + preparatory step for the full channel funding. + */ + FundingShim shim_register = 1; + + /// Used to cancel an existing registered funding shim. + FundingShimCancel shim_cancel = 2; + + /** + Used to continue a funding flow that was initiated to be executed + through a PSBT. This step verifies that the PSBT contains the correct + outputs to fund the channel. + */ + FundingPsbtVerify psbt_verify = 3; + + /** + Used to continue a funding flow that was initiated to be executed + through a PSBT. This step finalizes the funded and signed PSBT, finishes + negotiation with the peer and finally publishes the resulting funding + transaction. + */ + FundingPsbtFinalize psbt_finalize = 4; + } +} + +message FundingStateStepResp { +} +message PendingHTLC { /// The direction within the channel that the htlc was sent - bool incoming = 1 [ json_name = "incoming" ]; + bool incoming = 1; /// The total value of the htlc - int64 amount = 2 [ json_name = "amount" ]; + int64 amount = 2; /// The final output to be swept back to the user's wallet - string outpoint = 3 [ json_name = "outpoint" ]; + string outpoint = 3; /// The next block height at which we can spend the current stage - uint32 maturity_height = 4 [ json_name = "maturity_height" ]; + uint32 maturity_height = 4; /** The number of blocks remaining until the current stage can be swept. Negative values indicate how many blocks have passed since becoming mature.
*/ - int32 blocks_til_maturity = 5 [ json_name = "blocks_til_maturity" ]; + int32 blocks_til_maturity = 5; /// Indicates whether the htlc is in its first or second stage of recovery - uint32 stage = 6 [ json_name = "stage" ]; + uint32 stage = 6; } -message PendingChannelsRequest {} +message PendingChannelsRequest { +} message PendingChannelsResponse { message PendingChannel { - string remote_node_pub = 1 [ json_name = "remote_node_pub" ]; - string channel_point = 2 [ json_name = "channel_point" ]; + string remote_node_pub = 1; + string channel_point = 2; - int64 capacity = 3 [ json_name = "capacity" ]; + int64 capacity = 3; - int64 local_balance = 4 [ json_name = "local_balance" ]; - int64 remote_balance = 5 [ json_name = "remote_balance" ]; - - /// The minimum satoshis this node is required to reserve in its balance. - int64 local_chan_reserve_sat = 6 [json_name = "local_chan_reserve_sat"]; + int64 local_balance = 4; + int64 remote_balance = 5; + + /// The minimum satoshis this node is required to reserve in its + /// balance. + int64 local_chan_reserve_sat = 6; /** The minimum satoshis the other node is required to reserve in its balance. */ - int64 remote_chan_reserve_sat = 7 [json_name = "remote_chan_reserve_sat"]; + int64 remote_chan_reserve_sat = 7; + + // The party that initiated opening the channel. + Initiator initiator = 8; + + /// The commitment type used by this channel. + CommitmentType commitment_type = 9; } message PendingOpenChannel { /// The pending channel - PendingChannel channel = 1 [ json_name = "channel" ]; + PendingChannel channel = 1; /// The height at which this channel will be confirmed - uint32 confirmation_height = 2 [ json_name = "confirmation_height" ]; + uint32 confirmation_height = 2; /** The amount calculated to be paid in fees for the current set of @@ -1581,17 +2117,17 @@ message PendingChannelsResponse { each channel state update, including updates that happen after a system restart. 
*/ - int64 commit_fee = 4 [json_name = "commit_fee" ]; + int64 commit_fee = 4; /// The weight of the commitment transaction - int64 commit_weight = 5 [ json_name = "commit_weight" ]; + int64 commit_weight = 5; /** The required number of satoshis per kilo-weight that the requester will pay at all times, for both the funding transaction and commitment transaction. This value can later be updated once the channel is open. */ - int64 fee_per_kw = 6 [ json_name = "fee_per_kw" ]; + int64 fee_per_kw = 6; } message WaitingCloseChannel { @@ -1599,7 +2135,42 @@ message PendingChannelsResponse { PendingChannel channel = 1; /// The balance in satoshis encumbered in this channel - int64 limbo_balance = 2 [ json_name = "limbo_balance" ]; + int64 limbo_balance = 2; + + /** + A list of valid commitment transactions. Any of these can confirm at + this point. + */ + Commitments commitments = 3; + } + + message Commitments { + /// Hash of the local version of the commitment tx. + string local_txid = 1; + + /// Hash of the remote version of the commitment tx. + string remote_txid = 2; + + /// Hash of the remote pending version of the commitment tx. + string remote_pending_txid = 3; + + /* + The amount in satoshis calculated to be paid in fees for the local + commitment. + */ + uint64 local_commit_fee_sat = 4; + + /* + The amount in satoshis calculated to be paid in fees for the remote + commitment. + */ + uint64 remote_commit_fee_sat = 5; + + /* + The amount in satoshis calculated to be paid in fees for the remote + pending commitment. 
+ */ + uint64 remote_pending_commit_fee_sat = 6; } message ClosedChannel { @@ -1607,49 +2178,61 @@ message PendingChannelsResponse { PendingChannel channel = 1; /// The transaction id of the closing transaction - string closing_txid = 2 [ json_name = "closing_txid" ]; + string closing_txid = 2; } message ForceClosedChannel { /// The pending channel to be force closed - PendingChannel channel = 1 [ json_name = "channel" ]; + PendingChannel channel = 1; /// The transaction id of the closing transaction - string closing_txid = 2 [ json_name = "closing_txid" ]; + string closing_txid = 2; /// The balance in satoshis encumbered in this pending channel - int64 limbo_balance = 3 [ json_name = "limbo_balance" ]; + int64 limbo_balance = 3; /// The height at which funds can be swept into the wallet - uint32 maturity_height = 4 [ json_name = "maturity_height" ]; + uint32 maturity_height = 4; /* Remaining # of blocks until the commitment output can be swept. Negative values indicate how many blocks have passed since becoming mature. 
*/ - int32 blocks_til_maturity = 5 [ json_name = "blocks_til_maturity" ]; + int32 blocks_til_maturity = 5; /// The total value of funds successfully recovered from this channel - int64 recovered_balance = 6 [ json_name = "recovered_balance" ]; + int64 recovered_balance = 6; + + repeated PendingHTLC pending_htlcs = 8; - repeated PendingHTLC pending_htlcs = 8 [ json_name = "pending_htlcs" ]; + enum AnchorState { + LIMBO = 0; + RECOVERED = 1; + LOST = 2; + } + + AnchorState anchor = 9; } /// The balance in satoshis encumbered in pending channels - int64 total_limbo_balance = 1 [ json_name = "total_limbo_balance" ]; + int64 total_limbo_balance = 1; /// Channels pending opening - repeated PendingOpenChannel pending_open_channels = 2 [ json_name = "pending_open_channels" ]; + repeated PendingOpenChannel pending_open_channels = 2; - /// Channels pending closing - repeated ClosedChannel pending_closing_channels = 3 [ json_name = "pending_closing_channels" ]; + /* + Deprecated: Channels pending closing previously contained cooperatively + closed channels with a single confirmation. These channels are now + considered closed from the time we see them on chain. 
+ */ + repeated ClosedChannel pending_closing_channels = 3 [deprecated = true]; /// Channels pending force closing - repeated ForceClosedChannel pending_force_closing_channels = 4 [ json_name = "pending_force_closing_channels" ]; + repeated ForceClosedChannel pending_force_closing_channels = 4; /// Channels waiting for closing tx to confirm - repeated WaitingCloseChannel waiting_close_channels = 5 [ json_name = "waiting_close_channels" ]; + repeated WaitingCloseChannel waiting_close_channels = 5; } message ChannelEventSubscription { @@ -1657,55 +2240,74 @@ message ChannelEventSubscription { message ChannelEventUpdate { oneof channel { - Channel open_channel = 1 [ json_name = "open_channel" ]; - ChannelCloseSummary closed_channel = 2 [ json_name = "closed_channel" ]; - ChannelPoint active_channel = 3 [ json_name = "active_channel" ]; - ChannelPoint inactive_channel = 4 [ json_name = "inactive_channel" ]; + Channel open_channel = 1; + ChannelCloseSummary closed_channel = 2; + ChannelPoint active_channel = 3; + ChannelPoint inactive_channel = 4; + PendingUpdate pending_open_channel = 6; } enum UpdateType { - OPEN_CHANNEL = 0; - CLOSED_CHANNEL = 1; - ACTIVE_CHANNEL = 2; - INACTIVE_CHANNEL = 3; + OPEN_CHANNEL = 0; + CLOSED_CHANNEL = 1; + ACTIVE_CHANNEL = 2; + INACTIVE_CHANNEL = 3; + PENDING_OPEN_CHANNEL = 4; } - UpdateType type = 5 [ json_name = "type" ]; + UpdateType type = 5; } message WalletBalanceRequest { } message WalletBalanceResponse { /// The balance of the wallet - int64 total_balance = 1 [json_name = "total_balance"]; + int64 total_balance = 1; /// The confirmed balance of a wallet(with >= 1 confirmations) - int64 confirmed_balance = 2 [json_name = "confirmed_balance"]; + int64 confirmed_balance = 2; /// The unconfirmed balance of a wallet(with 0 confirmations) - int64 unconfirmed_balance = 3 [json_name = "unconfirmed_balance"]; + int64 unconfirmed_balance = 3; } message ChannelBalanceRequest { } message ChannelBalanceResponse { /// Sum of channels balances 
denominated in satoshis - int64 balance = 1 [json_name = "balance"]; + int64 balance = 1; /// Sum of channels pending balances denominated in satoshis - int64 pending_open_balance = 2 [json_name = "pending_open_balance"]; + int64 pending_open_balance = 2; } message QueryRoutesRequest { /// The 33-byte hex-encoded public key for the payment destination string pub_key = 1; - /// The amount to send expressed in satoshis + /** + The amount to send expressed in satoshis. + + The fields amt and amt_msat are mutually exclusive. + */ int64 amt = 2; + /** + The amount to send expressed in millisatoshis. + + The fields amt and amt_msat are mutually exclusive. + */ + int64 amt_msat = 12; + reserved 3; - /// An optional CLTV delta from the current height that should be used for the timelock of the final hop + /** + An optional CLTV delta from the current height that should be used for the + timelock of the final hop. Note that unlike SendPayment, QueryRoutes does + not add any additional block padding on top of final_ctlv_delta. This + padding of a few blocks needs to be added manually or otherwise failures may + happen when a block comes in while the payment is in flight. + */ int32 final_cltv_delta = 4; /** @@ -1717,7 +2319,8 @@ message QueryRoutesRequest { FeeLimit fee_limit = 5; /** - A list of nodes to ignore during path finding. + A list of nodes to ignore during path finding. When using REST, these fields + must be encoded as base64. */ repeated bytes ignored_nodes = 6; @@ -1743,25 +2346,66 @@ message QueryRoutesRequest { */ repeated NodePair ignored_pairs = 10; - /** + /** An optional maximum total time lock for the route. If the source is empty or ourselves, this should not exceed lnd's `--max-cltv-expiry` setting. If zero, then the value of `--max-cltv-expiry` is used as the limit. */ uint32 cltv_limit = 11; + + /** + An optional field that can be used to pass an arbitrary set of TLV records + to a peer which understands the new records. 
This can be used to pass + application specific data during the payment attempt. If the destination + does not support the specified records, an error will be returned. + Record types are required to be in the custom range >= 65536. When using + REST, the values must be encoded as base64. + */ + map dest_custom_records = 13; + + /** + The channel id of the channel that must be taken to the first hop. If zero, + any channel may be used. + */ + uint64 outgoing_chan_id = 14 [jstype = JS_STRING]; + + /** + The pubkey of the last hop of the route. If empty, any hop may be used. + */ + bytes last_hop_pubkey = 15; + + /** + Optional route hints to reach the destination through private channels. + */ + repeated lnrpc.RouteHint route_hints = 16; + + /** + Features assumed to be supported by the final node. All transitive feature + dependencies must also be set properly. For a given feature bit pair, either + optional or remote may be set, but not both. If this field is nil or empty, + the router will try to load destination features from the graph as a + fallback. + */ + repeated lnrpc.FeatureBit dest_features = 17; } message NodePair { - /// The sending node of the pair. + /** + The sending node of the pair. When using REST, this field must be encoded as + base64. + */ bytes from = 1; - /// The receiving node of the pair. + /** + The receiving node of the pair. When using REST, this field must be encoded + as base64. + */ bytes to = 2; } message EdgeLocator { /// The short channel id of this edge. - uint64 channel_id = 1; + uint64 channel_id = 1 [jstype = JS_STRING]; /** The direction of this edge. If direction_reverse is false, the direction @@ -1777,13 +2421,13 @@ message QueryRoutesResponse { The route that results from the path finding operation. This is still a repeated field to retain backwards compatibility.
*/ - repeated Route routes = 1 [json_name = "routes"]; + repeated Route routes = 1; /** The success probability of the returned route based on the current mission control state. [EXPERIMENTAL] */ - double success_prob = 2 [json_name = "success_prob"]; + double success_prob = 2; } message Hop { @@ -1792,25 +2436,59 @@ message Hop { height, the next 3 the index within the block, and the last 2 bytes are the output index for the channel. */ - uint64 chan_id = 1 [json_name = "chan_id"]; - int64 chan_capacity = 2 [json_name = "chan_capacity"]; - int64 amt_to_forward = 3 [json_name = "amt_to_forward", deprecated = true]; - int64 fee = 4 [json_name = "fee", deprecated = true]; - uint32 expiry = 5 [json_name = "expiry"]; - int64 amt_to_forward_msat = 6 [json_name = "amt_to_forward_msat"]; - int64 fee_msat = 7 [json_name = "fee_msat"]; + uint64 chan_id = 1 [jstype = JS_STRING]; + int64 chan_capacity = 2; + int64 amt_to_forward = 3 [deprecated = true]; + int64 fee = 4 [deprecated = true]; + uint32 expiry = 5; + int64 amt_to_forward_msat = 6; + int64 fee_msat = 7; /** An optional public key of the hop. If the public key is given, the payment can be executed without relying on a copy of the channel graph. */ - string pub_key = 8 [json_name = "pub_key"]; + string pub_key = 8; - /** + /** If set to true, then this hop will be encoded using the new variable length - TLV format. + TLV format. Note that if any custom tlv_records below are specified, then + this field MUST be set to true for them to be encoded properly. */ - bool tlv_payload = 9 [json_name = "tlv_payload"]; + bool tlv_payload = 9; + + /** + An optional TLV record that signals the use of an MPP payment. If present, + the receiver will enforce that the same mpp_record is included in the + final hop payload of all non-zero payments in the HTLC set. If empty, a + regular single-shot payment is or was attempted. + */ + MPPRecord mpp_record = 10; + + /** + An optional set of key-value TLV records.
This is useful within the context + of the SendToRoute call as it allows callers to specify arbitrary K-V pairs + to drop off at each hop within the onion. + */ + map custom_records = 11; +} + +message MPPRecord { + /** + A unique, random identifier used to authenticate the sender as the intended + payer of a multi-path payment. The payment_addr must be the same for all + subpayments, and match the payment_addr provided in the receiver's invoice. + The same payment_addr must be used on all subpayments. + */ + bytes payment_addr = 11; + + /** + The total amount in milli-satoshis being sent as part of a larger multi-path + payment. The caller is responsible for ensuring subpayments to the same node + and payment_hash sum exactly to total_amt_msat. The same + total_amt_msat must be used on all subpayments. + */ + int64 total_amt_msat = 10; } /** @@ -1821,21 +2499,20 @@ route is only selected as valid if all the channels have sufficient capacity to carry the initial payment amount after fees are accounted for. */ message Route { - /** - The cumulative (final) time lock across the entire route. This is the CLTV + The cumulative (final) time lock across the entire route. This is the CLTV value that should be extended to the first hop in the route. All other hops will decrement the time-lock as advertised, leaving enough time for all hops to wait for or present the payment preimage to complete the payment. */ - uint32 total_time_lock = 1 [json_name = "total_time_lock"]; + uint32 total_time_lock = 1; /** - The sum of the fees paid at each hop within the final route. In the case + The sum of the fees paid at each hop within the final route. In the case of a one-hop payment, this value will be zero as we don't need to pay a fee to ourselves. */ - int64 total_fees = 2 [json_name = "total_fees", deprecated = true]; + int64 total_fees = 2 [deprecated = true]; /** The total amount of funds required to complete a payment over this route. 
@@ -1844,26 +2521,26 @@ message Route { satoshis, otherwise the route will fail at an intermediate node due to an insufficient amount of fees. */ - int64 total_amt = 3 [json_name = "total_amt", deprecated = true]; + int64 total_amt = 3 [deprecated = true]; /** Contains details concerning the specific forwarding details at each hop. */ - repeated Hop hops = 4 [json_name = "hops"]; - + repeated Hop hops = 4; + /** The total fees in millisatoshis. */ - int64 total_fees_msat = 5 [json_name = "total_fees_msat"]; - + int64 total_fees_msat = 5; + /** The total amount in millisatoshis. */ - int64 total_amt_msat = 6 [json_name = "total_amt_msat"]; + int64 total_amt_msat = 6; } message NodeInfoRequest { - /// The 33-byte hex-encoded compressed public of the target node + /// The 33-byte hex-encoded compressed public of the target node string pub_key = 1; /// If true, will include all known channels associated with the node. @@ -1871,23 +2548,22 @@ message NodeInfoRequest { } message NodeInfo { - /** An individual vertex/node within the channel graph. A node is connected to other nodes by one or more channel edges emanating from it. As the graph is directed, a node will also have an incoming edge attached to it for each outgoing edge. */ - LightningNode node = 1 [json_name = "node"]; + LightningNode node = 1; /// The total number of channels for the node. - uint32 num_channels = 2 [json_name = "num_channels"]; + uint32 num_channels = 2; /// The sum of all channels capacity for the node, denominated in satoshis. - int64 total_capacity = 3 [json_name = "total_capacity"]; + int64 total_capacity = 3; /// A list of all public channels for the node. - repeated ChannelEdge channels = 4 [json_name = "channels"]; + repeated ChannelEdge channels = 4; } /** @@ -1897,26 +2573,27 @@ graph is directed, a node will also have an incoming edge attached to it for each outgoing edge. 
*/ message LightningNode { - uint32 last_update = 1 [ json_name = "last_update" ]; - string pub_key = 2 [ json_name = "pub_key" ]; - string alias = 3 [ json_name = "alias" ]; - repeated NodeAddress addresses = 4 [ json_name = "addresses" ]; - string color = 5 [ json_name = "color" ]; + uint32 last_update = 1; + string pub_key = 2; + string alias = 3; + repeated NodeAddress addresses = 4; + string color = 5; + map features = 6; } message NodeAddress { - string network = 1 [ json_name = "network" ]; - string addr = 2 [ json_name = "addr" ]; + string network = 1; + string addr = 2; } message RoutingPolicy { - uint32 time_lock_delta = 1 [json_name = "time_lock_delta"]; - int64 min_htlc = 2 [json_name = "min_htlc"]; - int64 fee_base_msat = 3 [json_name = "fee_base_msat"]; - int64 fee_rate_milli_msat = 4 [json_name = "fee_rate_milli_msat"]; - bool disabled = 5 [json_name = "disabled"]; - uint64 max_htlc_msat = 6 [json_name = "max_htlc_msat"]; - uint32 last_update = 7 [json_name = "last_update"]; + uint32 time_lock_delta = 1; + int64 min_htlc = 2; + int64 fee_base_msat = 3; + int64 fee_rate_milli_msat = 4; + bool disabled = 5; + uint64 max_htlc_msat = 6; + uint32 last_update = 7; } /** @@ -1927,42 +2604,70 @@ stored. The other portions relevant to routing policy of a channel are stored within a ChannelEdgePolicy for each direction of the channel. */ message ChannelEdge { - /** The unique channel ID for the channel. The first 3 bytes are the block height, the next 3 the index within the block, and the last 2 bytes are the output index for the channel. 
*/ - uint64 channel_id = 1 [json_name = "channel_id"]; - string chan_point = 2 [json_name = "chan_point"]; + uint64 channel_id = 1 [jstype = JS_STRING]; + string chan_point = 2; - uint32 last_update = 3 [json_name = "last_update", deprecated = true]; + uint32 last_update = 3 [deprecated = true]; - string node1_pub = 4 [json_name = "node1_pub"]; - string node2_pub = 5 [json_name = "node2_pub"]; + string node1_pub = 4; + string node2_pub = 5; - int64 capacity = 6 [json_name = "capacity"]; + int64 capacity = 6; - RoutingPolicy node1_policy = 7 [json_name = "node1_policy"]; - RoutingPolicy node2_policy = 8 [json_name = "node2_policy"]; + RoutingPolicy node1_policy = 7; + RoutingPolicy node2_policy = 8; } message ChannelGraphRequest { - /** - Whether unannounced channels are included in the response or not. If set, - unannounced channels are included. Unannounced channels are both private - channels, and public channels that are not yet announced to the network. - */ - bool include_unannounced = 1 [json_name = "include_unannounced"]; + /** + Whether unannounced channels are included in the response or not. If set, + unannounced channels are included. Unannounced channels are both private + channels, and public channels that are not yet announced to the network. + */ + bool include_unannounced = 1; } /// Returns a new instance of the directed channel graph. message ChannelGraph { /// The list of `LightningNode`s in this channel graph - repeated LightningNode nodes = 1 [json_name = "nodes"]; + repeated LightningNode nodes = 1; /// The list of `ChannelEdge`s in this channel graph - repeated ChannelEdge edges = 2 [json_name = "edges"]; + repeated ChannelEdge edges = 2; +} + +enum NodeMetricType { + UNKNOWN = 0; + BETWEENNESS_CENTRALITY = 1; +} + +message NodeMetricsRequest { + /// The requested node metrics. 
+ repeated NodeMetricType types = 1; +} + +message NodeMetricsResponse { + /** + Betweenness centrality is the sum of the ratio of shortest paths that pass + through the node for each pair of nodes in the graph (not counting paths + starting or ending at this node). + Map of node pubkey to betweenness centrality of the node. Normalized + values are in the [0,1] closed interval. + */ + map betweenness_centrality = 1; +} + +message FloatMetric { + /// Arbitrary float value. + double value = 1; + + /// The value normalized to [0,1] or [-1,1]. + double normalized_value = 2; } message ChanInfoRequest { @@ -1971,37 +2676,40 @@ message ChanInfoRequest { height, the next 3 the index within the block, and the last 2 bytes are the output index for the channel. */ - uint64 chan_id = 1; + uint64 chan_id = 1 [jstype = JS_STRING]; } message NetworkInfoRequest { } message NetworkInfo { - uint32 graph_diameter = 1 [json_name = "graph_diameter"]; - double avg_out_degree = 2 [json_name = "avg_out_degree"]; - uint32 max_out_degree = 3 [json_name = "max_out_degree"]; + uint32 graph_diameter = 1; + double avg_out_degree = 2; + uint32 max_out_degree = 3; - uint32 num_nodes = 4 [json_name = "num_nodes"]; - uint32 num_channels = 5 [json_name = "num_channels"]; + uint32 num_nodes = 4; + uint32 num_channels = 5; - int64 total_network_capacity = 6 [json_name = "total_network_capacity"]; + int64 total_network_capacity = 6; - double avg_channel_size = 7 [json_name = "avg_channel_size"]; - int64 min_channel_size = 8 [json_name = "min_channel_size"]; - int64 max_channel_size = 9 [json_name = "max_channel_size"]; - int64 median_channel_size_sat = 10 [json_name = "median_channel_size_sat"]; + double avg_channel_size = 7; + int64 min_channel_size = 8; + int64 max_channel_size = 9; + int64 median_channel_size_sat = 10; // The number of edges marked as zombies. 
- uint64 num_zombie_chans = 11 [json_name = "num_zombie_chans"]; + uint64 num_zombie_chans = 11; // TODO(roasbeef): fee rate info, expiry // * also additional RPC for tracking fee info once in } -message StopRequest{} -message StopResponse{} +message StopRequest { +} +message StopResponse { +} -message GraphTopologySubscription {} +message GraphTopologySubscription { +} message GraphTopologyUpdate { repeated NodeUpdate node_updates = 1; repeated ChannelEdgeUpdate channel_updates = 2; @@ -2020,15 +2728,15 @@ message ChannelEdgeUpdate { height, the next 3 the index within the block, and the last 2 bytes are the output index for the channel. */ - uint64 chan_id = 1; + uint64 chan_id = 1 [jstype = JS_STRING]; ChannelPoint chan_point = 2; int64 capacity = 3; - RoutingPolicy routing_policy = 4; + RoutingPolicy routing_policy = 4; - string advertising_node = 5; + string advertising_node = 5; string connecting_node = 6; } message ClosedChannelUpdate { @@ -2037,7 +2745,7 @@ message ClosedChannelUpdate { height, the next 3 the index within the block, and the last 2 bytes are the output index for the channel. */ - uint64 chan_id = 1; + uint64 chan_id = 1 [jstype = JS_STRING]; int64 capacity = 2; uint32 closed_height = 3; ChannelPoint chan_point = 4; @@ -2045,22 +2753,22 @@ message ClosedChannelUpdate { message HopHint { /// The public key of the node at the start of the channel. - string node_id = 1 [json_name = "node_id"]; + string node_id = 1; /// The unique identifier of the channel. - uint64 chan_id = 2 [json_name = "chan_id"]; + uint64 chan_id = 2 [jstype = JS_STRING]; /// The base fee of the channel denominated in millisatoshis. - uint32 fee_base_msat = 3 [json_name = "fee_base_msat"]; + uint32 fee_base_msat = 3; /** The fee rate of the channel for sending one satoshi across it denominated in millionths of a satoshi. 
*/ - uint32 fee_proportional_millionths = 4 [json_name = "fee_proportional_millionths"]; + uint32 fee_proportional_millionths = 4; /// The time-lock delta of the channel. - uint32 cltv_expiry_delta = 5 [json_name = "cltv_expiry_delta"]; + uint32 cltv_expiry_delta = 5; } message RouteHint { @@ -2068,7 +2776,7 @@ message RouteHint { A list of hop hints that when chained together can assist in reaching a specific destination. */ - repeated HopHint hop_hints = 1 [json_name = "hop_hints"]; + repeated HopHint hop_hints = 1; } message Invoice { @@ -2078,65 +2786,78 @@ message Invoice { field of the encoded payment request if the description_hash field is not being used. */ - string memo = 1 [json_name = "memo"]; + string memo = 1; - /** Deprecated. An optional cryptographic receipt of payment which is not - implemented. - */ - bytes receipt = 2 [json_name = "receipt", deprecated = true]; + reserved 2; /** The hex-encoded preimage (32 byte) which will allow settling an incoming - HTLC payable to this preimage + HTLC payable to this preimage. When using REST, this field must be encoded + as base64. + */ + bytes r_preimage = 3; + + /** + The hash of the preimage. When using REST, this field must be encoded as + base64. + */ + bytes r_hash = 4; + + /** + The value of this invoice in satoshis + + The fields value and value_msat are mutually exclusive. */ - bytes r_preimage = 3 [json_name = "r_preimage"]; + int64 value = 5; - /// The hash of the preimage - bytes r_hash = 4 [json_name = "r_hash"]; + /** + The value of this invoice in millisatoshis - /// The value of this invoice in satoshis - int64 value = 5 [json_name = "value"]; + The fields value and value_msat are mutually exclusive. 
+ */ + int64 value_msat = 23; /// Whether this invoice has been fulfilled - bool settled = 6 [json_name = "settled", deprecated = true]; + bool settled = 6 [deprecated = true]; /// When this invoice was created - int64 creation_date = 7 [json_name = "creation_date"]; + int64 creation_date = 7; /// When this invoice was settled - int64 settle_date = 8 [json_name = "settle_date"]; + int64 settle_date = 8; /** - A bare-bones invoice for a payment within the Lightning Network. With the + A bare-bones invoice for a payment within the Lightning Network. With the details of the invoice, the sender has all the data necessary to send a payment to the recipient. */ - string payment_request = 9 [json_name = "payment_request"]; + string payment_request = 9; /** Hash (SHA-256) of a description of the payment. Used if the description of payment (memo) is too long to naturally fit within the description field - of an encoded payment request. + of an encoded payment request. When using REST, this field must be encoded + as base64. */ - bytes description_hash = 10 [json_name = "description_hash"]; + bytes description_hash = 10; /// Payment request expiry time in seconds. Default is 3600 (1 hour). - int64 expiry = 11 [json_name = "expiry"]; + int64 expiry = 11; /// Fallback on-chain address. - string fallback_addr = 12 [json_name = "fallback_addr"]; + string fallback_addr = 12; /// Delta to use for the time-lock of the CLTV extended to the final hop. - uint64 cltv_expiry = 13 [json_name = "cltv_expiry"]; + uint64 cltv_expiry = 13; /** Route hints that can each be individually used to assist in reaching the invoice's destination. */ - repeated RouteHint route_hints = 14 [json_name = "route_hints"]; + repeated RouteHint route_hints = 14; /// Whether this invoice should include routing hints for private channels. - bool private = 15 [json_name = "private"]; + bool private = 15; /** The "add" index of this invoice. 
Each newly created invoice will increment @@ -2144,7 +2865,7 @@ message Invoice { SubscribeInvoices call can use this to instantly get notified of all added invoices with an add_index greater than this one. */ - uint64 add_index = 16 [json_name = "add_index"]; + uint64 add_index = 16; /** The "settle" index of this invoice. Each newly settled invoice will @@ -2152,10 +2873,10 @@ message Invoice { SubscribeInvoices call can use this to instantly get notified of all settled invoices with an settle_index greater than this one. */ - uint64 settle_index = 17 [json_name = "settle_index"]; + uint64 settle_index = 17; /// Deprecated, use amt_paid_sat or amt_paid_msat. - int64 amt_paid = 18 [json_name = "amt_paid", deprecated = true]; + int64 amt_paid = 18 [deprecated = true]; /** The amount that was accepted for this invoice, in satoshis. This will ONLY @@ -2165,7 +2886,7 @@ message Invoice { MORE that was specified in the original invoice. So we'll record that here as well. */ - int64 amt_paid_sat = 19 [json_name = "amt_paid_sat"]; + int64 amt_paid_sat = 19; /** The amount that was accepted for this invoice, in millisatoshis. This will @@ -2175,7 +2896,7 @@ message Invoice { paid MORE that was specified in the original invoice. So we'll record that here as well. */ - int64 amt_paid_msat = 20 [json_name = "amt_paid_msat"]; + int64 amt_paid_msat = 20; enum InvoiceState { OPEN = 0; @@ -2187,10 +2908,19 @@ message Invoice { /** The state the invoice is in. */ - InvoiceState state = 21 [json_name = "state"]; + InvoiceState state = 21; /// List of HTLCs paying to this invoice [EXPERIMENTAL]. - repeated InvoiceHTLC htlcs = 22 [json_name = "htlcs"]; + repeated InvoiceHTLC htlcs = 22; + + /// List of features advertised on the invoice. + map<uint32, Feature> features = 24; + + /** + Indicates if this invoice was a spontaneous payment that arrived via keysend + [EXPERIMENTAL]. 
+ */ + bool is_keysend = 25; } enum InvoiceHTLCState { @@ -2202,39 +2932,45 @@ enum InvoiceHTLCState { /// Details of an HTLC that paid to an invoice message InvoiceHTLC { /// Short channel id over which the htlc was received. - uint64 chan_id = 1 [json_name = "chan_id"]; + uint64 chan_id = 1 [jstype = JS_STRING]; /// Index identifying the htlc on the channel. - uint64 htlc_index = 2 [json_name = "htlc_index"]; + uint64 htlc_index = 2; /// The amount of the htlc in msat. - uint64 amt_msat = 3 [json_name = "amt_msat"]; + uint64 amt_msat = 3; /// Block height at which this htlc was accepted. - int32 accept_height = 4 [json_name = "accept_height"]; + int32 accept_height = 4; /// Time at which this htlc was accepted. - int64 accept_time = 5 [json_name = "accept_time"]; + int64 accept_time = 5; /// Time at which this htlc was settled or canceled. - int64 resolve_time = 6 [json_name = "resolve_time"]; - + int64 resolve_time = 6; + /// Block height at which this htlc expires. - int32 expiry_height = 7 [json_name = "expiry_height"]; + int32 expiry_height = 7; /// Current state the htlc is in. - InvoiceHTLCState state = 8 [json_name = "state"]; + InvoiceHTLCState state = 8; + + /// Custom tlv records. + map<uint64, bytes> custom_records = 9; + + /// The total amount of the mpp payment in msat. + uint64 mpp_total_amt_msat = 10; } message AddInvoiceResponse { - bytes r_hash = 1 [json_name = "r_hash"]; + bytes r_hash = 1; /** - A bare-bones invoice for a payment within the Lightning Network. With the + A bare-bones invoice for a payment within the Lightning Network. With the details of the invoice, the sender has all the data necessary to send a payment to the recipient. */ - string payment_request = 2 [json_name = "payment_request"]; + string payment_request = 2; /** The "add" index of this invoice. 
Each newly created invoice will increment @@ -2242,56 +2978,64 @@ message AddInvoiceResponse { SubscribeInvoices call can use this to instantly get notified of all added invoices with an add_index greater than this one. */ - uint64 add_index = 16 [json_name = "add_index"]; + uint64 add_index = 16; } message PaymentHash { /** The hex-encoded payment hash of the invoice to be looked up. The passed payment hash must be exactly 32 bytes, otherwise an error is returned. + Deprecated now that the REST gateway supports base64 encoding of bytes + fields. */ - string r_hash_str = 1 [json_name = "r_hash_str"]; + string r_hash_str = 1 [deprecated = true]; - /// The payment hash of the invoice to be looked up. - bytes r_hash = 2 [json_name = "r_hash"]; + /** + The payment hash of the invoice to be looked up. When using REST, this field + must be encoded as base64. + */ + bytes r_hash = 2; } message ListInvoiceRequest { - /// If set, only unsettled invoices will be returned in the response. - bool pending_only = 1 [json_name = "pending_only"]; + /** + If set, only invoices that are not settled and not canceled will be returned + in the response. + */ + bool pending_only = 1; /** The index of an invoice that will be used as either the start or end of a query to determine which invoices should be returned in the response. */ - uint64 index_offset = 4 [json_name = "index_offset"]; + uint64 index_offset = 4; /// The max number of invoices to return in the response to this query. - uint64 num_max_invoices = 5 [json_name = "num_max_invoices"]; + uint64 num_max_invoices = 5; /** If set, the invoices returned will result from seeking backwards from the specified index offset. This can be used to paginate backwards. */ - bool reversed = 6 [json_name = "reversed"]; + bool reversed = 6; } message ListInvoiceResponse { /** A list of invoices from the time slice of the time series specified in the request. 
*/ - repeated Invoice invoices = 1 [json_name = "invoices"]; + repeated Invoice invoices = 1; /** The index of the last item in the set of returned invoices. This can be used to seek further, pagination style. */ - uint64 last_index_offset = 2 [json_name = "last_index_offset"]; + uint64 last_index_offset = 2; /** The index of the last item in the set of returned invoices. This can be used to seek backwards, pagination style. */ - uint64 first_index_offset = 3 [json_name = "first_index_offset"]; + uint64 first_index_offset = 3; } message InvoiceSubscription { @@ -2301,7 +3045,7 @@ message InvoiceSubscription { value. This allows callers to catch up on any events they missed while they weren't connected to the streaming RPC. */ - uint64 add_index = 1 [json_name = "add_index"]; + uint64 add_index = 1; /** If specified (non-zero), then we'll first start by sending out @@ -2309,37 +3053,69 @@ message InvoiceSubscription { this value. This allows callers to catch up on any events they missed while they weren't connected to the streaming RPC. */ - uint64 settle_index = 2 [json_name = "settle_index"]; + uint64 settle_index = 2; } +enum PaymentFailureReason { + /** + Payment isn't failed (yet). + */ + FAILURE_REASON_NONE = 0; + + /** + There are more routes to try, but the payment timeout was exceeded. + */ + FAILURE_REASON_TIMEOUT = 1; + + /** + All possible routes were tried and failed permanently. Or there were no + routes to the destination at all. + */ + FAILURE_REASON_NO_ROUTE = 2; + + /** + A non-recoverable error has occurred. + */ + FAILURE_REASON_ERROR = 3; + + /** + Payment details incorrect (unknown hash, invalid amt or + invalid final cltv delta) + */ + FAILURE_REASON_INCORRECT_PAYMENT_DETAILS = 4; + + /** + Insufficient local balance. + */ + FAILURE_REASON_INSUFFICIENT_BALANCE = 5; +} message Payment { /// The payment hash - string payment_hash = 1 [json_name = "payment_hash"]; + string payment_hash = 1; /// Deprecated, use value_sat or value_msat. 
- int64 value = 2 [json_name = "value", deprecated = true]; + int64 value = 2 [deprecated = true]; - /// The date of this payment - int64 creation_date = 3 [json_name = "creation_date"]; + /// Deprecated, use creation_time_ns + int64 creation_date = 3 [deprecated = true]; - /// The path this payment took - repeated string path = 4 [ json_name = "path" ]; + reserved 4; /// Deprecated, use fee_sat or fee_msat. - int64 fee = 5 [json_name = "fee", deprecated = true]; + int64 fee = 5 [deprecated = true]; /// The payment preimage - string payment_preimage = 6 [json_name = "payment_preimage"]; + string payment_preimage = 6; /// The value of the payment in satoshis - int64 value_sat = 7 [json_name = "value_sat"]; + int64 value_sat = 7; /// The value of the payment in milli-satoshis - int64 value_msat = 8 [json_name = "value_msat"]; + int64 value_msat = 8; /// The optional payment request being fulfilled. - string payment_request = 9 [json_name = "payment_request"]; + string payment_request = 9; enum PaymentStatus { UNKNOWN = 0; @@ -2349,27 +3125,100 @@ message Payment { } // The status of the payment. - PaymentStatus status = 10 [json_name = "status"]; + PaymentStatus status = 10; /// The fee paid for this payment in satoshis - int64 fee_sat = 11 [json_name = "fee_sat"]; + int64 fee_sat = 11; /// The fee paid for this payment in milli-satoshis - int64 fee_msat = 12 [json_name = "fee_msat"]; + int64 fee_msat = 12; + + /// The time in UNIX nanoseconds at which the payment was created. + int64 creation_time_ns = 13; + + /// The HTLCs made in attempt to settle the payment. + repeated HTLCAttempt htlcs = 14; + + /** + The creation index of this payment. Each payment can be uniquely identified + by this index, which may not strictly increment by 1 for payments made in + older versions of lnd. 
+ */ + uint64 payment_index = 15; + + PaymentFailureReason failure_reason = 16; +} + +message HTLCAttempt { + enum HTLCStatus { + IN_FLIGHT = 0; + SUCCEEDED = 1; + FAILED = 2; + } + + /// The status of the HTLC. + HTLCStatus status = 1; + + /// The route taken by this HTLC. + Route route = 2; + + /// The time in UNIX nanoseconds at which this HTLC was sent. + int64 attempt_time_ns = 3; + + /** + The time in UNIX nanoseconds at which this HTLC was settled or failed. + This value will not be set if the HTLC is still IN_FLIGHT. + */ + int64 resolve_time_ns = 4; + + // Detailed htlc failure info. + Failure failure = 5; } message ListPaymentsRequest { /** If true, then return payments that have not yet fully completed. This means that pending payments, as well as failed payments will show up if this - field is set to True. + field is set to true. This flag doesn't change the meaning of the indices, + which are tied to individual payments. */ bool include_incomplete = 1; + + /** + The index of a payment that will be used as either the start or end of a + query to determine which payments should be returned in the response. The + index_offset is exclusive. In the case of a zero index_offset, the query + will start with the oldest payment when paginating forwards, or will end + with the most recent payment when paginating backwards. + */ + uint64 index_offset = 2; + + /// The maximal number of payments returned in the response to this query. + uint64 max_payments = 3; + + /** + If set, the payments returned will result from seeking backwards from the + specified index offset. This can be used to paginate backwards. The order + of the returned payments is always oldest first (ascending index order). + */ + bool reversed = 4; } message ListPaymentsResponse { /// The list of payments - repeated Payment payments = 1 [json_name = "payments"]; + repeated Payment payments = 1; + + /** + The index of the first item in the set of returned payments. 
This can be + used as the index_offset to continue seeking backwards in the next request. + */ + uint64 first_index_offset = 2; + + /** + The index of the last item in the set of returned payments. This can be used + as the index_offset to continue seeking forwards in the next request. + */ + uint64 last_index_offset = 3; } message DeleteAllPaymentsRequest { @@ -2385,13 +3234,12 @@ message AbandonChannelRequest { message AbandonChannelResponse { } - message DebugLevelRequest { bool show = 1; string level_spec = 2; } message DebugLevelResponse { - string sub_systems = 1 [json_name = "sub_systems"]; + string sub_systems = 1; } message PayReqString { @@ -2399,115 +3247,183 @@ message PayReqString { string pay_req = 1; } message PayReq { - string destination = 1 [json_name = "destination"]; - string payment_hash = 2 [json_name = "payment_hash"]; - int64 num_satoshis = 3 [json_name = "num_satoshis"]; - int64 timestamp = 4 [json_name = "timestamp"]; - int64 expiry = 5 [json_name = "expiry"]; - string description = 6 [json_name = "description"]; - string description_hash = 7 [json_name = "description_hash"]; - string fallback_addr = 8 [json_name = "fallback_addr"]; - int64 cltv_expiry = 9 [json_name = "cltv_expiry"]; - repeated RouteHint route_hints = 10 [json_name = "route_hints"]; -} - -message FeeReportRequest {} + string destination = 1; + string payment_hash = 2; + int64 num_satoshis = 3; + int64 timestamp = 4; + int64 expiry = 5; + string description = 6; + string description_hash = 7; + string fallback_addr = 8; + int64 cltv_expiry = 9; + repeated RouteHint route_hints = 10; + bytes payment_addr = 11; + int64 num_msat = 12; + map<uint32, Feature> features = 13; +} + +enum FeatureBit { + DATALOSS_PROTECT_REQ = 0; + DATALOSS_PROTECT_OPT = 1; + INITIAL_ROUING_SYNC = 3; + UPFRONT_SHUTDOWN_SCRIPT_REQ = 4; + UPFRONT_SHUTDOWN_SCRIPT_OPT = 5; + GOSSIP_QUERIES_REQ = 6; + GOSSIP_QUERIES_OPT = 7; + TLV_ONION_REQ = 8; + TLV_ONION_OPT = 9; + EXT_GOSSIP_QUERIES_REQ = 10; + EXT_GOSSIP_QUERIES_OPT 
= 11; + STATIC_REMOTE_KEY_REQ = 12; + STATIC_REMOTE_KEY_OPT = 13; + PAYMENT_ADDR_REQ = 14; + PAYMENT_ADDR_OPT = 15; + MPP_REQ = 16; + MPP_OPT = 17; +} + +message Feature { + string name = 2; + bool is_required = 3; + bool is_known = 4; +} + +message FeeReportRequest { +} message ChannelFeeReport { + /// The short channel id that this fee report belongs to. + uint64 chan_id = 5 [jstype = JS_STRING]; + /// The channel that this fee report belongs to. - string chan_point = 1 [json_name = "channel_point"]; + string channel_point = 1; /// The base fee charged regardless of the number of milli-satoshis sent. - int64 base_fee_msat = 2 [json_name = "base_fee_msat"]; + int64 base_fee_msat = 2; - /// The amount charged per milli-satoshis transferred expressed in millionths of a satoshi. - int64 fee_per_mil = 3 [json_name = "fee_per_mil"]; + /// The amount charged per milli-satoshis transferred expressed in + /// millionths of a satoshi. + int64 fee_per_mil = 3; - /// The effective fee rate in milli-satoshis. Computed by dividing the fee_per_mil value by 1 million. - double fee_rate = 4 [json_name = "fee_rate"]; + /// The effective fee rate in milli-satoshis. Computed by dividing the + /// fee_per_mil value by 1 million. + double fee_rate = 4; } message FeeReportResponse { - /// An array of channel fee reports which describes the current fee schedule for each channel. - repeated ChannelFeeReport channel_fees = 1 [json_name = "channel_fees"]; + /// An array of channel fee reports which describes the current fee schedule + /// for each channel. + repeated ChannelFeeReport channel_fees = 1; - /// The total amount of fee revenue (in satoshis) the switch has collected over the past 24 hrs. - uint64 day_fee_sum = 2 [json_name = "day_fee_sum"]; + /// The total amount of fee revenue (in satoshis) the switch has collected + /// over the past 24 hrs. + uint64 day_fee_sum = 2; - /// The total amount of fee revenue (in satoshis) the switch has collected over the past 1 week. 
- uint64 week_fee_sum = 3 [json_name = "week_fee_sum"]; + /// The total amount of fee revenue (in satoshis) the switch has collected + /// over the past 1 week. + uint64 week_fee_sum = 3; - /// The total amount of fee revenue (in satoshis) the switch has collected over the past 1 month. - uint64 month_fee_sum = 4 [json_name = "month_fee_sum"]; + /// The total amount of fee revenue (in satoshis) the switch has collected + /// over the past 1 month. + uint64 month_fee_sum = 4; } message PolicyUpdateRequest { oneof scope { /// If set, then this update applies to all currently active channels. - bool global = 1 [json_name = "global"] ; + bool global = 1; /// If set, this update will target a specific channel. - ChannelPoint chan_point = 2 [json_name = "chan_point"]; + ChannelPoint chan_point = 2; } /// The base fee charged regardless of the number of milli-satoshis sent. - int64 base_fee_msat = 3 [json_name = "base_fee_msat"]; + int64 base_fee_msat = 3; - /// The effective fee rate in milli-satoshis. The precision of this value goes up to 6 decimal places, so 1e-6. - double fee_rate = 4 [json_name = "fee_rate"]; + /// The effective fee rate in milli-satoshis. The precision of this value + /// goes up to 6 decimal places, so 1e-6. + double fee_rate = 4; /// The required timelock delta for HTLCs forwarded over the channel. - uint32 time_lock_delta = 5 [json_name = "time_lock_delta"]; + uint32 time_lock_delta = 5; - /// If set, the maximum HTLC size in milli-satoshis. If unset, the maximum HTLC will be unchanged. - uint64 max_htlc_msat = 6 [json_name = "max_htlc_msat"]; + /// If set, the maximum HTLC size in milli-satoshis. If unset, the maximum + /// HTLC will be unchanged. + uint64 max_htlc_msat = 6; + + /// The minimum HTLC size in milli-satoshis. Only applied if + /// min_htlc_msat_specified is true. + uint64 min_htlc_msat = 7; + + /// If true, min_htlc_msat is applied. 
+ bool min_htlc_msat_specified = 8; } message PolicyUpdateResponse { } message ForwardingHistoryRequest { - /// Start time is the starting point of the forwarding history request. All records beyond this point will be included, respecting the end time, and the index offset. - uint64 start_time = 1 [json_name = "start_time"]; + /// Start time is the starting point of the forwarding history request. All + /// records beyond this point will be included, respecting the end time, and + /// the index offset. + uint64 start_time = 1; - /// End time is the end point of the forwarding history request. The response will carry at most 50k records between the start time and the end time. The index offset can be used to implement pagination. - uint64 end_time = 2 [json_name = "end_time"]; + /// End time is the end point of the forwarding history request. The + /// response will carry at most 50k records between the start time and the + /// end time. The index offset can be used to implement pagination. + uint64 end_time = 2; - /// Index offset is the offset in the time series to start at. As each response can only contain 50k records, callers can use this to skip around within a packed time series. - uint32 index_offset = 3 [json_name = "index_offset"]; + /// Index offset is the offset in the time series to start at. As each + /// response can only contain 50k records, callers can use this to skip + /// around within a packed time series. + uint32 index_offset = 3; /// The max number of events to return in the response to this query. - uint32 num_max_events = 4 [json_name = "num_max_events"]; + uint32 num_max_events = 4; } message ForwardingEvent { - /// Timestamp is the time (unix epoch offset) that this circuit was completed. - uint64 timestamp = 1 [json_name = "timestamp"]; + /// Timestamp is the time (unix epoch offset) that this circuit was + /// completed. + uint64 timestamp = 1; /// The incoming channel ID that carried the HTLC that created the circuit. 
- uint64 chan_id_in = 2 [json_name = "chan_id_in"]; + uint64 chan_id_in = 2 [jstype = JS_STRING]; - /// The outgoing channel ID that carried the preimage that completed the circuit. - uint64 chan_id_out = 4 [json_name = "chan_id_out"]; + /// The outgoing channel ID that carried the preimage that completed the + /// circuit. + uint64 chan_id_out = 4 [jstype = JS_STRING]; - /// The total amount (in satoshis) of the incoming HTLC that created half the circuit. - uint64 amt_in = 5 [json_name = "amt_in"]; + /// The total amount (in satoshis) of the incoming HTLC that created half + /// the circuit. + uint64 amt_in = 5; - /// The total amount (in satoshis) of the outgoing HTLC that created the second half of the circuit. - uint64 amt_out = 6 [json_name = "amt_out"]; + /// The total amount (in satoshis) of the outgoing HTLC that created the + /// second half of the circuit. + uint64 amt_out = 6; /// The total fee (in satoshis) that this payment circuit carried. - uint64 fee = 7 [json_name = "fee"]; + uint64 fee = 7; /// The total fee (in milli-satoshis) that this payment circuit carried. - uint64 fee_msat = 8 [json_name = "fee_msat"]; + uint64 fee_msat = 8; + + /// The total amount (in milli-satoshis) of the incoming HTLC that created + /// half the circuit. + uint64 amt_in_msat = 9; + + /// The total amount (in milli-satoshis) of the outgoing HTLC that created + /// the second half of the circuit. + uint64 amt_out_msat = 10; // TODO(roasbeef): add settlement latency? // * use FPE on the chan id? // * also list failures? } message ForwardingHistoryResponse { - /// A list of forwarding events from the time slice of the time series specified in the request. - repeated ForwardingEvent forwarding_events = 1 [json_name = "forwarding_events"]; + /// A list of forwarding events from the time slice of the time series + /// specified in the request. + repeated ForwardingEvent forwarding_events = 1; - /// The index of the last time in the set of returned forwarding events. 
Can be used to seek further, pagination style. - uint32 last_offset_index = 2 [json_name = "last_offset_index"]; + /// The index of the last time in the set of returned forwarding events. Can + /// be used to seek further, pagination style. + uint32 last_offset_index = 2; } message ExportChannelBackupRequest { @@ -2519,62 +3435,253 @@ message ChannelBackup { /** Identifies the channel that this backup belongs to. */ - ChannelPoint chan_point = 1 [ json_name = "chan_point" ]; + ChannelPoint chan_point = 1; /** Is an encrypted single-chan backup. this can be passed to RestoreChannelBackups, or the WalletUnlocker Init and Unlock methods in - order to trigger the recovery protocol. + order to trigger the recovery protocol. When using REST, this field must be + encoded as base64. */ - bytes chan_backup = 2 [ json_name = "chan_backup" ]; + bytes chan_backup = 2; } message MultiChanBackup { /** Is the set of all channels that are included in this multi-channel backup. */ - repeated ChannelPoint chan_points = 1 [ json_name = "chan_points" ]; + repeated ChannelPoint chan_points = 1; /** A single encrypted blob containing all the static channel backups of the channel listed above. This can be stored as a single file or blob, and - safely be replaced with any prior/future versions. + safely be replaced with any prior/future versions. When using REST, this + field must be encoded as base64. */ - bytes multi_chan_backup = 2 [ json_name = "multi_chan_backup" ]; + bytes multi_chan_backup = 2; } -message ChanBackupExportRequest {} -message ChanBackupSnapshot { +message ChanBackupExportRequest { +} +message ChanBackupSnapshot { /** The set of new channels that have been added since the last channel backup snapshot was requested. */ - ChannelBackups single_chan_backups = 1 [ json_name = "single_chan_backups" ]; + ChannelBackups single_chan_backups = 1; /** A multi-channel backup that covers all open channels currently known to lnd. 
*/ - MultiChanBackup multi_chan_backup = 2 [ json_name = "multi_chan_backup" ]; + MultiChanBackup multi_chan_backup = 2; } message ChannelBackups { /** A set of single-chan static channel backups. */ - repeated ChannelBackup chan_backups = 1 [ json_name = "chan_backups" ]; + repeated ChannelBackup chan_backups = 1; } message RestoreChanBackupRequest { oneof backup { - ChannelBackups chan_backups = 1 [ json_name = "chan_backups" ]; + /** + The channels to restore as a list of channel/backup pairs. + */ + ChannelBackups chan_backups = 1; - bytes multi_chan_backup = 2 [ json_name = "multi_chan_backup" ]; + /** + The channels to restore in the packed multi backup format. When using + REST, this field must be encoded as base64. + */ + bytes multi_chan_backup = 2; } } -message RestoreBackupResponse {} +message RestoreBackupResponse { +} -message ChannelBackupSubscription {} +message ChannelBackupSubscription { +} message VerifyChanBackupResponse { } + +message MacaroonPermission { + /// The entity a permission grants access to. + string entity = 1; + + /// The action that is granted. + string action = 2; +} +message BakeMacaroonRequest { + /// The list of permissions the new macaroon should grant. + repeated MacaroonPermission permissions = 1; +} +message BakeMacaroonResponse { + /// The hex encoded macaroon, serialized in binary format. + string macaroon = 1; +} + +message Failure { + enum FailureCode { + /** + The numbers assigned in this enumeration match the failure codes as + defined in BOLT #4. Because protobuf 3 requires enums to start with 0, + a RESERVED value is added. 
+ */ + RESERVED = 0; + + INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS = 1; + INCORRECT_PAYMENT_AMOUNT = 2; + FINAL_INCORRECT_CLTV_EXPIRY = 3; + FINAL_INCORRECT_HTLC_AMOUNT = 4; + FINAL_EXPIRY_TOO_SOON = 5; + INVALID_REALM = 6; + EXPIRY_TOO_SOON = 7; + INVALID_ONION_VERSION = 8; + INVALID_ONION_HMAC = 9; + INVALID_ONION_KEY = 10; + AMOUNT_BELOW_MINIMUM = 11; + FEE_INSUFFICIENT = 12; + INCORRECT_CLTV_EXPIRY = 13; + CHANNEL_DISABLED = 14; + TEMPORARY_CHANNEL_FAILURE = 15; + REQUIRED_NODE_FEATURE_MISSING = 16; + REQUIRED_CHANNEL_FEATURE_MISSING = 17; + UNKNOWN_NEXT_PEER = 18; + TEMPORARY_NODE_FAILURE = 19; + PERMANENT_NODE_FAILURE = 20; + PERMANENT_CHANNEL_FAILURE = 21; + EXPIRY_TOO_FAR = 22; + MPP_TIMEOUT = 23; + + /** + An internal error occurred. + */ + INTERNAL_FAILURE = 997; + + /** + The error source is known, but the failure itself couldn't be decoded. + */ + UNKNOWN_FAILURE = 998; + + /** + An unreadable failure result is returned if the received failure message + cannot be decrypted. In that case the error source is unknown. + */ + UNREADABLE_FAILURE = 999; + } + + /// Failure code as defined in the Lightning spec + FailureCode code = 1; + + reserved 2; + + /// An optional channel update message. + ChannelUpdate channel_update = 3; + + /// A failure type-dependent htlc value. + uint64 htlc_msat = 4; + + /// The sha256 sum of the onion payload. + bytes onion_sha_256 = 5; + + /// A failure type-dependent cltv expiry value. + uint32 cltv_expiry = 6; + + /// A failure type-dependent flags value. + uint32 flags = 7; + + /** + The position in the path of the intermediate or final node that generated + the failure message. Position zero is the sender node. + **/ + uint32 failure_source_index = 8; + + /// A failure type-dependent block height. + uint32 height = 9; +} + +message ChannelUpdate { + /** + The signature that validates the announced data and proves the ownership + of node id. 
+ */
+ bytes signature = 1;
+
+ /**
+ The target chain that this channel was opened within. This value
+ should be the genesis hash of the target chain. Along with the short
+ channel ID, this uniquely identifies the channel globally in a
+ blockchain.
+ */
+ bytes chain_hash = 2;
+
+ /**
+ The unique description of the funding transaction.
+ */
+ uint64 chan_id = 3 [jstype = JS_STRING];
+
+ /**
+ A timestamp that allows ordering in the case of multiple announcements.
+ We should ignore the message if timestamp is not greater than the
+ last-received.
+ */
+ uint32 timestamp = 4;
+
+ /**
+ The bitfield that describes whether optional fields are present in this
+ update. Currently, the least-significant bit must be set to 1 if the
+ optional field MaxHtlc is present.
+ */
+ uint32 message_flags = 10;
+
+ /**
+ The bitfield that describes additional meta-data concerning how the
+ update is to be interpreted. Currently, the least-significant bit must be
+ set to 0 if the creating node corresponds to the first node in the
+ previously sent channel announcement and 1 otherwise. If the second bit
+ is set, then the channel is set to be disabled.
+ */
+ uint32 channel_flags = 5;
+
+ /**
+ The minimum number of blocks this node requires to be added to the expiry
+ of HTLCs. This is a security parameter determined by the node operator.
+ This value represents the required gap between the time locks of the
+ incoming and outgoing HTLC's set to this node.
+ */
+ uint32 time_lock_delta = 6;
+
+ /**
+ The minimum HTLC value which will be accepted.
+ */
+ uint64 htlc_minimum_msat = 7;
+
+ /**
+ The base fee that must be used for incoming HTLC's to this particular
+ channel. This value will be tacked onto the required fee for a payment
+ independent of the size of the payment.
+ */
+ uint32 base_fee = 8;
+
+ /**
+ The fee rate that will be charged per millionth of a satoshi.
+ */
+ uint32 fee_rate = 9;
+
+ /**
+ The maximum HTLC value which will be accepted. 
+ */ + uint64 htlc_maximum_msat = 11; + + /** + The set of data that was appended to this message, some of which we may + not actually know how to iterate or parse. By holding onto this data, we + ensure that we're able to properly validate the set of signatures that + cover these new fields, and ensure we're able to make upgrades to the + network in a forwards compatible manner. + */ + bytes extra_opaque_data = 12; +} diff --git a/lnrpc/rpc.swagger.json b/lnrpc/rpc.swagger.json index 983469bb43..189fb6f0e3 100644 --- a/lnrpc/rpc.swagger.json +++ b/lnrpc/rpc.swagger.json @@ -21,7 +21,7 @@ "operationId": "WalletBalance", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcWalletBalanceResponse" } @@ -38,7 +38,7 @@ "operationId": "ChannelBalance", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcChannelBalanceResponse" } @@ -55,7 +55,7 @@ "operationId": "ChangePassword", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcChangePasswordResponse" } @@ -82,7 +82,7 @@ "operationId": "ListChannels", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcListChannelsResponse" } @@ -116,6 +116,14 @@ "required": false, "type": "boolean", "format": "boolean" + }, + { + "name": "peer", + "description": "*\nFilters the response for channels with a target peer's pubkey. 
If peer is\nempty, all channels will be returned.", + "in": "query", + "required": false, + "type": "string", + "format": "byte" } ], "tags": [ @@ -127,7 +135,7 @@ "operationId": "OpenChannelSync", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcChannelPoint" } @@ -154,7 +162,7 @@ "operationId": "AbandonChannel", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcAbandonChannelResponse" } @@ -163,16 +171,26 @@ "parameters": [ { "name": "channel_point.funding_txid_str", + "description": "*\nHex-encoded string representing the byte-reversed hash of the funding\ntransaction.", "in": "path", "required": true, "type": "string" }, { "name": "channel_point.output_index", + "description": "/ The index of the output of the funding transaction", "in": "path", "required": true, "type": "integer", "format": "int64" + }, + { + "name": "channel_point.funding_txid_bytes", + "description": "*\nTxid of the funding transaction. 
When using REST, this field must be\nencoded as base64.", + "in": "query", + "required": false, + "type": "string", + "format": "byte" } ], "tags": [ @@ -186,7 +204,7 @@ "operationId": "ExportAllChannelBackups", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcChanBackupSnapshot" } @@ -203,7 +221,7 @@ "operationId": "RestoreChannelBackups", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcRestoreBackupResponse" } @@ -230,7 +248,7 @@ "operationId": "VerifyChanBackup", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcVerifyChanBackupResponse" } @@ -257,7 +275,7 @@ "operationId": "ExportChannelBackup", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcChannelBackup" } @@ -266,12 +284,14 @@ "parameters": [ { "name": "chan_point.funding_txid_str", + "description": "*\nHex-encoded string representing the byte-reversed hash of the funding\ntransaction.", "in": "path", "required": true, "type": "string" }, { "name": "chan_point.output_index", + "description": "/ The index of the output of the funding transaction", "in": "path", "required": true, "type": "integer", @@ -279,7 +299,7 @@ }, { "name": "chan_point.funding_txid_bytes", - "description": "/ Txid of the funding transaction.", + "description": "*\nTxid of the funding transaction. 
When using REST, this field must be\nencoded as base64.", "in": "query", "required": false, "type": "string", @@ -297,7 +317,7 @@ "operationId": "ClosedChannels", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcClosedChannelsResponse" } @@ -358,7 +378,7 @@ "operationId": "PendingChannels", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcPendingChannelsResponse" } @@ -375,7 +395,7 @@ "operationId": "SendPaymentSync", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcSendResponse" } @@ -402,7 +422,7 @@ "operationId": "SendToRouteSync", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcSendResponse" } @@ -429,25 +449,66 @@ "operationId": "CloseChannel", "responses": { "200": { - "description": "(streaming responses)", + "description": "A successful response.(streaming responses)", "schema": { - "$ref": "#/definitions/lnrpcCloseStatusUpdate" + "$ref": "#/x-stream-definitions/lnrpcCloseStatusUpdate" } } }, "parameters": [ { "name": "channel_point.funding_txid_str", + "description": "*\nHex-encoded string representing the byte-reversed hash of the funding\ntransaction.", "in": "path", "required": true, "type": "string" }, { "name": "channel_point.output_index", + "description": "/ The index of the output of the funding transaction", "in": "path", "required": true, "type": "integer", "format": "int64" + }, + { + "name": "channel_point.funding_txid_bytes", + "description": "*\nTxid of the funding transaction. When using REST, this field must be\nencoded as base64.", + "in": "query", + "required": false, + "type": "string", + "format": "byte" + }, + { + "name": "force", + "description": "/ If true, then the channel will be closed forcibly. 
This means the\n/ current commitment transaction will be signed and broadcast.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "target_conf", + "description": "/ The target number of blocks that the closure transaction should be\n/ confirmed by.", + "in": "query", + "required": false, + "type": "integer", + "format": "int32" + }, + { + "name": "sat_per_byte", + "description": "/ A manual fee rate set in sat/byte that should be used when crafting the\n/ closure transaction.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "delivery_address", + "description": "An optional address to send funds to in the case of a cooperative close.\nIf the channel was opened with an upfront shutdown script and this field\nis set, the request to close will fail because the channel must pay out\nto the upfront shutdown addresss.", + "in": "query", + "required": false, + "type": "string" } ], "tags": [ @@ -461,7 +522,7 @@ "operationId": "UpdateChannelPolicy", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcPolicyUpdateResponse" } @@ -488,7 +549,7 @@ "operationId": "FeeReport", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcFeeReportResponse" } @@ -506,7 +567,7 @@ "operationId": "GenSeed", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcGenSeedResponse" } @@ -515,7 +576,7 @@ "parameters": [ { "name": "aezeed_passphrase", - "description": "*\naezeed_passphrase is an optional user provided passphrase that will be used\nto encrypt the generated aezeed cipher seed.", + "description": "*\naezeed_passphrase is an optional user provided passphrase that will be used\nto encrypt the generated aezeed cipher seed. 
When using REST, this field\nmust be encoded as base64.", "in": "query", "required": false, "type": "string", @@ -523,7 +584,7 @@ }, { "name": "seed_entropy", - "description": "*\nseed_entropy is an optional 16-bytes generated via CSPRNG. If not\nspecified, then a fresh set of randomness will be used to create the seed.", + "description": "*\nseed_entropy is an optional 16-bytes generated via CSPRNG. If not\nspecified, then a fresh set of randomness will be used to create the seed.\nWhen using REST, this field must be encoded as base64.", "in": "query", "required": false, "type": "string", @@ -541,7 +602,7 @@ "operationId": "GetInfo", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcGetInfoResponse" } @@ -554,11 +615,11 @@ }, "/v1/graph": { "get": { - "summary": "* lncli: `describegraph`\nDescribeGraph returns a description of the latest graph state from the\npoint of view of the node. The graph information is partitioned into two\ncomponents: all the nodes/vertexes, and all the edges that connect the\nvertexes themselves. As this is a directed graph, the edges also contain\nthe node directional specific routing policy which includes: the time lock\ndelta, fee information, etc.", + "summary": "* lncli: `describegraph`\nDescribeGraph returns a description of the latest graph state from the\npoint of view of the node. The graph information is partitioned into two\ncomponents: all the nodes/vertexes, and all the edges that connect the\nvertexes themselves. 
As this is a directed graph, the edges also contain\nthe node directional specific routing policy which includes: the time lock\ndelta, fee information, etc.", "operationId": "DescribeGraph", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcChannelGraph" } @@ -585,7 +646,7 @@ "operationId": "GetChanInfo", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcChannelEdge" } @@ -594,6 +655,7 @@ "parameters": [ { "name": "chan_id", + "description": "*\nThe unique channel ID for the channel. The first 3 bytes are the block\nheight, the next 3 the index within the block, and the last 2 bytes are the\noutput index for the channel.", "in": "path", "required": true, "type": "string", @@ -611,7 +673,7 @@ "operationId": "GetNetworkInfo", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcNetworkInfo" } @@ -628,7 +690,7 @@ "operationId": "GetNodeInfo", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcNodeInfo" } @@ -637,6 +699,7 @@ "parameters": [ { "name": "pub_key", + "description": "/ The 33-byte hex-encoded compressed public of the target node", "in": "path", "required": true, "type": "string" @@ -655,13 +718,47 @@ ] } }, + "/v1/graph/nodemetrics": { + "get": { + "summary": "* lncli: `getnodemetrics`\nGetNodeMetrics returns node metrics calculated from the graph. 
Currently\nthe only supported metric is betweenness centrality of individual nodes.", + "operationId": "GetNodeMetrics", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/lnrpcNodeMetricsResponse" + } + } + }, + "parameters": [ + { + "name": "types", + "description": "/ The requested node metrics.", + "in": "query", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "UNKNOWN", + "BETWEENNESS_CENTRALITY" + ] + }, + "collectionFormat": "multi" + } + ], + "tags": [ + "Lightning" + ] + } + }, "/v1/graph/routes/{pub_key}/{amt}": { "get": { "summary": "* lncli: `queryroutes`\nQueryRoutes attempts to query the daemon's Channel Router for a possible\nroute to a target destination capable of carrying a specific amount of\nsatoshis. The returned route contains the full details required to craft and\nsend an HTLC, also including the necessary information that should be\npresent within the Sphinx packet encapsulated within the HTLC.", "operationId": "QueryRoutes", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcQueryRoutesResponse" } @@ -670,20 +767,30 @@ "parameters": [ { "name": "pub_key", + "description": "/ The 33-byte hex-encoded public key for the payment destination", "in": "path", "required": true, "type": "string" }, { "name": "amt", + "description": "*\nThe amount to send expressed in satoshis.\n\nThe fields amt and amt_msat are mutually exclusive.", "in": "path", "required": true, "type": "string", "format": "int64" }, + { + "name": "amt_msat", + "description": "*\nThe amount to send expressed in millisatoshis.\n\nThe fields amt and amt_msat are mutually exclusive.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, { "name": "final_cltv_delta", - "description": "/ An optional CLTV delta from the current height that should be used for the timelock of the final 
hop.", + "description": "*\nAn optional CLTV delta from the current height that should be used for the\ntimelock of the final hop. Note that unlike SendPayment, QueryRoutes does\nnot add any additional block padding on top of final_ctlv_delta. This\npadding of a few blocks needs to be added manually or otherwise failures may\nhappen when a block comes in while the payment is in flight.", "in": "query", "required": false, "type": "integer", @@ -691,7 +798,15 @@ }, { "name": "fee_limit.fixed", - "description": "/ The fee limit expressed as a fixed amount of satoshis.", + "description": "*\nThe fee limit expressed as a fixed amount of satoshis.\n\nThe fields fixed and fixed_msat are mutually exclusive.", + "in": "query", + "required": false, + "type": "string", + "format": "int64" + }, + { + "name": "fee_limit.fixed_msat", + "description": "*\nThe fee limit expressed as a fixed amount of millisatoshis.\n\nThe fields fixed and fixed_msat are mutually exclusive.", "in": "query", "required": false, "type": "string", @@ -707,14 +822,15 @@ }, { "name": "ignored_nodes", - "description": "*\nA list of nodes to ignore during path finding.", + "description": "*\nA list of nodes to ignore during path finding. When using REST, these fields\nmust be encoded as base64.", "in": "query", "required": false, "type": "array", "items": { "type": "string", "format": "byte" - } + }, + "collectionFormat": "multi" }, { "name": "source_pub_key", @@ -733,11 +849,57 @@ }, { "name": "cltv_limit", - "description": "* \nAn optional maximum total time lock for the route. If the source is empty or\nourselves, this should not exceed lnd's `--max-cltv-expiry` setting. If\nzero, then the value of `--max-cltv-expiry` is used as the limit.", + "description": "*\nAn optional maximum total time lock for the route. If the source is empty or\nourselves, this should not exceed lnd's `--max-cltv-expiry` setting. 
If\nzero, then the value of `--max-cltv-expiry` is used as the limit.", "in": "query", "required": false, "type": "integer", "format": "int64" + }, + { + "name": "outgoing_chan_id", + "description": "*\nThe channel id of the channel that must be taken to the first hop. If zero,\nany channel may be used.", + "in": "query", + "required": false, + "type": "string", + "format": "uint64" + }, + { + "name": "last_hop_pubkey", + "description": "*\nThe pubkey of the last hop of the route. If empty, any hop may be used.", + "in": "query", + "required": false, + "type": "string", + "format": "byte" + }, + { + "name": "dest_features", + "description": "*\nFeatures assumed to be supported by the final node. All transitive feature\ndependencies must also be set properly. For a given feature bit pair, either\noptional or remote may be set, but not both. If this field is nil or empty,\nthe router will try to load destination features from the graph as a\nfallback.", + "in": "query", + "required": false, + "type": "array", + "items": { + "type": "string", + "enum": [ + "DATALOSS_PROTECT_REQ", + "DATALOSS_PROTECT_OPT", + "INITIAL_ROUING_SYNC", + "UPFRONT_SHUTDOWN_SCRIPT_REQ", + "UPFRONT_SHUTDOWN_SCRIPT_OPT", + "GOSSIP_QUERIES_REQ", + "GOSSIP_QUERIES_OPT", + "TLV_ONION_REQ", + "TLV_ONION_OPT", + "EXT_GOSSIP_QUERIES_REQ", + "EXT_GOSSIP_QUERIES_OPT", + "STATIC_REMOTE_KEY_REQ", + "STATIC_REMOTE_KEY_OPT", + "PAYMENT_ADDR_REQ", + "PAYMENT_ADDR_OPT", + "MPP_REQ", + "MPP_OPT" + ] + }, + "collectionFormat": "multi" } ], "tags": [ @@ -747,12 +909,12 @@ }, "/v1/initwallet": { "post": { - "summary": "* \nInitWallet is used when lnd is starting up for the first time to fully\ninitialize the daemon and its internal wallet. At the very least a wallet\npassword must be provided. This will be used to encrypt sensitive material\non disk.", + "summary": "*\nInitWallet is used when lnd is starting up for the first time to fully\ninitialize the daemon and its internal wallet. 
At the very least a wallet\npassword must be provided. This will be used to encrypt sensitive material\non disk.", "description": "In the case of a recovery scenario, the user can also specify their aezeed\nmnemonic and passphrase. If set, then the daemon will use this prior state\nto initialize its internal wallet.\n\nAlternatively, this can be used along with the GenSeed RPC to obtain a\nseed, then present it to the user. Once it has been verified by the user,\nthe seed can be fed into this RPC in order to commit the new wallet.", "operationId": "InitWallet", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcInitWalletResponse" } @@ -779,7 +941,7 @@ "operationId": "LookupInvoice", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcInvoice" } @@ -788,13 +950,14 @@ "parameters": [ { "name": "r_hash_str", + "description": "*\nThe hex-encoded payment hash of the invoice to be looked up. The passed\npayment hash must be exactly 32 bytes, otherwise an error is returned.\nDeprecated now that the REST gateway supports base64 encoding of bytes\nfields.", "in": "path", "required": true, "type": "string" }, { "name": "r_hash", - "description": "/ The payment hash of the invoice to be looked up.", + "description": "*\nThe payment hash of the invoice to be looked up. 
When using REST, this field\nmust be encoded as base64.", "in": "query", "required": false, "type": "string", @@ -812,7 +975,7 @@ "operationId": "ListInvoices", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcListInvoiceResponse" } @@ -821,7 +984,7 @@ "parameters": [ { "name": "pending_only", - "description": "/ If set, only unsettled invoices will be returned in the response.", + "description": "*\nIf set, only invoices that are not settled and not canceled will be returned\nin the response.", "in": "query", "required": false, "type": "boolean", @@ -861,7 +1024,7 @@ "operationId": "AddInvoice", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcAddInvoiceResponse" } @@ -884,13 +1047,13 @@ }, "/v1/invoices/subscribe": { "get": { - "summary": "*\nSubscribeInvoices returns a uni-directional stream (server -\u003e client) for\nnotifying the client of newly added/settled invoices. The caller can\noptionally specify the add_index and/or the settle_index. If the add_index\nis specified, then we'll first start by sending add invoice events for all\ninvoices with an add_index greater than the specified value. If the\nsettle_index is specified, the next, we'll send out all settle events for\ninvoices with a settle_index greater than the specified value. One or both\nof these fields can be set. If no fields are set, then we'll only send out\nthe latest add/settle events.", + "summary": "*\nSubscribeInvoices returns a uni-directional stream (server -\u003e client) for\nnotifying the client of newly added/settled invoices. The caller can\noptionally specify the add_index and/or the settle_index. If the add_index\nis specified, then we'll first start by sending add invoice events for all\ninvoices with an add_index greater than the specified value. 
If the\nsettle_index is specified, the next, we'll send out all settle events for\ninvoices with a settle_index greater than the specified value. One or both\nof these fields can be set. If no fields are set, then we'll only send out\nthe latest add/settle events.", "operationId": "SubscribeInvoices", "responses": { "200": { - "description": "(streaming responses)", + "description": "A successful response.(streaming responses)", "schema": { - "$ref": "#/definitions/lnrpcInvoice" + "$ref": "#/x-stream-definitions/lnrpcInvoice" } } }, @@ -917,13 +1080,40 @@ ] } }, + "/v1/macaroon": { + "post": { + "summary": "* lncli: `bakemacaroon`\nBakeMacaroon allows the creation of a new macaroon with custom read and\nwrite permissions. No first-party caveats are added since this can be done\noffline.", + "operationId": "BakeMacaroon", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/lnrpcBakeMacaroonResponse" + } + } + }, + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/lnrpcBakeMacaroonRequest" + } + } + ], + "tags": [ + "Lightning" + ] + } + }, "/v1/newaddress": { "get": { "summary": "* lncli: `newaddress`\nNewAddress creates a new address under control of the local wallet.", "operationId": "NewAddress", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcNewAddressResponse" } @@ -956,7 +1146,7 @@ "operationId": "ListPayments", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcListPaymentsResponse" } @@ -965,7 +1155,31 @@ "parameters": [ { "name": "include_incomplete", - "description": "*\nIf true, then return payments that have not yet fully completed. 
This means\nthat pending payments, as well as failed payments will show up if this\nfield is set to True.", + "description": "*\nIf true, then return payments that have not yet fully completed. This means\nthat pending payments, as well as failed payments will show up if this\nfield is set to true. This flag doesn't change the meaning of the indices,\nwhich are tied to individual payments.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + }, + { + "name": "index_offset", + "description": "*\nThe index of a payment that will be used as either the start or end of a\nquery to determine which payments should be returned in the response. The\nindex_offset is exclusive. In the case of a zero index_offset, the query\nwill start with the oldest payment when paginating forwards, or will end\nwith the most recent payment when paginating backwards.", + "in": "query", + "required": false, + "type": "string", + "format": "uint64" + }, + { + "name": "max_payments", + "description": "/ The maximal number of payments returned in the response to this query.", + "in": "query", + "required": false, + "type": "string", + "format": "uint64" + }, + { + "name": "reversed", + "description": "*\nIf set, the payments returned will result from seeking backwards from the\nspecified index offset. This can be used to paginate backwards. 
The order\nof the returned payments is always oldest first (ascending index order).", "in": "query", "required": false, "type": "boolean", @@ -981,7 +1195,7 @@ "operationId": "DeleteAllPayments", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcDeleteAllPaymentsResponse" } @@ -998,7 +1212,7 @@ "operationId": "DecodePayReq", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcPayReq" } @@ -1007,6 +1221,7 @@ "parameters": [ { "name": "pay_req", + "description": "/ The payment request string to be decoded", "in": "path", "required": true, "type": "string" @@ -1023,12 +1238,22 @@ "operationId": "ListPeers", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcListPeersResponse" } } }, + "parameters": [ + { + "name": "latest_error", + "description": "If true, only the last error that our peer sent us will be returned with\nthe peer's information, rather than the full set of historic errors we have\nstored.", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" + } + ], "tags": [ "Lightning" ] @@ -1038,7 +1263,7 @@ "operationId": "ConnectPeer", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcConnectPeerResponse" } @@ -1065,7 +1290,7 @@ "operationId": "DisconnectPeer", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcDisconnectPeerResponse" } @@ -1074,6 +1299,7 @@ "parameters": [ { "name": "pub_key", + "description": "/ The pubkey of the node to disconnect from", "in": "path", "required": true, "type": "string" @@ -1090,7 +1316,7 @@ "operationId": "SignMessage", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { 
"$ref": "#/definitions/lnrpcSignMessageResponse" } @@ -1114,11 +1340,11 @@ "/v1/switch": { "post": { "summary": "* lncli: `fwdinghistory`\nForwardingHistory allows the caller to query the htlcswitch for a record of\nall HTLCs forwarded within the target time range, and integer offset\nwithin that time range. If no time-range is specified, then the first chunk\nof the past 24 hrs of forwarding history are returned.", - "description": "A list of forwarding events are returned. The size of each forwarding event\nis 40 bytes, and the max message size able to be returned in gRPC is 4 MiB.\nAs a result each message can only contain 50k entries. Each response has\nthe index offset of the last entry. The index offset can be provided to the\nrequest to allow the caller to skip a series of records.", + "description": "A list of forwarding events are returned. The size of each forwarding event\nis 40 bytes, and the max message size able to be returned in gRPC is 4 MiB.\nAs a result each message can only contain 50k entries. Each response has\nthe index offset of the last entry. 
The index offset can be provided to the\nrequest to allow the caller to skip a series of records.", "operationId": "ForwardingHistory", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcForwardingHistoryResponse" } @@ -1145,7 +1371,7 @@ "operationId": "GetTransactions", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcTransactionDetails" } @@ -1160,7 +1386,7 @@ "operationId": "SendCoins", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcSendCoinsResponse" } @@ -1187,7 +1413,7 @@ "operationId": "EstimateFee", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcEstimateFeeResponse" } @@ -1196,7 +1422,7 @@ "parameters": [ { "name": "target_conf", - "description": "/ The target number of blocks that this transaction should be confirmed by.", + "description": "/ The target number of blocks that this transaction should be confirmed\n/ by.", "in": "query", "required": false, "type": "integer", @@ -1214,7 +1440,7 @@ "operationId": "UnlockWallet", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcUnlockWalletResponse" } @@ -1241,7 +1467,7 @@ "operationId": "ListUnspent", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcListUnspentResponse" } @@ -1276,7 +1502,7 @@ "operationId": "VerifyMessage", "responses": { "200": { - "description": "", + "description": "A successful response.", "schema": { "$ref": "#/definitions/lnrpcVerifyMessageResponse" } @@ -1317,10 +1543,63 @@ "OPEN_CHANNEL", "CLOSED_CHANNEL", "ACTIVE_CHANNEL", - "INACTIVE_CHANNEL" + "INACTIVE_CHANNEL", + "PENDING_OPEN_CHANNEL" ], "default": "OPEN_CHANNEL" 
}, + "FailureFailureCode": { + "type": "string", + "enum": [ + "RESERVED", + "INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS", + "INCORRECT_PAYMENT_AMOUNT", + "FINAL_INCORRECT_CLTV_EXPIRY", + "FINAL_INCORRECT_HTLC_AMOUNT", + "FINAL_EXPIRY_TOO_SOON", + "INVALID_REALM", + "EXPIRY_TOO_SOON", + "INVALID_ONION_VERSION", + "INVALID_ONION_HMAC", + "INVALID_ONION_KEY", + "AMOUNT_BELOW_MINIMUM", + "FEE_INSUFFICIENT", + "INCORRECT_CLTV_EXPIRY", + "CHANNEL_DISABLED", + "TEMPORARY_CHANNEL_FAILURE", + "REQUIRED_NODE_FEATURE_MISSING", + "REQUIRED_CHANNEL_FEATURE_MISSING", + "UNKNOWN_NEXT_PEER", + "TEMPORARY_NODE_FAILURE", + "PERMANENT_NODE_FAILURE", + "PERMANENT_CHANNEL_FAILURE", + "EXPIRY_TOO_FAR", + "MPP_TIMEOUT", + "INTERNAL_FAILURE", + "UNKNOWN_FAILURE", + "UNREADABLE_FAILURE" + ], + "default": "RESERVED", + "description": " - RESERVED: *\nThe numbers assigned in this enumeration match the failure codes as\ndefined in BOLT #4. Because protobuf 3 requires enums to start with 0,\na RESERVED value is added.\n - INTERNAL_FAILURE: *\nAn internal error occurred.\n - UNKNOWN_FAILURE: *\nThe error source is known, but the failure itself couldn't be decoded.\n - UNREADABLE_FAILURE: *\nAn unreadable failure result is returned if the received failure message\ncannot be decrypted. In that case the error source is unknown." 
+ }, + "ForceClosedChannelAnchorState": { + "type": "string", + "enum": [ + "LIMBO", + "RECOVERED", + "LOST" + ], + "default": "LIMBO" + }, + "HTLCAttemptHTLCStatus": { + "type": "string", + "enum": [ + "IN_FLIGHT", + "SUCCEEDED", + "FAILED" + ], + "default": "IN_FLIGHT" + }, "InvoiceInvoiceState": { "type": "string", "enum": [ @@ -1341,6 +1620,14 @@ ], "default": "UNKNOWN" }, + "PeerEventEventType": { + "type": "string", + "enum": [ + "PEER_ONLINE", + "PEER_OFFLINE" + ], + "default": "PEER_ONLINE" + }, "PeerSyncType": { "type": "string", "enum": [ @@ -1364,6 +1651,38 @@ } } }, + "PendingChannelsResponseCommitments": { + "type": "object", + "properties": { + "local_txid": { + "type": "string", + "description": "/ Hash of the local version of the commitment tx." + }, + "remote_txid": { + "type": "string", + "description": "/ Hash of the remote version of the commitment tx." + }, + "remote_pending_txid": { + "type": "string", + "description": "/ Hash of the remote pending version of the commitment tx." + }, + "local_commit_fee_sat": { + "type": "string", + "format": "uint64", + "description": "The amount in satoshis calculated to be paid in fees for the local\ncommitment." + }, + "remote_commit_fee_sat": { + "type": "string", + "format": "uint64", + "description": "The amount in satoshis calculated to be paid in fees for the remote\ncommitment." + }, + "remote_pending_commit_fee_sat": { + "type": "string", + "format": "uint64", + "description": "The amount in satoshis calculated to be paid in fees for the remote\npending commitment." + } + } + }, "PendingChannelsResponseForceClosedChannel": { "type": "object", "properties": { @@ -1400,6 +1719,9 @@ "items": { "$ref": "#/definitions/lnrpcPendingHTLC" } + }, + "anchor": { + "$ref": "#/definitions/ForceClosedChannelAnchorState" } } }, @@ -1427,12 +1749,20 @@ "local_chan_reserve_sat": { "type": "string", "format": "int64", - "description": "/ The minimum satoshis this node is required to reserve in its balance." 
+ "description": "/ The minimum satoshis this node is required to reserve in its\n/ balance." }, "remote_chan_reserve_sat": { "type": "string", "format": "int64", "description": "*\nThe minimum satoshis the other node is required to reserve in its\nbalance." + }, + "initiator": { + "$ref": "#/definitions/lnrpcInitiator", + "description": "The party that initiated opening the channel." + }, + "commitment_type": { + "$ref": "#/definitions/lnrpcCommitmentType", + "description": "/ The commitment type used by this channel." } } }, @@ -1476,6 +1806,10 @@ "type": "string", "format": "int64", "title": "/ The balance in satoshis encumbered in this channel" + }, + "commitments": { + "$ref": "#/definitions/PendingChannelsResponseCommitments", + "description": "*\nA list of valid commitment transactions. Any of these can confirm at\nthis point." } } }, @@ -1491,7 +1825,7 @@ }, "payment_request": { "type": "string", - "description": "*\nA bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." + "description": "*\nA bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." }, "add_index": { "type": "string", @@ -1510,7 +1844,28 @@ ], "default": "WITNESS_PUBKEY_HASH", "description": "- `p2wkh`: Pay to witness key hash (`WITNESS_PUBKEY_HASH` = 0)\n- `np2wkh`: Pay to nested witness key hash (`NESTED_PUBKEY_HASH` = 1)", - "title": "* \n`AddressType` has to be one of:" + "title": "*\n`AddressType` has to be one of:" + }, + "lnrpcBakeMacaroonRequest": { + "type": "object", + "properties": { + "permissions": { + "type": "array", + "items": { + "$ref": "#/definitions/lnrpcMacaroonPermission" + }, + "description": "/ The list of permissions the new macaroon should grant." 
+ } + } + }, + "lnrpcBakeMacaroonResponse": { + "type": "object", + "properties": { + "macaroon": { + "type": "string", + "description": "/ The hex encoded macaroon, serialized in binary format." + } + } }, "lnrpcChain": { "type": "object", @@ -1538,18 +1893,51 @@ } } }, + "lnrpcChanPointShim": { + "type": "object", + "properties": { + "amt": { + "type": "string", + "format": "int64", + "description": "*\nThe size of the pre-crafted output to be used as the channel point for this\nchannel funding." + }, + "chan_point": { + "$ref": "#/definitions/lnrpcChannelPoint", + "description": "/ The target channel point to refrence in created commitment transactions." + }, + "local_key": { + "$ref": "#/definitions/lnrpcKeyDescriptor", + "description": "/ Our local key to use when creating the multi-sig output." + }, + "remote_key": { + "type": "string", + "format": "byte", + "description": "/ The key of the remote party to use when creating the multi-sig output." + }, + "pending_chan_id": { + "type": "string", + "format": "byte", + "description": "*\nIf non-zero, then this will be used as the pending channel ID on the wire\nprotocol to initate the funding request. This is an optional field, and\nshould only be set if the responder is already expecting a specific pending\nchannel ID." + }, + "thaw_height": { + "type": "integer", + "format": "int64", + "description": "*\nThis uint32 indicates if this channel is to be considered 'frozen'. A\nfrozen channel does not allow a cooperative channel close by the\ninitiator. The thaw_height is the height that this restriction stops\napplying to the channel." + } + } + }, "lnrpcChangePasswordRequest": { "type": "object", "properties": { "current_password": { "type": "string", "format": "byte", - "description": "*\ncurrent_password should be the current valid passphrase used to unlock the\ndaemon." + "description": "*\ncurrent_password should be the current valid passphrase used to unlock the\ndaemon. 
When using REST, this field must be encoded as base64." }, "new_password": { "type": "string", "format": "byte", - "description": "*\nnew_password should be the new passphrase that will be needed to unlock the\ndaemon." + "description": "*\nnew_password should be the new passphrase that will be needed to unlock the\ndaemon. When using REST, this field must be encoded as base64." } } }, @@ -1666,7 +2054,35 @@ "static_remote_key": { "type": "boolean", "format": "boolean", - "description": "*\nIf true, then this channel uses the modern commitment format where the key\nin the output of the remote party does not change each state. This makes\nback up and recovery easier as when the channel is closed, the funds go\ndirectly to that key." + "description": "/ Deprecated. Use commitment_type." + }, + "commitment_type": { + "$ref": "#/definitions/lnrpcCommitmentType", + "description": "/ The commitment type used by this channel." + }, + "lifetime": { + "type": "string", + "format": "int64", + "description": "*\nThe number of seconds that the channel has been monitored by the channel\nscoring system. Scores are currently not persisted, so this value may be\nless than the lifetime of the channel [EXPERIMENTAL]." + }, + "uptime": { + "type": "string", + "format": "int64", + "description": "*\nThe number of seconds that the remote peer has been observed as being online\nby the channel scoring system over the lifetime of the channel\n[EXPERIMENTAL]." + }, + "close_address": { + "type": "string", + "description": "*\nClose address is the address that we will enforce payout to on cooperative\nclose if the channel was opened utilizing option upfront shutdown. This\nvalue can be set on channel open by setting close_address in an open channel\nrequest. If this value is not set, you can still choose a payout address by\ncooperatively closing with the delivery_address field set." 
+ }, + "push_amount_sat": { + "type": "string", + "format": "uint64", + "description": "The amount that the initiator of the channel optionally pushed to the remote\nparty on channel open. This amount will be zero if the channel initiator did\nnot push any funds to the remote peer. If the initiator field is true, we\npushed this amount to our peer, if it is false, the remote peer pushed this\namount to us." + }, + "thaw_height": { + "type": "integer", + "format": "int64", + "description": "*\nThis uint32 indicates if this channel is to be considered 'frozen'. A\nfrozen channel doest not allow a cooperative channel close by the\ninitiator. The thaw_height is the height that this restriction stops\napplying to the channel. This field is optional, not setting it or using a\nvalue of zero will mean the channel has no additional restrictions." } } }, @@ -1691,7 +2107,7 @@ "funding_amt": { "type": "string", "format": "uint64", - "description": "/ The funding amount in satoshis that initiator wishes to use in the channel." + "description": "/ The funding amount in satoshis that initiator wishes to use in the\n/ channel." }, "push_amt": { "type": "string", @@ -1706,12 +2122,12 @@ "max_value_in_flight": { "type": "string", "format": "uint64", - "description": "/ The maximum amount of coins in millisatoshis that can be pending in this channel." + "description": "/ The maximum amount of coins in millisatoshis that can be pending in this\n/ channel." }, "channel_reserve": { "type": "string", "format": "uint64", - "description": "/ The minimum amount of satoshis the initiator requires us to have at all times." + "description": "/ The minimum amount of satoshis the initiator requires us to have at all\n/ times." }, "min_htlc": { "type": "string", @@ -1721,12 +2137,12 @@ "fee_per_kw": { "type": "string", "format": "uint64", - "description": "/ The initial fee rate that the initiator suggests for both commitment transactions." 
+ "description": "/ The initial fee rate that the initiator suggests for both commitment\n/ transactions." }, "csv_delay": { "type": "integer", "format": "int64", - "description": "*\nThe number of blocks to use for the relative time lock in the pay-to-self output\nof both commitment transactions." + "description": "*\nThe number of blocks to use for the relative time lock in the pay-to-self\noutput of both commitment transactions." }, "max_accepted_htlcs": { "type": "integer", @@ -1736,7 +2152,7 @@ "channel_flags": { "type": "integer", "format": "int64", - "description": "/ A bit-field which the initiator uses to specify proposed channel behavior." + "description": "/ A bit-field which the initiator uses to specify proposed channel\n/ behavior." } } }, @@ -1750,7 +2166,7 @@ "chan_backup": { "type": "string", "format": "byte", - "description": "*\nIs an encrypted single-chan backup. this can be passed to\nRestoreChannelBackups, or the WalletUnlocker Init and Unlock methods in\norder to trigger the recovery protocol." + "description": "*\nIs an encrypted single-chan backup. this can be passed to\nRestoreChannelBackups, or the WalletUnlocker Init and Unlock methods in\norder to trigger the recovery protocol. When using REST, this field must be\nencoded as base64." } } }, @@ -1828,6 +2244,14 @@ "close_type": { "$ref": "#/definitions/ChannelCloseSummaryClosureType", "description": "/ Details on how the channel was closed." + }, + "open_initiator": { + "$ref": "#/definitions/lnrpcInitiator", + "description": "*\nOpen initiator is the party that initiated opening the channel. Note that\nthis value may be unknown if the channel was closed before we migrated to\nstore open channel information after close." + }, + "close_initiator": { + "$ref": "#/definitions/lnrpcInitiator", + "description": "*\nClose initiator indicates which party initiated the close. 
This value will\nbe unknown for channels that were cooperatively closed before we started\ntracking cooperative close initiators. Note that this indicates which party\ninitiated a close, and it is possible for both to initiate cooperative or\nforce closes, although only one party's close will be confirmed on chain." } } }, @@ -1919,6 +2343,9 @@ "inactive_channel": { "$ref": "#/definitions/lnrpcChannelPoint" }, + "pending_open_channel": { + "$ref": "#/definitions/lnrpcPendingUpdate" + }, "type": { "$ref": "#/definitions/ChannelEventUpdateUpdateType" } @@ -1927,7 +2354,12 @@ "lnrpcChannelFeeReport": { "type": "object", "properties": { - "chan_point": { + "chan_id": { + "type": "string", + "format": "uint64", + "description": "/ The short channel id that this fee report belongs to." + }, + "channel_point": { "type": "string", "description": "/ The channel that this fee report belongs to." }, @@ -1939,12 +2371,12 @@ "fee_per_mil": { "type": "string", "format": "int64", - "description": "/ The amount charged per milli-satoshis transferred expressed in millionths of a satoshi." + "description": "/ The amount charged per milli-satoshis transferred expressed in\n/ millionths of a satoshi." }, "fee_rate": { "type": "number", "format": "double", - "description": "/ The effective fee rate in milli-satoshis. Computed by dividing the fee_per_mil value by 1 million." + "description": "/ The effective fee rate in milli-satoshis. Computed by dividing the\n/ fee_per_mil value by 1 million." } } }, @@ -1982,11 +2414,11 @@ "funding_txid_bytes": { "type": "string", "format": "byte", - "title": "/ Txid of the funding transaction" + "description": "*\nTxid of the funding transaction. When using REST, this field must be\nencoded as base64." }, "funding_txid_str": { "type": "string", - "title": "/ Hex-encoded string representing the funding transaction" + "description": "*\nHex-encoded string representing the byte-reversed hash of the funding\ntransaction." 
}, "output_index": { "type": "integer", @@ -1995,11 +2427,76 @@ } } }, - "lnrpcCloseStatusUpdate": { + "lnrpcChannelUpdate": { "type": "object", "properties": { - "close_pending": { - "$ref": "#/definitions/lnrpcPendingUpdate" + "signature": { + "type": "string", + "format": "byte", + "description": "*\nThe signature that validates the announced data and proves the ownership\nof node id." + }, + "chain_hash": { + "type": "string", + "format": "byte", + "description": "*\nThe target chain that this channel was opened within. This value\nshould be the genesis hash of the target chain. Along with the short\nchannel ID, this uniquely identifies the channel globally in a\nblockchain." + }, + "chan_id": { + "type": "string", + "format": "uint64", + "description": "*\nThe unique description of the funding transaction." + }, + "timestamp": { + "type": "integer", + "format": "int64", + "description": "*\nA timestamp that allows ordering in the case of multiple announcements.\nWe should ignore the message if timestamp is not greater than the\nlast-received." + }, + "message_flags": { + "type": "integer", + "format": "int64", + "description": "*\nThe bitfield that describes whether optional fields are present in this\nupdate. Currently, the least-significant bit must be set to 1 if the\noptional field MaxHtlc is present." + }, + "channel_flags": { + "type": "integer", + "format": "int64", + "description": "*\nThe bitfield that describes additional meta-data concerning how the\nupdate is to be interpreted. Currently, the least-significant bit must be\nset to 0 if the creating node corresponds to the first node in the\npreviously sent channel announcement and 1 otherwise. If the second bit\nis set, then the channel is set to be disabled." + }, + "time_lock_delta": { + "type": "integer", + "format": "int64", + "description": "*\nThe minimum number of blocks this node requires to be added to the expiry\nof HTLCs. 
This is a security parameter determined by the node operator.\nThis value represents the required gap between the time locks of the\nincoming and outgoing HTLC's set to this node." + }, + "htlc_minimum_msat": { + "type": "string", + "format": "uint64", + "description": "*\nThe minimum HTLC value which will be accepted." + }, + "base_fee": { + "type": "integer", + "format": "int64", + "description": "*\nThe base fee that must be used for incoming HTLC's to this particular\nchannel. This value will be tacked onto the required for a payment\nindependent of the size of the payment." + }, + "fee_rate": { + "type": "integer", + "format": "int64", + "description": "*\nThe fee rate that will be charged per millionth of a satoshi." + }, + "htlc_maximum_msat": { + "type": "string", + "format": "uint64", + "description": "*\nThe maximum HTLC value which will be accepted." + }, + "extra_opaque_data": { + "type": "string", + "format": "byte", + "description": "*\nThe set of data that was appended to this message, some of which we may\nnot actually know how to iterate or parse. By holding onto this data, we\nensure that we're able to properly validate the set of signatures that\ncover these new fields, and ensure we're able to make upgrades to the\nnetwork in a forwards compatible manner." + } + } + }, + "lnrpcCloseStatusUpdate": { + "type": "object", + "properties": { + "close_pending": { + "$ref": "#/definitions/lnrpcPendingUpdate" }, "chan_close": { "$ref": "#/definitions/lnrpcChannelCloseUpdate" @@ -2038,6 +2535,17 @@ } } }, + "lnrpcCommitmentType": { + "type": "string", + "enum": [ + "LEGACY", + "STATIC_REMOTE_KEY", + "ANCHORS", + "UNKNOWN_COMMITMENT_TYPE" + ], + "default": "LEGACY", + "description": " - LEGACY: *\nA channel using the legacy commitment format having tweaked to_remote\nkeys.\n - STATIC_REMOTE_KEY: *\nA channel that uses the modern commitment format where the key in the\noutput of the remote party does not change each state. 
This makes back\nup and recovery easier as when the channel is closed, the funds go\ndirectly to that key.\n - ANCHORS: *\nA channel that uses a commitment format that has anchor outputs on the\ncommitments, allowing fee bumping after a force close transaction has\nbeen broadcast.\n - UNKNOWN_COMMITMENT_TYPE: *\nReturned when the commitment type isn't known or unavailable." + }, "lnrpcConnectPeerRequest": { "type": "object", "properties": { @@ -2048,7 +2556,7 @@ "perm": { "type": "boolean", "format": "boolean", - "description": "* If set, the daemon will attempt to persistently connect to the target\npeer. Otherwise, the call will be synchronous." + "description": "* If set, the daemon will attempt to persistently connect to the target\npeer. Otherwise, the call will be synchronous." } } }, @@ -2069,6 +2577,21 @@ "lnrpcDisconnectPeerResponse": { "type": "object" }, + "lnrpcEdgeLocator": { + "type": "object", + "properties": { + "channel_id": { + "type": "string", + "format": "uint64", + "description": "/ The short channel id of this edge." + }, + "direction_reverse": { + "type": "boolean", + "format": "boolean", + "description": "*\nThe direction of this edge. If direction_reverse is false, the direction\nof this edge is from the channel endpoint with the lexicographically smaller\npub key to the endpoint with the larger pub key. If direction_reverse is\nis true, the edge goes the other way." + } + } + }, "lnrpcEstimateFeeResponse": { "type": "object", "properties": { @@ -2084,13 +2607,100 @@ } } }, + "lnrpcFailure": { + "type": "object", + "properties": { + "code": { + "$ref": "#/definitions/FailureFailureCode", + "title": "/ Failure code as defined in the Lightning spec" + }, + "channel_update": { + "$ref": "#/definitions/lnrpcChannelUpdate", + "description": "/ An optional channel update message." + }, + "htlc_msat": { + "type": "string", + "format": "uint64", + "description": "/ A failure type-dependent htlc value." 
+ }, + "onion_sha_256": { + "type": "string", + "format": "byte", + "description": "/ The sha256 sum of the onion payload." + }, + "cltv_expiry": { + "type": "integer", + "format": "int64", + "description": "/ A failure type-dependent cltv expiry value." + }, + "flags": { + "type": "integer", + "format": "int64", + "description": "/ A failure type-dependent flags value." + }, + "failure_source_index": { + "type": "integer", + "format": "int64", + "description": "*\nThe position in the path of the intermediate or final node that generated\nthe failure message. Position zero is the sender node." + }, + "height": { + "type": "integer", + "format": "int64", + "description": "/ A failure type-dependent block height." + } + } + }, + "lnrpcFeature": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "is_required": { + "type": "boolean", + "format": "boolean" + }, + "is_known": { + "type": "boolean", + "format": "boolean" + } + } + }, + "lnrpcFeatureBit": { + "type": "string", + "enum": [ + "DATALOSS_PROTECT_REQ", + "DATALOSS_PROTECT_OPT", + "INITIAL_ROUING_SYNC", + "UPFRONT_SHUTDOWN_SCRIPT_REQ", + "UPFRONT_SHUTDOWN_SCRIPT_OPT", + "GOSSIP_QUERIES_REQ", + "GOSSIP_QUERIES_OPT", + "TLV_ONION_REQ", + "TLV_ONION_OPT", + "EXT_GOSSIP_QUERIES_REQ", + "EXT_GOSSIP_QUERIES_OPT", + "STATIC_REMOTE_KEY_REQ", + "STATIC_REMOTE_KEY_OPT", + "PAYMENT_ADDR_REQ", + "PAYMENT_ADDR_OPT", + "MPP_REQ", + "MPP_OPT" + ], + "default": "DATALOSS_PROTECT_REQ" + }, "lnrpcFeeLimit": { "type": "object", "properties": { "fixed": { "type": "string", "format": "int64", - "description": "/ The fee limit expressed as a fixed amount of satoshis." + "description": "*\nThe fee limit expressed as a fixed amount of satoshis.\n\nThe fields fixed and fixed_msat are mutually exclusive." + }, + "fixed_msat": { + "type": "string", + "format": "int64", + "description": "*\nThe fee limit expressed as a fixed amount of millisatoshis.\n\nThe fields fixed and fixed_msat are mutually exclusive." 
}, "percent": { "type": "string", @@ -2107,22 +2717,37 @@ "items": { "$ref": "#/definitions/lnrpcChannelFeeReport" }, - "description": "/ An array of channel fee reports which describes the current fee schedule for each channel." + "description": "/ An array of channel fee reports which describes the current fee schedule\n/ for each channel." }, "day_fee_sum": { "type": "string", "format": "uint64", - "description": "/ The total amount of fee revenue (in satoshis) the switch has collected over the past 24 hrs." + "description": "/ The total amount of fee revenue (in satoshis) the switch has collected\n/ over the past 24 hrs." }, "week_fee_sum": { "type": "string", "format": "uint64", - "description": "/ The total amount of fee revenue (in satoshis) the switch has collected over the past 1 week." + "description": "/ The total amount of fee revenue (in satoshis) the switch has collected\n/ over the past 1 week." }, "month_fee_sum": { "type": "string", "format": "uint64", - "description": "/ The total amount of fee revenue (in satoshis) the switch has collected over the past 1 month." + "description": "/ The total amount of fee revenue (in satoshis) the switch has collected\n/ over the past 1 month." + } + } + }, + "lnrpcFloatMetric": { + "type": "object", + "properties": { + "value": { + "type": "number", + "format": "double", + "description": "/ Arbitrary float value." + }, + "normalized_value": { + "type": "number", + "format": "double", + "description": "/ The value normalized to [0,1] or [-1,1]." } } }, @@ -2132,7 +2757,7 @@ "timestamp": { "type": "string", "format": "uint64", - "description": "/ Timestamp is the time (unix epoch offset) that this circuit was completed." + "description": "/ Timestamp is the time (unix epoch offset) that this circuit was\n/ completed." 
}, "chan_id_in": { "type": "string", @@ -2142,17 +2767,17 @@ "chan_id_out": { "type": "string", "format": "uint64", - "description": "/ The outgoing channel ID that carried the preimage that completed the circuit." + "description": "/ The outgoing channel ID that carried the preimage that completed the\n/ circuit." }, "amt_in": { "type": "string", "format": "uint64", - "description": "/ The total amount (in satoshis) of the incoming HTLC that created half the circuit." + "description": "/ The total amount (in satoshis) of the incoming HTLC that created half\n/ the circuit." }, "amt_out": { "type": "string", "format": "uint64", - "description": "/ The total amount (in satoshis) of the outgoing HTLC that created the second half of the circuit." + "description": "/ The total amount (in satoshis) of the outgoing HTLC that created the\n/ second half of the circuit." }, "fee": { "type": "string", @@ -2163,6 +2788,16 @@ "type": "string", "format": "uint64", "description": "/ The total fee (in milli-satoshis) that this payment circuit carried." + }, + "amt_in_msat": { + "type": "string", + "format": "uint64", + "description": "/ The total amount (in milli-satoshis) of the incoming HTLC that created\n/ half the circuit." + }, + "amt_out_msat": { + "type": "string", + "format": "uint64", + "description": "/ The total amount (in milli-satoshis) of the outgoing HTLC that created\n/ the second half of the circuit." } } }, @@ -2172,17 +2807,17 @@ "start_time": { "type": "string", "format": "uint64", - "description": "/ Start time is the starting point of the forwarding history request. All records beyond this point will be included, respecting the end time, and the index offset." + "description": "/ Start time is the starting point of the forwarding history request. All\n/ records beyond this point will be included, respecting the end time, and\n/ the index offset." 
}, "end_time": { "type": "string", "format": "uint64", - "description": "/ End time is the end point of the forwarding history request. The response will carry at most 50k records between the start time and the end time. The index offset can be used to implement pagination." + "description": "/ End time is the end point of the forwarding history request. The\n/ response will carry at most 50k records between the start time and the\n/ end time. The index offset can be used to implement pagination." }, "index_offset": { "type": "integer", "format": "int64", - "description": "/ Index offset is the offset in the time series to start at. As each response can only contain 50k records, callers can use this to skip around within a packed time series." + "description": "/ Index offset is the offset in the time series to start at. As each\n/ response can only contain 50k records, callers can use this to skip\n/ around within a packed time series." }, "num_max_events": { "type": "integer", @@ -2199,15 +2834,71 @@ "items": { "$ref": "#/definitions/lnrpcForwardingEvent" }, - "description": "/ A list of forwarding events from the time slice of the time series specified in the request." + "description": "/ A list of forwarding events from the time slice of the time series\n/ specified in the request." }, "last_offset_index": { "type": "integer", "format": "int64", - "description": "/ The index of the last time in the set of returned forwarding events. Can be used to seek further, pagination style." + "description": "/ The index of the last time in the set of returned forwarding events. Can\n/ be used to seek further, pagination style." + } + } + }, + "lnrpcFundingPsbtFinalize": { + "type": "object", + "properties": { + "signed_psbt": { + "type": "string", + "format": "byte", + "description": "*\nThe funded PSBT that contains all witness data to send the exact channel\ncapacity amount to the PK script returned in the open channel message in a\nprevious step." 
+ }, + "pending_chan_id": { + "type": "string", + "format": "byte", + "description": "/ The pending channel ID of the channel to get the PSBT for." + } + } + }, + "lnrpcFundingPsbtVerify": { + "type": "object", + "properties": { + "funded_psbt": { + "type": "string", + "format": "byte", + "description": "*\nThe funded but not yet signed PSBT that sends the exact channel capacity\namount to the PK script returned in the open channel message in a previous\nstep." + }, + "pending_chan_id": { + "type": "string", + "format": "byte", + "description": "/ The pending channel ID of the channel to get the PSBT for." + } + } + }, + "lnrpcFundingShim": { + "type": "object", + "properties": { + "chan_point_shim": { + "$ref": "#/definitions/lnrpcChanPointShim", + "description": "*\nA channel shim where the channel point was fully constructed outside\nof lnd's wallet and the transaction might already be published." + }, + "psbt_shim": { + "$ref": "#/definitions/lnrpcPsbtShim", + "description": "*\nA channel shim that uses a PSBT to fund and sign the channel funding\ntransaction." } } }, + "lnrpcFundingShimCancel": { + "type": "object", + "properties": { + "pending_chan_id": { + "type": "string", + "format": "byte", + "description": "/ The pending channel ID of the channel to cancel the funding shim for." + } + } + }, + "lnrpcFundingStateStepResp": { + "type": "object" + }, "lnrpcGenSeedResponse": { "type": "object", "properties": { @@ -2228,6 +2919,14 @@ "lnrpcGetInfoResponse": { "type": "object", "properties": { + "version": { + "type": "string", + "description": "/ The version of the LND software that the node is running." + }, + "commit_hash": { + "type": "string", + "description": "/ The SHA1 commit hash that the daemon is compiled with." + }, "identity_pubkey": { "type": "string", "description": "/ The identity pubkey of the current node." @@ -2236,6 +2935,10 @@ "type": "string", "title": "/ If applicable, the alias of the current node, e.g. 
\"bob\"" }, + "color": { + "type": "string", + "title": "/ The color of the current node in hex code format" + }, "num_pending_channels": { "type": "integer", "format": "int64", @@ -2246,6 +2949,11 @@ "format": "int64", "title": "/ Number of active channels" }, + "num_inactive_channels": { + "type": "integer", + "format": "int64", + "title": "/ Number of inactive channels" + }, "num_peers": { "type": "integer", "format": "int64", @@ -2260,36 +2968,25 @@ "type": "string", "title": "/ The node's current view of the hash of the best block" }, + "best_header_timestamp": { + "type": "string", + "format": "int64", + "title": "/ Timestamp of the block best known to the wallet" + }, "synced_to_chain": { "type": "boolean", "format": "boolean", "title": "/ Whether the wallet's view is synced to the main chain" }, - "testnet": { + "synced_to_graph": { "type": "boolean", "format": "boolean", - "title": "* \nWhether the current node is connected to testnet. This field is \ndeprecated and the network field should be used instead" - }, - "uris": { - "type": "array", - "items": { - "type": "string" - }, - "description": "/ The URIs of the current node." - }, - "best_header_timestamp": { - "type": "string", - "format": "int64", - "title": "/ Timestamp of the block best known to the wallet" - }, - "version": { - "type": "string", - "description": "/ The version of the LND software that the node is running." + "description": "Whether we consider ourselves synced with the public channel graph." }, - "num_inactive_channels": { - "type": "integer", - "format": "int64", - "title": "/ Number of inactive channels" + "testnet": { + "type": "boolean", + "format": "boolean", + "title": "*\nWhether the current node is connected to testnet. 
This field is\ndeprecated and the network field should be used instead" }, "chains": { "type": "array", @@ -2298,14 +2995,19 @@ }, "title": "/ A list of active chains the node is connected to" }, - "color": { - "type": "string", - "title": "/ The color of the current node in hex code format" + "uris": { + "type": "array", + "items": { + "type": "string" + }, + "description": "/ The URIs of the current node." }, - "synced_to_graph": { - "type": "boolean", - "format": "boolean", - "description": "Whether we consider ourselves synced with the public channel graph." + "features": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/lnrpcFeature" + }, + "description": "Features that our node has advertised in our init message, node\nannouncements and invoices." } } }, @@ -2353,6 +3055,33 @@ } } }, + "lnrpcHTLCAttempt": { + "type": "object", + "properties": { + "status": { + "$ref": "#/definitions/HTLCAttemptHTLCStatus", + "description": "/ The status of the HTLC." + }, + "route": { + "$ref": "#/definitions/lnrpcRoute", + "description": "/ The route taken by this HTLC." + }, + "attempt_time_ns": { + "type": "string", + "format": "int64", + "description": "/ The time in UNIX nanoseconds at which this HTLC was sent." + }, + "resolve_time_ns": { + "type": "string", + "format": "int64", + "description": "*\nThe time in UNIX nanoseconds at which this HTLC was settled or failed.\nThis value will not be set if the HTLC is still IN_FLIGHT." + }, + "failure": { + "$ref": "#/definitions/lnrpcFailure", + "description": "Detailed htlc failure info." + } + } + }, "lnrpcHop": { "type": "object", "properties": { @@ -2392,7 +3121,19 @@ "tlv_payload": { "type": "boolean", "format": "boolean", - "description": "* \nIf set to true, then this hop will be encoded using the new variable length\nTLV format." + "description": "*\nIf set to true, then this hop will be encoded using the new variable length\nTLV format. 
Note that if any custom tlv_records below are specified, then\nthis field MUST be set to true for them to be encoded properly." + }, + "mpp_record": { + "$ref": "#/definitions/lnrpcMPPRecord", + "description": "*\nAn optional TLV record that signals the use of an MPP payment. If present,\nthe receiver will enforce that that the same mpp_record is included in the\nfinal hop payload of all non-zero payments in the HTLC set. If empty, a\nregular single-shot payment is or was attempted." + }, + "custom_records": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "byte" + }, + "description": "*\nAn optional set of key-value TLV records. This is useful within the context\nof the SendToRoute call as it allows callers to specify arbitrary K-V pairs\nto drop off at each hop within the onion." } } }, @@ -2431,7 +3172,7 @@ "wallet_password": { "type": "string", "format": "byte", - "description": "*\nwallet_password is the passphrase that should be used to encrypt the\nwallet. This MUST be at least 8 chars in length. After creation, this\npassword is required to unlock the daemon." + "description": "*\nwallet_password is the passphrase that should be used to encrypt the\nwallet. This MUST be at least 8 chars in length. After creation, this\npassword is required to unlock the daemon. When using REST, this field\nmust be encoded as base64." }, "cipher_seed_mnemonic": { "type": "array", @@ -2443,7 +3184,7 @@ "aezeed_passphrase": { "type": "string", "format": "byte", - "description": "*\naezeed_passphrase is an optional user provided passphrase that will be used\nto encrypt the generated aezeed cipher seed." + "description": "*\naezeed_passphrase is an optional user provided passphrase that will be used\nto encrypt the generated aezeed cipher seed. When using REST, this field\nmust be encoded as base64." 
}, "recovery_window": { "type": "integer", @@ -2459,6 +3200,16 @@ "lnrpcInitWalletResponse": { "type": "object" }, + "lnrpcInitiator": { + "type": "string", + "enum": [ + "INITIATOR_UNKNOWN", + "INITIATOR_LOCAL", + "INITIATOR_REMOTE", + "INITIATOR_BOTH" + ], + "default": "INITIATOR_UNKNOWN" + }, "lnrpcInvoice": { "type": "object", "properties": { @@ -2466,25 +3217,27 @@ "type": "string", "description": "*\nAn optional memo to attach along with the invoice. Used for record keeping\npurposes for the invoice's creator, and will also be set in the description\nfield of the encoded payment request if the description_hash field is not\nbeing used." }, - "receipt": { - "type": "string", - "format": "byte", - "description": "* Deprecated. An optional cryptographic receipt of payment which is not\nimplemented." - }, "r_preimage": { "type": "string", "format": "byte", - "title": "*\nThe hex-encoded preimage (32 byte) which will allow settling an incoming\nHTLC payable to this preimage" + "description": "*\nThe hex-encoded preimage (32 byte) which will allow settling an incoming\nHTLC payable to this preimage. When using REST, this field must be encoded\nas base64." }, "r_hash": { "type": "string", "format": "byte", - "title": "/ The hash of the preimage" + "description": "*\nThe hash of the preimage. When using REST, this field must be encoded as\nbase64." }, "value": { "type": "string", "format": "int64", - "title": "/ The value of this invoice in satoshis" + "description": "The fields value and value_msat are mutually exclusive.", + "title": "*\nThe value of this invoice in satoshis" + }, + "value_msat": { + "type": "string", + "format": "int64", + "description": "The fields value and value_msat are mutually exclusive.", + "title": "*\nThe value of this invoice in millisatoshis" }, "settled": { "type": "boolean", @@ -2503,12 +3256,12 @@ }, "payment_request": { "type": "string", - "description": "*\nA bare-bones invoice for a payment within the Lightning Network. 
With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." + "description": "*\nA bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." }, "description_hash": { "type": "string", "format": "byte", - "description": "*\nHash (SHA-256) of a description of the payment. Used if the description of\npayment (memo) is too long to naturally fit within the description field\nof an encoded payment request." + "description": "*\nHash (SHA-256) of a description of the payment. Used if the description of\npayment (memo) is too long to naturally fit within the description field\nof an encoded payment request. When using REST, this field must be encoded\nas base64." }, "expiry": { "type": "string", @@ -2571,6 +3324,18 @@ "$ref": "#/definitions/lnrpcInvoiceHTLC" }, "description": "/ List of HTLCs paying to this invoice [EXPERIMENTAL]." + }, + "features": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/lnrpcFeature" + }, + "description": "/ List of features advertised on the invoice." + }, + "is_keysend": { + "type": "boolean", + "format": "boolean", + "description": "*\nIndicates if this invoice was a spontaneous payment that arrived via keysend\n[EXPERIMENTAL]." } } }, @@ -2615,6 +3380,19 @@ "state": { "$ref": "#/definitions/lnrpcInvoiceHTLCState", "description": "/ Current state the htlc is in." + }, + "custom_records": { + "type": "object", + "additionalProperties": { + "type": "string", + "format": "byte" + }, + "description": "/ Custom tlv records." + }, + "mpp_total_amt_msat": { + "type": "string", + "format": "uint64", + "description": "/ The total amount of the mpp payment in msat." 
} }, "title": "/ Details of an HTLC that paid to an invoice" @@ -2628,6 +3406,35 @@ ], "default": "ACCEPTED" }, + "lnrpcKeyDescriptor": { + "type": "object", + "properties": { + "raw_key_bytes": { + "type": "string", + "format": "byte", + "description": "*\nThe raw bytes of the key being identified." + }, + "key_loc": { + "$ref": "#/definitions/lnrpcKeyLocator", + "description": "*\nThe key locator that identifies which key to use for signing." + } + } + }, + "lnrpcKeyLocator": { + "type": "object", + "properties": { + "key_family": { + "type": "integer", + "format": "int32", + "description": "/ The family of key being identified." + }, + "key_index": { + "type": "integer", + "format": "int32", + "description": "/ The precise index of the key being identified." + } + } + }, "lnrpcLightningAddress": { "type": "object", "properties": { @@ -2637,7 +3444,7 @@ }, "host": { "type": "string", - "title": "/ The network location of the lightning node, e.g. `69.69.69.69:1337` or `localhost:10011`" + "title": "/ The network location of the lightning node, e.g. `69.69.69.69:1337` or\n/ `localhost:10011`" } } }, @@ -2662,6 +3469,12 @@ }, "color": { "type": "string" + }, + "features": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/lnrpcFeature" + } } }, "description": "*\nAn individual vertex/node within the channel graph. A node is\nconnected to other nodes by one or more channel edges emanating from it. As the\ngraph is directed, a node will also have an incoming edge attached to it for\neach outgoing edge." @@ -2709,6 +3522,16 @@ "$ref": "#/definitions/lnrpcPayment" }, "title": "/ The list of payments" + }, + "first_index_offset": { + "type": "string", + "format": "uint64", + "description": "*\nThe index of the first item in the set of returned payments. This can be\nused as the index_offset to continue seeking backwards in the next request." 
+ }, + "last_index_offset": { + "type": "string", + "format": "uint64", + "description": "*\nThe index of the last item in the set of returned payments. This can be used\nas the index_offset to continue seeking forwards in the next request." } } }, @@ -2736,6 +3559,34 @@ } } }, + "lnrpcMPPRecord": { + "type": "object", + "properties": { + "payment_addr": { + "type": "string", + "format": "byte", + "description": "*\nA unique, random identifier used to authenticate the sender as the intended\npayer of a multi-path payment. The payment_addr must be the same for all\nsubpayments, and match the payment_addr provided in the receiver's invoice.\nThe same payment_addr must be used on all subpayments." + }, + "total_amt_msat": { + "type": "string", + "format": "int64", + "description": "*\nThe total amount in milli-satoshis being sent as part of a larger multi-path\npayment. The caller is responsible for ensuring subpayments to the same node\nand payment_hash sum exactly to total_amt_msat. The same\ntotal_amt_msat must be used on all subpayments." + } + } + }, + "lnrpcMacaroonPermission": { + "type": "object", + "properties": { + "entity": { + "type": "string", + "description": "/ The entity a permission grants access to." + }, + "action": { + "type": "string", + "description": "/ The action that is granted." + } + } + }, "lnrpcMultiChanBackup": { "type": "object", "properties": { @@ -2749,7 +3600,7 @@ "multi_chan_backup": { "type": "string", "format": "byte", - "description": "*\nA single encrypted blob containing all the static channel backups of the\nchannel listed above. This can be stored as a single file or blob, and\nsafely be replaced with any prior/future versions." + "description": "*\nA single encrypted blob containing all the static channel backups of the\nchannel listed above. This can be stored as a single file or blob, and\nsafely be replaced with any prior/future versions. When using REST, this\nfield must be encoded as base64." 
} } }, @@ -2849,6 +3700,41 @@ } } }, + "lnrpcNodeMetricType": { + "type": "string", + "enum": [ + "UNKNOWN", + "BETWEENNESS_CENTRALITY" + ], + "default": "UNKNOWN" + }, + "lnrpcNodeMetricsResponse": { + "type": "object", + "properties": { + "betweenness_centrality": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/lnrpcFloatMetric" + }, + "description": "*\nBetweenness centrality is the sum of the ratio of shortest paths that pass\nthrough the node for each pair of nodes in the graph (not counting paths\nstarting or ending at this node).\nMap of node pubkey to betweenness centrality of the node. Normalized\nvalues are in the [0,1] closed interval." + } + } + }, + "lnrpcNodePair": { + "type": "object", + "properties": { + "from": { + "type": "string", + "format": "byte", + "description": "*\nThe sending node of the pair. When using REST, this field must be encoded as\nbase64." + }, + "to": { + "type": "string", + "format": "byte", + "description": "*\nThe receiving node of the pair. When using REST, this field must be encoded\nas base64." + } + } + }, "lnrpcNodeUpdate": { "type": "object", "properties": { @@ -2879,11 +3765,11 @@ "node_pubkey": { "type": "string", "format": "byte", - "title": "/ The pubkey of the node to open a channel with" + "description": "*\nThe pubkey of the node to open a channel with. When using REST, this field\nmust be encoded as base64." }, "node_pubkey_string": { "type": "string", - "title": "/ The hex encoded pubkey of the node to open a channel with" + "description": "*\nThe hex encoded pubkey of the node to open a channel with. Deprecated now\nthat the REST gateway supports base64 encoding of bytes fields." 
}, "local_funding_amount": { "type": "string", @@ -2893,42 +3779,50 @@ "push_sat": { "type": "string", "format": "int64", - "title": "/ The number of satoshis to push to the remote side as part of the initial commitment state" + "title": "/ The number of satoshis to push to the remote side as part of the initial\n/ commitment state" }, "target_conf": { "type": "integer", "format": "int32", - "description": "/ The target number of blocks that the funding transaction should be confirmed by." + "description": "/ The target number of blocks that the funding transaction should be\n/ confirmed by." }, "sat_per_byte": { "type": "string", "format": "int64", - "description": "/ A manual fee rate set in sat/byte that should be used when crafting the funding transaction." + "description": "/ A manual fee rate set in sat/byte that should be used when crafting the\n/ funding transaction." }, "private": { "type": "boolean", "format": "boolean", - "description": "/ Whether this channel should be private, not announced to the greater network." + "description": "/ Whether this channel should be private, not announced to the greater\n/ network." }, "min_htlc_msat": { "type": "string", "format": "int64", - "description": "/ The minimum value in millisatoshi we will require for incoming HTLCs on the channel." + "description": "/ The minimum value in millisatoshi we will require for incoming HTLCs on\n/ the channel." }, "remote_csv_delay": { "type": "integer", "format": "int64", - "description": "/ The delay we require on the remote's commitment transaction. If this is not set, it will be scaled automatically with the channel size." + "description": "/ The delay we require on the remote's commitment transaction. If this is\n/ not set, it will be scaled automatically with the channel size." }, "min_confs": { "type": "integer", "format": "int32", - "description": "/ The minimum number of confirmations each one of your outputs used for the funding transaction must satisfy." 
+ "description": "/ The minimum number of confirmations each one of your outputs used for\n/ the funding transaction must satisfy." }, "spend_unconfirmed": { "type": "boolean", "format": "boolean", - "description": "/ Whether unconfirmed outputs should be used as inputs for the funding transaction." + "description": "/ Whether unconfirmed outputs should be used as inputs for the funding\n/ transaction." + }, + "close_address": { + "type": "string", + "description": "Close address is an optional address which specifies the address to which\nfunds should be paid out to upon cooperative close. This field may only be\nset if the peer supports the option upfront feature bit (call listpeers\nto check). The remote peer will only accept cooperative closes to this\naddress if it is set.\n\nNote: If this value is set on channel creation, you will *not* be able to\ncooperatively close out to a different address." + }, + "funding_shim": { + "$ref": "#/definitions/lnrpcFundingShim", + "description": "*\nFunding shims are an optional argument that allow the caller to intercept\ncertain funding functionality. For example, a shim can be provided to use a\nparticular key for the commitment key (ideally cold) rather than use one\nthat is generated by the wallet as normal, or signal that signing will be\ncarried out in an interactive manner (PSBT based)." } } }, @@ -2936,10 +3830,21 @@ "type": "object", "properties": { "chan_pending": { - "$ref": "#/definitions/lnrpcPendingUpdate" + "$ref": "#/definitions/lnrpcPendingUpdate", + "description": "*\nSignals that the channel is now fully negotiated and the funding\ntransaction published." }, "chan_open": { - "$ref": "#/definitions/lnrpcChannelOpenUpdate" + "$ref": "#/definitions/lnrpcChannelOpenUpdate", + "description": "*\nSignals that the channel's funding transaction has now reached the\nrequired number of confirmations on chain and can be used." 
+ }, + "psbt_fund": { + "$ref": "#/definitions/lnrpcReadyForPsbtFunding", + "description": "*\nSignals that the funding process has been suspended and the construction\nof a PSBT that funds the channel PK script is now required." + }, + "pending_chan_id": { + "type": "string", + "format": "byte", + "description": "*\nThe pending channel ID of the created channel. This value may be used to\nfurther the funding flow manually via the FundingStateStep method." } } }, @@ -3001,6 +3906,20 @@ "items": { "$ref": "#/definitions/lnrpcRouteHint" } + }, + "payment_addr": { + "type": "string", + "format": "byte" + }, + "num_msat": { + "type": "string", + "format": "int64" + }, + "features": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/lnrpcFeature" + } } } }, @@ -3019,14 +3938,7 @@ "creation_date": { "type": "string", "format": "int64", - "title": "/ The date of this payment" - }, - "path": { - "type": "array", - "items": { - "type": "string" - }, - "title": "/ The path this payment took" + "title": "/ Deprecated, use creation_time_ns" }, "fee": { "type": "string", @@ -3064,9 +3976,42 @@ "type": "string", "format": "int64", "title": "/ The fee paid for this payment in milli-satoshis" + }, + "creation_time_ns": { + "type": "string", + "format": "int64", + "description": "/ The time in UNIX nanoseconds at which the payment was created." + }, + "htlcs": { + "type": "array", + "items": { + "$ref": "#/definitions/lnrpcHTLCAttempt" + }, + "description": "/ The HTLCs made in attempt to settle the payment." + }, + "payment_index": { + "type": "string", + "format": "uint64", + "description": "*\nThe creation index of this payment. Each payment can be uniquely identified\nby this index, which may not strictly increment by 1 for payments made in\nolder versions of lnd." 
+ }, + "failure_reason": { + "$ref": "#/definitions/lnrpcPaymentFailureReason" } } }, + "lnrpcPaymentFailureReason": { + "type": "string", + "enum": [ + "FAILURE_REASON_NONE", + "FAILURE_REASON_TIMEOUT", + "FAILURE_REASON_NO_ROUTE", + "FAILURE_REASON_ERROR", + "FAILURE_REASON_INCORRECT_PAYMENT_DETAILS", + "FAILURE_REASON_INSUFFICIENT_BALANCE" + ], + "default": "FAILURE_REASON_NONE", + "description": " - FAILURE_REASON_NONE: *\nPayment isn't failed (yet).\n - FAILURE_REASON_TIMEOUT: *\nThere are more routes to try, but the payment timeout was exceeded.\n - FAILURE_REASON_NO_ROUTE: *\nAll possible routes were tried and failed permanently. Or were no\nroutes to the destination at all.\n - FAILURE_REASON_ERROR: *\nA non-recoverable error has occured.\n - FAILURE_REASON_INCORRECT_PAYMENT_DETAILS: *\nPayment details incorrect (unknown hash, invalid amt or\ninvalid final cltv delta)\n - FAILURE_REASON_INSUFFICIENT_BALANCE: *\nInsufficient local balance." + }, "lnrpcPeer": { "type": "object", "properties": { @@ -3111,6 +4056,32 @@ "sync_type": { "$ref": "#/definitions/PeerSyncType", "description": "The type of sync we are currently performing with this peer." + }, + "features": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/lnrpcFeature" + }, + "description": "/ Features advertised by the remote peer in their init message." + }, + "errors": { + "type": "array", + "items": { + "$ref": "#/definitions/lnrpcTimestampedError" + }, + "description": "The latest errors received from our peer with timestamps, limited to the 10\nmost recent errors. These errors are tracked across peer connections, but\nare not persisted across lnd restarts. Note that these errors are only\nstored for peers that we have channels open with, to prevent peers from\nspamming us with errors at no cost." + } + } + }, + "lnrpcPeerEvent": { + "type": "object", + "properties": { + "pub_key": { + "type": "string", + "description": "/ The identity pubkey of the peer." 
+ }, + "type": { + "$ref": "#/definitions/PeerEventEventType" } } }, @@ -3134,7 +4105,7 @@ "items": { "$ref": "#/definitions/PendingChannelsResponseClosedChannel" }, - "title": "/ Channels pending closing" + "description": "Deprecated: Channels pending closing previously contained cooperatively\nclosed channels with a single confirmation. These channels are now\nconsidered closed from the time we see them on chain." }, "pending_force_closing_channels": { "type": "array", @@ -3219,7 +4190,7 @@ "fee_rate": { "type": "number", "format": "double", - "description": "/ The effective fee rate in milli-satoshis. The precision of this value goes up to 6 decimal places, so 1e-6." + "description": "/ The effective fee rate in milli-satoshis. The precision of this value\n/ goes up to 6 decimal places, so 1e-6." }, "time_lock_delta": { "type": "integer", @@ -3229,13 +4200,38 @@ "max_htlc_msat": { "type": "string", "format": "uint64", - "description": "/ If set, the maximum HTLC size in milli-satoshis. If unset, the maximum HTLC will be unchanged." + "description": "/ If set, the maximum HTLC size in milli-satoshis. If unset, the maximum\n/ HTLC will be unchanged." + }, + "min_htlc_msat": { + "type": "string", + "format": "uint64", + "description": "/ The minimum HTLC size in milli-satoshis. Only applied if\n/ min_htlc_msat_specified is true." + }, + "min_htlc_msat_specified": { + "type": "boolean", + "format": "boolean", + "description": "/ If true, min_htlc_msat is applied." } } }, "lnrpcPolicyUpdateResponse": { "type": "object" }, + "lnrpcPsbtShim": { + "type": "object", + "properties": { + "pending_chan_id": { + "type": "string", + "format": "byte", + "description": "*\nA unique identifier of 32 random bytes that will be used as the pending\nchannel ID to identify the PSBT state machine when interacting with it and\non the wire protocol to initiate the funding request." 
+ }, + "base_psbt": { + "type": "string", + "format": "byte", + "description": "*\nAn optional base PSBT the new channel output will be added to. If this is\nnon-empty, it must be a binary serialized PSBT." + } + } + }, "lnrpcQueryRoutesResponse": { "type": "object", "properties": { @@ -3253,6 +4249,25 @@ } } }, + "lnrpcReadyForPsbtFunding": { + "type": "object", + "properties": { + "funding_address": { + "type": "string", + "description": "*\nThe P2WSH address of the channel funding multisig address that the below\nspecified amount in satoshis needs to be sent to." + }, + "funding_amount": { + "type": "string", + "format": "int64", + "description": "*\nThe exact amount in satoshis that needs to be sent to the above address to\nfund the pending channel." + }, + "psbt": { + "type": "string", + "format": "byte", + "description": "*\nA raw PSBT that contains the pending channel output. If a base PSBT was\nprovided in the PsbtShim, this is the base PSBT with one additional output.\nIf no base PSBT was specified, this is an otherwise empty PSBT with exactly\none output." + } + } + }, "lnrpcRestoreBackupResponse": { "type": "object" }, @@ -3260,11 +4275,13 @@ "type": "object", "properties": { "chan_backups": { - "$ref": "#/definitions/lnrpcChannelBackups" + "$ref": "#/definitions/lnrpcChannelBackups", + "description": "*\nThe channels to restore as a list of channel/backup pairs." }, "multi_chan_backup": { "type": "string", - "format": "byte" + "format": "byte", + "description": "*\nThe channels to restore in the packed multi backup format. When using\nREST, this field must be encoded as base64." } } }, @@ -3274,12 +4291,12 @@ "total_time_lock": { "type": "integer", "format": "int64", - "description": "*\nThe cumulative (final) time lock across the entire route. This is the CLTV\nvalue that should be extended to the first hop in the route. 
All other hops\nwill decrement the time-lock as advertised, leaving enough time for all\nhops to wait for or present the payment preimage to complete the payment." + "description": "*\nThe cumulative (final) time lock across the entire route. This is the CLTV\nvalue that should be extended to the first hop in the route. All other hops\nwill decrement the time-lock as advertised, leaving enough time for all\nhops to wait for or present the payment preimage to complete the payment." }, "total_fees": { "type": "string", "format": "int64", - "description": "*\nThe sum of the fees paid at each hop within the final route. In the case\nof a one-hop payment, this value will be zero as we don't need to pay a fee\nto ourselves." + "description": "*\nThe sum of the fees paid at each hop within the final route. In the case\nof a one-hop payment, this value will be zero as we don't need to pay a fee\nto ourselves." }, "total_amt": { "type": "string", @@ -3366,12 +4383,12 @@ "target_conf": { "type": "integer", "format": "int32", - "description": "/ The target number of blocks that this transaction should be confirmed by." + "description": "/ The target number of blocks that this transaction should be confirmed\n/ by." }, "sat_per_byte": { "type": "string", "format": "int64", - "description": "/ A manual fee rate set in sat/byte that should be used when crafting the transaction." + "description": "/ A manual fee rate set in sat/byte that should be used when crafting the\n/ transaction." }, "send_all": { "type": "boolean", @@ -3404,29 +4421,34 @@ "dest": { "type": "string", "format": "byte", - "title": "/ The identity pubkey of the payment recipient" + "description": "*\nThe identity pubkey of the payment recipient. When using REST, this field\nmust be encoded as base64." }, "dest_string": { "type": "string", - "title": "/ The hex-encoded identity pubkey of the payment recipient" + "description": "*\nThe hex-encoded identity pubkey of the payment recipient. 
Deprecated now\nthat the REST gateway supports base64 encoding of bytes fields." }, "amt": { "type": "string", "format": "int64", - "description": "/ Number of satoshis to send." + "description": "*\nThe amount to send expressed in satoshis.\n\nThe fields amt and amt_msat are mutually exclusive." + }, + "amt_msat": { + "type": "string", + "format": "int64", + "description": "*\nThe amount to send expressed in millisatoshis.\n\nThe fields amt and amt_msat are mutually exclusive." }, "payment_hash": { "type": "string", "format": "byte", - "title": "/ The hash to use within the payment's HTLC" + "description": "*\nThe hash to use within the payment's HTLC. When using REST, this field\nmust be encoded as base64." }, "payment_hash_string": { "type": "string", - "title": "/ The hex-encoded hash to use within the payment's HTLC" + "description": "*\nThe hex-encoded hash to use within the payment's HTLC. Deprecated now\nthat the REST gateway supports base64 encoding of bytes fields." }, "payment_request": { "type": "string", - "description": "*\nA bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." + "description": "*\nA bare-bones invoice for a payment within the Lightning Network. With the\ndetails of the invoice, the sender has all the data necessary to send a\npayment to the recipient." }, "final_cltv_delta": { "type": "integer", @@ -3442,18 +4464,35 @@ "format": "uint64", "description": "*\nThe channel id of the channel that must be taken to the first hop. If zero,\nany channel may be used." }, + "last_hop_pubkey": { + "type": "string", + "format": "byte", + "description": "*\nThe pubkey of the last hop of the route. If empty, any hop may be used." + }, "cltv_limit": { "type": "integer", "format": "int64", - "description": "* \nAn optional maximum total time lock for the route. This should not exceed\nlnd's `--max-cltv-expiry` setting. 
If zero, then the value of\n`--max-cltv-expiry` is enforced." + "description": "*\nAn optional maximum total time lock for the route. This should not exceed\nlnd's `--max-cltv-expiry` setting. If zero, then the value of\n`--max-cltv-expiry` is enforced." }, - "dest_tlv": { + "dest_custom_records": { "type": "object", "additionalProperties": { "type": "string", "format": "byte" }, - "description": "* \nAn optional field that can be used to pass an arbitrary set of TLV records\nto a peer which understands the new records. This can be used to pass\napplication specific data during the payment attempt." + "description": "*\nAn optional field that can be used to pass an arbitrary set of TLV records\nto a peer which understands the new records. This can be used to pass\napplication specific data during the payment attempt. Record types are\nrequired to be in the custom range \u003e= 65536. When using REST, the values\nmust be encoded as base64." + }, + "allow_self_payment": { + "type": "boolean", + "format": "boolean", + "description": "/ If set, circular payments to self are permitted." + }, + "dest_features": { + "type": "array", + "items": { + "$ref": "#/definitions/lnrpcFeatureBit" + }, + "description": "*\nFeatures assumed to be supported by the final node. All transitive feature\ndependencies must also be set properly. For a given feature bit pair, either\noptional or remote may be set, but not both. If this field is nil or empty,\nthe router will try to load destination features from the graph as a\nfallback." } } }, @@ -3482,11 +4521,11 @@ "payment_hash": { "type": "string", "format": "byte", - "description": "/ The payment hash to use for the HTLC." + "description": "*\nThe payment hash to use for the HTLC. When using REST, this field must be\nencoded as base64." }, "payment_hash_string": { "type": "string", - "description": "/ An optional hex-encoded payment hash to be used for the HTLC." 
+ "description": "*\nAn optional hex-encoded payment hash to be used for the HTLC. Deprecated now\nthat the REST gateway supports base64 encoding of bytes fields." }, "route": { "$ref": "#/definitions/lnrpcRoute", @@ -3500,7 +4539,7 @@ "msg": { "type": "string", "format": "byte", - "title": "/ The message to be signed" + "description": "*\nThe message to be signed. When using REST, this field must be encoded as\nbase64." } } }, @@ -3516,6 +4555,20 @@ "lnrpcStopResponse": { "type": "object" }, + "lnrpcTimestampedError": { + "type": "object", + "properties": { + "timestamp": { + "type": "string", + "format": "uint64", + "description": "The unix timestamp in seconds when the error occurred." + }, + "error": { + "type": "string", + "description": "The string representation of the error sent by our peer." + } + } + }, "lnrpcTransaction": { "type": "object", "properties": { @@ -3583,7 +4636,7 @@ "wallet_password": { "type": "string", "format": "byte", - "description": "*\nwallet_password should be the current valid passphrase for the daemon. This\nwill be required to decrypt on-disk material that the daemon requires to\nfunction properly." + "description": "*\nwallet_password should be the current valid passphrase for the daemon. This\nwill be required to decrypt on-disk material that the daemon requires to\nfunction properly. When using REST, this field must be encoded as base64." }, "recovery_window": { "type": "integer", @@ -3602,7 +4655,7 @@ "lnrpcUtxo": { "type": "object", "properties": { - "type": { + "address_type": { "$ref": "#/definitions/lnrpcAddressType", "title": "/ The type of address" }, @@ -3639,7 +4692,7 @@ "msg": { "type": "string", "format": "byte", - "title": "/ The message over which the signature is to be verified" + "description": "*\nThe message over which the signature is to be verified. When using REST,\nthis field must be encoded as base64." 
}, "signature": { "type": "string", @@ -3680,6 +4733,165 @@ "title": "/ The unconfirmed balance of a wallet(with 0 confirmations)" } } + }, + "protobufAny": { + "type": "object", + "properties": { + "type_url": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "grpc_code": { + "type": "integer", + "format": "int32" + }, + "http_code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + }, + "http_status": { + "type": "string" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + } + } + } + }, + "x-stream-definitions": { + "lnrpcChanBackupSnapshot": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcChanBackupSnapshot" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of lnrpcChanBackupSnapshot" + }, + "lnrpcChannelAcceptRequest": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcChannelAcceptRequest" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of lnrpcChannelAcceptRequest" + }, + "lnrpcChannelEventUpdate": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcChannelEventUpdate" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of lnrpcChannelEventUpdate" + }, + "lnrpcCloseStatusUpdate": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcCloseStatusUpdate" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of lnrpcCloseStatusUpdate" + }, + "lnrpcGraphTopologyUpdate": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcGraphTopologyUpdate" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result 
of lnrpcGraphTopologyUpdate" + }, + "lnrpcInvoice": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcInvoice" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of lnrpcInvoice" + }, + "lnrpcOpenStatusUpdate": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcOpenStatusUpdate" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of lnrpcOpenStatusUpdate" + }, + "lnrpcPeerEvent": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcPeerEvent" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of lnrpcPeerEvent" + }, + "lnrpcSendResponse": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcSendResponse" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of lnrpcSendResponse" + }, + "lnrpcTransaction": { + "type": "object", + "properties": { + "result": { + "$ref": "#/definitions/lnrpcTransaction" + }, + "error": { + "$ref": "#/definitions/runtimeStreamError" + } + }, + "title": "Stream result of lnrpcTransaction" } } } diff --git a/lnrpc/signrpc/config_active.go b/lnrpc/signrpc/config_active.go index 1e959ae48b..c5ce25a9da 100644 --- a/lnrpc/signrpc/config_active.go +++ b/lnrpc/signrpc/config_active.go @@ -4,6 +4,7 @@ package signrpc import ( "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/macaroons" ) @@ -30,4 +31,8 @@ type Config struct { // job of the signer RPC server is simply to proxy valid requests to // the active signer instance. Signer input.Signer + + // KeyRing is an interface that the signer will use to derive any keys + // for signing messages. 
+ KeyRing keychain.SecretKeyRing } diff --git a/lnrpc/signrpc/signer.pb.go b/lnrpc/signrpc/signer.pb.go index 854a14fd03..6100237b17 100644 --- a/lnrpc/signrpc/signer.pb.go +++ b/lnrpc/signrpc/signer.pb.go @@ -202,7 +202,8 @@ type SignDescriptor struct { //only be populated if a p2wsh or a p2sh output is being signed. WitnessScript []byte `protobuf:"bytes,4,opt,name=witness_script,json=witnessScript,proto3" json:"witness_script,omitempty"` //* - //A description of the output being spent. The value and script MUST be provided. + //A description of the output being spent. The value and script MUST be + //provided. Output *TxOut `protobuf:"bytes,5,opt,name=output,proto3" json:"output,omitempty"` //* //The target sighash type that should be used when generating the final @@ -472,6 +473,287 @@ func (m *InputScriptResp) GetInputScripts() []*InputScript { return nil } +type SignMessageReq struct { + /// The message to be signed. + Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` + /// The key locator that identifies which key to use for signing. 
+ KeyLoc *KeyLocator `protobuf:"bytes,2,opt,name=key_loc,json=keyLoc,proto3" json:"key_loc,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignMessageReq) Reset() { *m = SignMessageReq{} } +func (m *SignMessageReq) String() string { return proto.CompactTextString(m) } +func (*SignMessageReq) ProtoMessage() {} +func (*SignMessageReq) Descriptor() ([]byte, []int) { + return fileDescriptor_4ecd772f6c7ffacf, []int{8} +} + +func (m *SignMessageReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignMessageReq.Unmarshal(m, b) +} +func (m *SignMessageReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignMessageReq.Marshal(b, m, deterministic) +} +func (m *SignMessageReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignMessageReq.Merge(m, src) +} +func (m *SignMessageReq) XXX_Size() int { + return xxx_messageInfo_SignMessageReq.Size(m) +} +func (m *SignMessageReq) XXX_DiscardUnknown() { + xxx_messageInfo_SignMessageReq.DiscardUnknown(m) +} + +var xxx_messageInfo_SignMessageReq proto.InternalMessageInfo + +func (m *SignMessageReq) GetMsg() []byte { + if m != nil { + return m.Msg + } + return nil +} + +func (m *SignMessageReq) GetKeyLoc() *KeyLocator { + if m != nil { + return m.KeyLoc + } + return nil +} + +type SignMessageResp struct { + //* + //The signature for the given message in the fixed-size LN wire format. 
+ Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignMessageResp) Reset() { *m = SignMessageResp{} } +func (m *SignMessageResp) String() string { return proto.CompactTextString(m) } +func (*SignMessageResp) ProtoMessage() {} +func (*SignMessageResp) Descriptor() ([]byte, []int) { + return fileDescriptor_4ecd772f6c7ffacf, []int{9} +} + +func (m *SignMessageResp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignMessageResp.Unmarshal(m, b) +} +func (m *SignMessageResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignMessageResp.Marshal(b, m, deterministic) +} +func (m *SignMessageResp) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignMessageResp.Merge(m, src) +} +func (m *SignMessageResp) XXX_Size() int { + return xxx_messageInfo_SignMessageResp.Size(m) +} +func (m *SignMessageResp) XXX_DiscardUnknown() { + xxx_messageInfo_SignMessageResp.DiscardUnknown(m) +} + +var xxx_messageInfo_SignMessageResp proto.InternalMessageInfo + +func (m *SignMessageResp) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +type VerifyMessageReq struct { + /// The message over which the signature is to be verified. + Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"` + //* + //The fixed-size LN wire encoded signature to be verified over the given + //message. + Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + /// The public key the signature has to be valid for. 
+ Pubkey []byte `protobuf:"bytes,3,opt,name=pubkey,proto3" json:"pubkey,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VerifyMessageReq) Reset() { *m = VerifyMessageReq{} } +func (m *VerifyMessageReq) String() string { return proto.CompactTextString(m) } +func (*VerifyMessageReq) ProtoMessage() {} +func (*VerifyMessageReq) Descriptor() ([]byte, []int) { + return fileDescriptor_4ecd772f6c7ffacf, []int{10} +} + +func (m *VerifyMessageReq) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VerifyMessageReq.Unmarshal(m, b) +} +func (m *VerifyMessageReq) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VerifyMessageReq.Marshal(b, m, deterministic) +} +func (m *VerifyMessageReq) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyMessageReq.Merge(m, src) +} +func (m *VerifyMessageReq) XXX_Size() int { + return xxx_messageInfo_VerifyMessageReq.Size(m) +} +func (m *VerifyMessageReq) XXX_DiscardUnknown() { + xxx_messageInfo_VerifyMessageReq.DiscardUnknown(m) +} + +var xxx_messageInfo_VerifyMessageReq proto.InternalMessageInfo + +func (m *VerifyMessageReq) GetMsg() []byte { + if m != nil { + return m.Msg + } + return nil +} + +func (m *VerifyMessageReq) GetSignature() []byte { + if m != nil { + return m.Signature + } + return nil +} + +func (m *VerifyMessageReq) GetPubkey() []byte { + if m != nil { + return m.Pubkey + } + return nil +} + +type VerifyMessageResp struct { + /// Whether the signature was valid over the given message. 
+ Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VerifyMessageResp) Reset() { *m = VerifyMessageResp{} } +func (m *VerifyMessageResp) String() string { return proto.CompactTextString(m) } +func (*VerifyMessageResp) ProtoMessage() {} +func (*VerifyMessageResp) Descriptor() ([]byte, []int) { + return fileDescriptor_4ecd772f6c7ffacf, []int{11} +} + +func (m *VerifyMessageResp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VerifyMessageResp.Unmarshal(m, b) +} +func (m *VerifyMessageResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VerifyMessageResp.Marshal(b, m, deterministic) +} +func (m *VerifyMessageResp) XXX_Merge(src proto.Message) { + xxx_messageInfo_VerifyMessageResp.Merge(m, src) +} +func (m *VerifyMessageResp) XXX_Size() int { + return xxx_messageInfo_VerifyMessageResp.Size(m) +} +func (m *VerifyMessageResp) XXX_DiscardUnknown() { + xxx_messageInfo_VerifyMessageResp.DiscardUnknown(m) +} + +var xxx_messageInfo_VerifyMessageResp proto.InternalMessageInfo + +func (m *VerifyMessageResp) GetValid() bool { + if m != nil { + return m.Valid + } + return false +} + +type SharedKeyRequest struct { + // The ephemeral public key to use for the DH key derivation. + EphemeralPubkey []byte `protobuf:"bytes,1,opt,name=ephemeral_pubkey,json=ephemeralPubkey,proto3" json:"ephemeral_pubkey,omitempty"` + //* + //The optional key locator of the local key that should be used. If this + //parameter is not set then the node's identity private key will be used. 
+ KeyLoc *KeyLocator `protobuf:"bytes,2,opt,name=key_loc,json=keyLoc,proto3" json:"key_loc,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SharedKeyRequest) Reset() { *m = SharedKeyRequest{} } +func (m *SharedKeyRequest) String() string { return proto.CompactTextString(m) } +func (*SharedKeyRequest) ProtoMessage() {} +func (*SharedKeyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4ecd772f6c7ffacf, []int{12} +} + +func (m *SharedKeyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SharedKeyRequest.Unmarshal(m, b) +} +func (m *SharedKeyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SharedKeyRequest.Marshal(b, m, deterministic) +} +func (m *SharedKeyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SharedKeyRequest.Merge(m, src) +} +func (m *SharedKeyRequest) XXX_Size() int { + return xxx_messageInfo_SharedKeyRequest.Size(m) +} +func (m *SharedKeyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SharedKeyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SharedKeyRequest proto.InternalMessageInfo + +func (m *SharedKeyRequest) GetEphemeralPubkey() []byte { + if m != nil { + return m.EphemeralPubkey + } + return nil +} + +func (m *SharedKeyRequest) GetKeyLoc() *KeyLocator { + if m != nil { + return m.KeyLoc + } + return nil +} + +type SharedKeyResponse struct { + // The shared public key, hashed with sha256. 
+ SharedKey []byte `protobuf:"bytes,1,opt,name=shared_key,json=sharedKey,proto3" json:"shared_key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SharedKeyResponse) Reset() { *m = SharedKeyResponse{} } +func (m *SharedKeyResponse) String() string { return proto.CompactTextString(m) } +func (*SharedKeyResponse) ProtoMessage() {} +func (*SharedKeyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4ecd772f6c7ffacf, []int{13} +} + +func (m *SharedKeyResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SharedKeyResponse.Unmarshal(m, b) +} +func (m *SharedKeyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SharedKeyResponse.Marshal(b, m, deterministic) +} +func (m *SharedKeyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SharedKeyResponse.Merge(m, src) +} +func (m *SharedKeyResponse) XXX_Size() int { + return xxx_messageInfo_SharedKeyResponse.Size(m) +} +func (m *SharedKeyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SharedKeyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SharedKeyResponse proto.InternalMessageInfo + +func (m *SharedKeyResponse) GetSharedKey() []byte { + if m != nil { + return m.SharedKey + } + return nil +} + func init() { proto.RegisterType((*KeyLocator)(nil), "signrpc.KeyLocator") proto.RegisterType((*KeyDescriptor)(nil), "signrpc.KeyDescriptor") @@ -481,48 +763,66 @@ func init() { proto.RegisterType((*SignResp)(nil), "signrpc.SignResp") proto.RegisterType((*InputScript)(nil), "signrpc.InputScript") proto.RegisterType((*InputScriptResp)(nil), "signrpc.InputScriptResp") + proto.RegisterType((*SignMessageReq)(nil), "signrpc.SignMessageReq") + proto.RegisterType((*SignMessageResp)(nil), "signrpc.SignMessageResp") + proto.RegisterType((*VerifyMessageReq)(nil), "signrpc.VerifyMessageReq") + proto.RegisterType((*VerifyMessageResp)(nil), "signrpc.VerifyMessageResp") + 
proto.RegisterType((*SharedKeyRequest)(nil), "signrpc.SharedKeyRequest") + proto.RegisterType((*SharedKeyResponse)(nil), "signrpc.SharedKeyResponse") } func init() { proto.RegisterFile("signrpc/signer.proto", fileDescriptor_4ecd772f6c7ffacf) } var fileDescriptor_4ecd772f6c7ffacf = []byte{ - // 562 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x4f, 0x8f, 0xd3, 0x3e, - 0x10, 0x55, 0xb7, 0xbf, 0x36, 0xdd, 0x49, 0xd2, 0x1f, 0x98, 0x0a, 0x02, 0x08, 0x51, 0x22, 0x2d, - 0xea, 0x01, 0x5a, 0x51, 0x10, 0x12, 0x9c, 0xd0, 0x82, 0x56, 0xac, 0xba, 0xd2, 0x4a, 0x6e, 0x4f, - 0x5c, 0xa2, 0x34, 0x35, 0xa9, 0x95, 0x34, 0xf1, 0xc6, 0x0e, 0x69, 0x6e, 0x7c, 0x07, 0xbe, 0x30, - 0x1a, 0x3b, 0xfd, 0x07, 0x9c, 0x9a, 0xf7, 0x3c, 0x33, 0xef, 0x79, 0x5e, 0x0d, 0x03, 0xc9, 0xe3, - 0xac, 0x10, 0xd1, 0x04, 0x7f, 0x59, 0x31, 0x16, 0x45, 0xae, 0x72, 0x62, 0x35, 0xac, 0xff, 0x15, - 0x60, 0xc6, 0xea, 0x9b, 0x3c, 0x0a, 0x55, 0x5e, 0x90, 0x67, 0x00, 0x09, 0xab, 0x83, 0xef, 0xe1, - 0x86, 0xa7, 0xb5, 0xd7, 0x1a, 0xb6, 0x46, 0x1d, 0x7a, 0x9e, 0xb0, 0xfa, 0x4a, 0x13, 0xe4, 0x29, - 0x20, 0x08, 0x78, 0xb6, 0x62, 0x5b, 0xef, 0x4c, 0x9f, 0xf6, 0x12, 0x56, 0x5f, 0x23, 0xf6, 0x43, - 0x70, 0x67, 0xac, 0xfe, 0xc2, 0x64, 0x54, 0x70, 0x81, 0xc3, 0x7c, 0x70, 0x8b, 0xb0, 0x0a, 0xb0, - 0x63, 0x59, 0x2b, 0x26, 0xf5, 0x3c, 0x87, 0xda, 0x45, 0x58, 0xcd, 0x58, 0x7d, 0x89, 0x14, 0x79, - 0x05, 0x16, 0x9e, 0xa7, 0x79, 0xa4, 0xe7, 0xd9, 0xd3, 0x07, 0xe3, 0xc6, 0xd9, 0xf8, 0x60, 0x8b, - 0x76, 0x13, 0xfd, 0xed, 0x7f, 0x84, 0xce, 0x62, 0x7b, 0x5b, 0x2a, 0x32, 0x80, 0xce, 0x8f, 0x30, - 0x2d, 0x99, 0x1e, 0xd9, 0xa6, 0x06, 0xa0, 0x3d, 0x91, 0x04, 0x46, 0x5f, 0x8f, 0x73, 0x68, 0x4f, - 0x24, 0x73, 0x8d, 0xfd, 0x5f, 0x67, 0xd0, 0x9f, 0xf3, 0x38, 0x3b, 0x32, 0xf8, 0x06, 0xd0, 0x7d, - 0xb0, 0x62, 0x32, 0xd2, 0x83, 0xec, 0xe9, 0xc3, 0x63, 0xf5, 0x43, 0x25, 0x45, 0x93, 0x08, 0xc9, - 0x0b, 0x70, 0x24, 0xcf, 0xe2, 0x94, 0x05, 0xaa, 0x62, 0x61, 0xd2, 0xa8, 0xd8, 0x86, 0x5b, 
0x20, - 0x85, 0x25, 0xab, 0xbc, 0x5c, 0xee, 0x4b, 0xda, 0xa6, 0xc4, 0x70, 0xa6, 0xe4, 0x02, 0xfa, 0x15, - 0x57, 0x19, 0x93, 0x72, 0xe7, 0xf6, 0x3f, 0x5d, 0xe4, 0x36, 0xac, 0xb1, 0x4c, 0x5e, 0x42, 0x37, - 0x2f, 0x95, 0x28, 0x95, 0xd7, 0xd1, 0xee, 0xfa, 0x7b, 0x77, 0x7a, 0x0b, 0xb4, 0x39, 0x25, 0x1e, - 0x60, 0x9c, 0xeb, 0x50, 0xae, 0x3d, 0x6b, 0xd8, 0x1a, 0xb9, 0x74, 0x07, 0xc9, 0x73, 0xb0, 0x79, - 0x26, 0x4a, 0xd5, 0x44, 0xd6, 0xd3, 0x91, 0x81, 0xa6, 0x4c, 0x68, 0x11, 0x58, 0xb8, 0x14, 0xca, - 0xee, 0xc8, 0x10, 0x1c, 0x8c, 0x4b, 0x6d, 0x4f, 0xd2, 0x82, 0x22, 0xac, 0x16, 0x5b, 0x13, 0xd6, - 0x7b, 0x00, 0x34, 0xa0, 0x17, 0x26, 0xbd, 0xb3, 0x61, 0x7b, 0x64, 0x4f, 0x1f, 0xed, 0x3d, 0x9d, - 0x2e, 0x97, 0x9e, 0xcb, 0x06, 0x4b, 0xff, 0x02, 0x7a, 0x46, 0x44, 0x0a, 0xf2, 0x18, 0x7a, 0xa8, - 0x22, 0x79, 0x8c, 0x0a, 0xed, 0x91, 0x43, 0xad, 0x22, 0xac, 0xe6, 0x3c, 0x96, 0xfe, 0x15, 0xd8, - 0xd7, 0xe8, 0xac, 0xb9, 0xbd, 0x07, 0x56, 0xb3, 0x8e, 0x5d, 0x61, 0x03, 0xf1, 0x5f, 0x2a, 0x79, - 0x7c, 0x1a, 0x34, 0xca, 0x35, 0x49, 0xdf, 0xc0, 0xff, 0x47, 0x73, 0xb4, 0xea, 0x07, 0x70, 0xcd, - 0x1e, 0x4c, 0x8f, 0x99, 0x68, 0x4f, 0x07, 0x7b, 0xf3, 0xc7, 0x0d, 0x0e, 0x3f, 0x00, 0x39, 0xfd, - 0xd9, 0x82, 0xee, 0x5c, 0x3f, 0x1d, 0xf2, 0x0e, 0x5c, 0xfc, 0xba, 0xd5, 0x5b, 0xa7, 0x61, 0x45, - 0xee, 0x9d, 0x5c, 0x9e, 0xb2, 0xbb, 0x27, 0xf7, 0xff, 0x60, 0xa4, 0x20, 0x9f, 0x80, 0x7c, 0xce, - 0x37, 0xa2, 0x54, 0xec, 0xf8, 0x76, 0x7f, 0xb7, 0x7a, 0xff, 0x34, 0xc3, 0xa4, 0xb8, 0x9c, 0x7c, - 0x7b, 0x1d, 0x73, 0xb5, 0x2e, 0x97, 0xe3, 0x28, 0xdf, 0x4c, 0x52, 0x1e, 0xaf, 0x55, 0xc6, 0xb3, - 0x38, 0x63, 0xaa, 0xca, 0x8b, 0x64, 0x92, 0x66, 0xab, 0x49, 0xba, 0x7f, 0xe2, 0x85, 0x88, 0x96, - 0x5d, 0xfd, 0xc8, 0xdf, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x80, 0x01, 0xce, 0xe1, 0xfc, 0x03, - 0x00, 0x00, + // 756 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x8f, 0xdb, 0x44, + 0x14, 0xd5, 0x26, 0x6c, 0x92, 0xbd, 0x4e, 0x76, 0xb3, 0xc3, 0xaa, 
0xb8, 0x0b, 0x88, 0x60, 0xa9, + 0x28, 0x95, 0x20, 0x11, 0x01, 0x21, 0xc1, 0x13, 0x2a, 0xd5, 0xaa, 0x55, 0x8a, 0x5a, 0x39, 0x2b, + 0x1e, 0xfa, 0x62, 0x39, 0xce, 0xad, 0x33, 0xb2, 0x63, 0xcf, 0xce, 0x8c, 0xeb, 0xf8, 0x77, 0xf0, + 0xd7, 0xf8, 0x41, 0x68, 0x3e, 0xe2, 0xd8, 0x29, 0x54, 0xea, 0xd3, 0xfa, 0x9e, 0xb9, 0x73, 0xee, + 0xd9, 0x73, 0xae, 0x63, 0xb8, 0x11, 0x34, 0xce, 0x38, 0x8b, 0xe6, 0xea, 0x2f, 0xf2, 0x19, 0xe3, + 0xb9, 0xcc, 0x49, 0xdf, 0xa2, 0xde, 0x0b, 0x80, 0x25, 0x56, 0xaf, 0xf2, 0x28, 0x94, 0x39, 0x27, + 0x5f, 0x03, 0x24, 0x58, 0x05, 0xef, 0xc2, 0x1d, 0x4d, 0x2b, 0xf7, 0x6c, 0x72, 0x36, 0x3d, 0xf7, + 0x2f, 0x12, 0xac, 0xee, 0x34, 0x40, 0xbe, 0x04, 0x55, 0x04, 0x34, 0xdb, 0xe0, 0xde, 0xed, 0xe8, + 0xd3, 0x41, 0x82, 0xd5, 0x4b, 0x55, 0x7b, 0x21, 0x8c, 0x96, 0x58, 0x3d, 0x47, 0x11, 0x71, 0xca, + 0x14, 0x99, 0x07, 0x23, 0x1e, 0x96, 0x81, 0xba, 0xb1, 0xae, 0x24, 0x0a, 0xcd, 0x37, 0xf4, 0x1d, + 0x1e, 0x96, 0x4b, 0xac, 0x9e, 0x29, 0x88, 0x7c, 0x0f, 0x7d, 0x75, 0x9e, 0xe6, 0x91, 0xe6, 0x73, + 0x16, 0x9f, 0xcf, 0xac, 0xb2, 0xd9, 0x51, 0x96, 0xdf, 0x4b, 0xf4, 0xb3, 0xf7, 0x1b, 0x9c, 0xdf, + 0xef, 0x5f, 0x17, 0x92, 0xdc, 0xc0, 0xf9, 0xfb, 0x30, 0x2d, 0x50, 0x53, 0x76, 0x7d, 0x53, 0x28, + 0x79, 0x2c, 0x09, 0xcc, 0x7c, 0x4d, 0x37, 0xf4, 0x07, 0x2c, 0x59, 0xe9, 0xda, 0xfb, 0xbb, 0x03, + 0x97, 0x2b, 0x1a, 0x67, 0x0d, 0x81, 0x3f, 0x82, 0x52, 0x1f, 0x6c, 0x50, 0x44, 0x9a, 0xc8, 0x59, + 0x3c, 0x6a, 0x4e, 0x3f, 0x76, 0xfa, 0x4a, 0xa4, 0x2a, 0xc9, 0xb7, 0x30, 0x14, 0x34, 0x8b, 0x53, + 0x0c, 0x64, 0x89, 0x61, 0x62, 0xa7, 0x38, 0x06, 0xbb, 0x57, 0x90, 0x6a, 0xd9, 0xe4, 0xc5, 0xba, + 0x6e, 0xe9, 0x9a, 0x16, 0x83, 0x99, 0x96, 0x27, 0x70, 0x59, 0x52, 0x99, 0xa1, 0x10, 0x07, 0xb5, + 0x9f, 0xe9, 0xa6, 0x91, 0x45, 0x8d, 0x64, 0xf2, 0x1d, 0xf4, 0xf2, 0x42, 0xb2, 0x42, 0xba, 0xe7, + 0x5a, 0xdd, 0x65, 0xad, 0x4e, 0xbb, 0xe0, 0xdb, 0x53, 0xe2, 0x82, 0x8a, 0x73, 0x1b, 0x8a, 0xad, + 0xdb, 0x9f, 0x9c, 0x4d, 0x47, 0xfe, 0xa1, 0x24, 0xdf, 0x80, 0x43, 0x33, 0x56, 0x48, 0x1b, 0xd9, + 0x40, 
0x47, 0x06, 0x1a, 0x32, 0xa1, 0x45, 0xd0, 0x57, 0xa6, 0xf8, 0xf8, 0x40, 0x26, 0x30, 0x54, + 0x71, 0xc9, 0x7d, 0x2b, 0x2d, 0xe0, 0x61, 0x79, 0xbf, 0x37, 0x61, 0xfd, 0x02, 0xa0, 0x04, 0x68, + 0xc3, 0x84, 0xdb, 0x99, 0x74, 0xa7, 0xce, 0xe2, 0x8b, 0x5a, 0x53, 0xdb, 0x5c, 0xff, 0x42, 0xd8, + 0x5a, 0x78, 0x4f, 0x60, 0x60, 0x86, 0x08, 0x46, 0x1e, 0xc3, 0x40, 0x4d, 0x11, 0x34, 0x56, 0x13, + 0xba, 0xd3, 0xa1, 0xdf, 0xe7, 0x61, 0xb9, 0xa2, 0xb1, 0xf0, 0xee, 0xc0, 0x79, 0xa9, 0x94, 0xd9, + 0xff, 0xde, 0x85, 0xbe, 0xb5, 0xe3, 0xd0, 0x68, 0x4b, 0xb5, 0xa5, 0x82, 0xc6, 0xed, 0xa0, 0xd5, + 0x38, 0x9b, 0xf4, 0x2b, 0xb8, 0x6a, 0xf0, 0xe8, 0xa9, 0xbf, 0xc2, 0xc8, 0xf8, 0x60, 0xee, 0x18, + 0x46, 0x67, 0x71, 0x53, 0x8b, 0x6f, 0x5e, 0x18, 0xd2, 0x63, 0x21, 0xbc, 0x37, 0x66, 0x6d, 0xfe, + 0x44, 0x21, 0xc2, 0x18, 0x95, 0x51, 0x63, 0xe8, 0xee, 0x44, 0x6c, 0xfd, 0x51, 0x8f, 0x9f, 0xb8, + 0xc5, 0x73, 0xb8, 0x6a, 0x31, 0x0a, 0x46, 0xbe, 0x02, 0x6d, 0x57, 0x28, 0x0b, 0x8e, 0x96, 0xf8, + 0x08, 0x78, 0x6f, 0x61, 0xfc, 0x17, 0x72, 0xfa, 0xae, 0xfa, 0xa8, 0x88, 0x16, 0x47, 0xe7, 0x84, + 0x83, 0x3c, 0x82, 0x1e, 0x2b, 0xd6, 0x09, 0x56, 0x76, 0x1f, 0x6d, 0xe5, 0x3d, 0x85, 0xeb, 0x13, + 0x6e, 0xc1, 0xec, 0xeb, 0x45, 0x37, 0x9a, 0x7e, 0xe0, 0x9b, 0xc2, 0x4b, 0x60, 0xbc, 0xda, 0x86, + 0x1c, 0x37, 0x4b, 0xac, 0x7c, 0x7c, 0x28, 0x50, 0x48, 0xf2, 0x14, 0xc6, 0xc8, 0xb6, 0xb8, 0x43, + 0x1e, 0xa6, 0x81, 0x1d, 0x60, 0x34, 0x5d, 0xd5, 0xf8, 0x1b, 0x0d, 0x7f, 0xa2, 0x49, 0x0b, 0xb8, + 0x6e, 0x0c, 0x13, 0x2c, 0xcf, 0x04, 0xea, 0xe0, 0x35, 0x18, 0x1c, 0xe7, 0x5c, 0x88, 0x43, 0xdb, + 0xe2, 0x9f, 0x0e, 0xf4, 0x56, 0xfa, 0x57, 0x8e, 0xfc, 0x0c, 0x23, 0xf5, 0xf4, 0x5a, 0xbf, 0x20, + 0x7e, 0x58, 0x92, 0x71, 0x6b, 0x4f, 0x7d, 0x7c, 0xb8, 0xbd, 0x3e, 0x41, 0x04, 0x23, 0xbf, 0x03, + 0xf9, 0x23, 0xdf, 0xb1, 0x42, 0x62, 0x73, 0x11, 0x3f, 0xbc, 0xea, 0xfe, 0xe7, 0xde, 0x18, 0x06, + 0xa7, 0x91, 0x2d, 0x69, 0xbf, 0x1d, 0xc7, 0xf8, 0x1a, 0x0c, 0xa7, 0xab, 0x70, 0x07, 0xa3, 0x56, + 0x20, 0xe4, 0x71, 0xdd, 0x7a, 0xba, 0x04, 
0xb7, 0xb7, 0xff, 0x77, 0x24, 0x18, 0x79, 0x01, 0x57, + 0xcf, 0x91, 0xd3, 0xf7, 0x58, 0xdb, 0xd8, 0x60, 0x3a, 0xcd, 0xb1, 0xc1, 0xf4, 0x81, 0xeb, 0xcf, + 0xe6, 0x6f, 0x7f, 0x88, 0xa9, 0xdc, 0x16, 0xeb, 0x59, 0x94, 0xef, 0xe6, 0x29, 0x8d, 0xb7, 0x32, + 0xa3, 0x59, 0x9c, 0xa1, 0x2c, 0x73, 0x9e, 0xcc, 0xd3, 0x6c, 0x33, 0x4f, 0xeb, 0x2f, 0x0c, 0x67, + 0xd1, 0xba, 0xa7, 0xbf, 0x31, 0x3f, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x73, 0xb0, 0xe9, 0x51, + 0x7b, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -559,6 +859,29 @@ type SignerClient interface { //in the TxOut field, the value in that same field, and finally the input //index. ComputeInputScript(ctx context.Context, in *SignReq, opts ...grpc.CallOption) (*InputScriptResp, error) + //* + //SignMessage signs a message with the key specified in the key locator. The + //returned signature is fixed-size LN wire format encoded. + // + //The main difference to SignMessage in the main RPC is that a specific key is + //used to sign the message instead of the node identity private key. + SignMessage(ctx context.Context, in *SignMessageReq, opts ...grpc.CallOption) (*SignMessageResp, error) + //* + //VerifyMessage verifies a signature over a message using the public key + //provided. The signature must be fixed-size LN wire format encoded. + // + //The main difference to VerifyMessage in the main RPC is that the public key + //used to sign the message does not have to be a node known to the network. 
+ VerifyMessage(ctx context.Context, in *VerifyMessageReq, opts ...grpc.CallOption) (*VerifyMessageResp, error) + // + //DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key + //derivation between the ephemeral public key in the request and the node's + //key specified in the key_loc parameter (or the node's identity private key + //if no key locator is specified): + //P_shared = privKeyNode * ephemeralPubkey + //The resulting shared public key is serialized in the compressed format and + //hashed with sha256, resulting in the final key length of 256bit. + DeriveSharedKey(ctx context.Context, in *SharedKeyRequest, opts ...grpc.CallOption) (*SharedKeyResponse, error) } type signerClient struct { @@ -587,6 +910,33 @@ func (c *signerClient) ComputeInputScript(ctx context.Context, in *SignReq, opts return out, nil } +func (c *signerClient) SignMessage(ctx context.Context, in *SignMessageReq, opts ...grpc.CallOption) (*SignMessageResp, error) { + out := new(SignMessageResp) + err := c.cc.Invoke(ctx, "/signrpc.Signer/SignMessage", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *signerClient) VerifyMessage(ctx context.Context, in *VerifyMessageReq, opts ...grpc.CallOption) (*VerifyMessageResp, error) { + out := new(VerifyMessageResp) + err := c.cc.Invoke(ctx, "/signrpc.Signer/VerifyMessage", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *signerClient) DeriveSharedKey(ctx context.Context, in *SharedKeyRequest, opts ...grpc.CallOption) (*SharedKeyResponse, error) { + out := new(SharedKeyResponse) + err := c.cc.Invoke(ctx, "/signrpc.Signer/DeriveSharedKey", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // SignerServer is the server API for Signer service. type SignerServer interface { //* @@ -611,6 +961,29 @@ type SignerServer interface { //in the TxOut field, the value in that same field, and finally the input //index. 
ComputeInputScript(context.Context, *SignReq) (*InputScriptResp, error) + //* + //SignMessage signs a message with the key specified in the key locator. The + //returned signature is fixed-size LN wire format encoded. + // + //The main difference to SignMessage in the main RPC is that a specific key is + //used to sign the message instead of the node identity private key. + SignMessage(context.Context, *SignMessageReq) (*SignMessageResp, error) + //* + //VerifyMessage verifies a signature over a message using the public key + //provided. The signature must be fixed-size LN wire format encoded. + // + //The main difference to VerifyMessage in the main RPC is that the public key + //used to sign the message does not have to be a node known to the network. + VerifyMessage(context.Context, *VerifyMessageReq) (*VerifyMessageResp, error) + // + //DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key + //derivation between the ephemeral public key in the request and the node's + //key specified in the key_loc parameter (or the node's identity private key + //if no key locator is specified): + //P_shared = privKeyNode * ephemeralPubkey + //The resulting shared public key is serialized in the compressed format and + //hashed with sha256, resulting in the final key length of 256bit. 
+ DeriveSharedKey(context.Context, *SharedKeyRequest) (*SharedKeyResponse, error) } func RegisterSignerServer(s *grpc.Server, srv SignerServer) { @@ -653,6 +1026,60 @@ func _Signer_ComputeInputScript_Handler(srv interface{}, ctx context.Context, de return interceptor(ctx, in, info, handler) } +func _Signer_SignMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignMessageReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SignerServer).SignMessage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/signrpc.Signer/SignMessage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SignerServer).SignMessage(ctx, req.(*SignMessageReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Signer_VerifyMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VerifyMessageReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SignerServer).VerifyMessage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/signrpc.Signer/VerifyMessage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SignerServer).VerifyMessage(ctx, req.(*VerifyMessageReq)) + } + return interceptor(ctx, in, info, handler) +} + +func _Signer_DeriveSharedKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SharedKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SignerServer).DeriveSharedKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/signrpc.Signer/DeriveSharedKey", + } + 
handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SignerServer).DeriveSharedKey(ctx, req.(*SharedKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Signer_serviceDesc = grpc.ServiceDesc{ ServiceName: "signrpc.Signer", HandlerType: (*SignerServer)(nil), @@ -665,6 +1092,18 @@ var _Signer_serviceDesc = grpc.ServiceDesc{ MethodName: "ComputeInputScript", Handler: _Signer_ComputeInputScript_Handler, }, + { + MethodName: "SignMessage", + Handler: _Signer_SignMessage_Handler, + }, + { + MethodName: "VerifyMessage", + Handler: _Signer_VerifyMessage_Handler, + }, + { + MethodName: "DeriveSharedKey", + Handler: _Signer_DeriveSharedKey_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "signrpc/signer.proto", diff --git a/lnrpc/signrpc/signer.proto b/lnrpc/signrpc/signer.proto index 6e2b000294..346018a958 100644 --- a/lnrpc/signrpc/signer.proto +++ b/lnrpc/signrpc/signer.proto @@ -13,17 +13,17 @@ message KeyLocator { } message KeyDescriptor { - /** - The raw bytes of the key being identified. Either this or the KeyLocator - must be specified. - */ - bytes raw_key_bytes = 1; - - /** - The key locator that identifies which key to use for signing. Either this - or the raw bytes of the target key must be specified. - */ - KeyLocator key_loc = 2; + /** + The raw bytes of the key being identified. Either this or the KeyLocator + must be specified. + */ + bytes raw_key_bytes = 1; + + /** + The key locator that identifies which key to use for signing. Either this + or the raw bytes of the target key must be specified. + */ + KeyLocator key_loc = 2; } message TxOut { @@ -58,7 +58,7 @@ message SignDescriptor { commitment secret from a previously revoked commitment transaction. This value is in combination with two hash values, and the original private key to derive the private key to be used when signing. 
- + * k = (privKey*sha256(pubKey || tweakPub) + tweakPriv*sha256(tweakPub || pubKey)) mod N */ @@ -71,7 +71,8 @@ message SignDescriptor { bytes witness_script = 4; /** - A description of the output being spent. The value and script MUST be provided. + A description of the output being spent. The value and script MUST be + provided. */ TxOut output = 5; @@ -119,18 +120,66 @@ message InputScriptResp { repeated InputScript input_scripts = 1; } +message SignMessageReq { + /// The message to be signed. + bytes msg = 1; + + /// The key locator that identifies which key to use for signing. + KeyLocator key_loc = 2; +} +message SignMessageResp { + /** + The signature for the given message in the fixed-size LN wire format. + */ + bytes signature = 1; +} + +message VerifyMessageReq { + /// The message over which the signature is to be verified. + bytes msg = 1; + + /** + The fixed-size LN wire encoded signature to be verified over the given + message. + */ + bytes signature = 2; + + /// The public key the signature has to be valid for. + bytes pubkey = 3; +} +message VerifyMessageResp { + /// Whether the signature was valid over the given message. + bool valid = 1; +} + +message SharedKeyRequest { + // The ephemeral public key to use for the DH key derivation. + bytes ephemeral_pubkey = 1; + + /** + The optional key locator of the local key that should be used. If this + parameter is not set then the node's identity private key will be used. + */ + KeyLocator key_loc = 2; +} + +message SharedKeyResponse { + // The shared public key, hashed with sha256. + bytes shared_key = 1; +} + service Signer { /** SignOutputRaw is a method that can be used to generated a signature for a set of inputs/outputs to a transaction. Each request specifies details concerning how the outputs should be signed, which keys they should be signed with, and also any optional tweaks. The return value is a fixed - 64-byte signature (the same format as we use on the wire in Lightning). 
- + 64-byte signature (the same format as we use on the wire in Lightning). + If we are unable to sign using the specified keys, then an error will be returned. */ - rpc SignOutputRaw(SignReq) returns (SignResp); + rpc SignOutputRaw (SignReq) returns (SignResp); /** ComputeInputScript generates a complete InputIndex for the passed @@ -144,5 +193,34 @@ service Signer { in the TxOut field, the value in that same field, and finally the input index. */ - rpc ComputeInputScript(SignReq) returns (InputScriptResp); + rpc ComputeInputScript (SignReq) returns (InputScriptResp); + + /** + SignMessage signs a message with the key specified in the key locator. The + returned signature is fixed-size LN wire format encoded. + + The main difference to SignMessage in the main RPC is that a specific key is + used to sign the message instead of the node identity private key. + */ + rpc SignMessage (SignMessageReq) returns (SignMessageResp); + + /** + VerifyMessage verifies a signature over a message using the public key + provided. The signature must be fixed-size LN wire format encoded. + + The main difference to VerifyMessage in the main RPC is that the public key + used to sign the message does not have to be a node known to the network. + */ + rpc VerifyMessage (VerifyMessageReq) returns (VerifyMessageResp); + + /* + DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key + derivation between the ephemeral public key in the request and the node's + key specified in the key_loc parameter (or the node's identity private key + if no key locator is specified): + P_shared = privKeyNode * ephemeralPubkey + The resulting shared public key is serialized in the compressed format and + hashed with sha256, resulting in the final key length of 256bit. 
+ */ + rpc DeriveSharedKey (SharedKeyRequest) returns (SharedKeyResponse); } diff --git a/lnrpc/signrpc/signer_server.go b/lnrpc/signrpc/signer_server.go index 20abf43b59..127d113e70 100644 --- a/lnrpc/signrpc/signer_server.go +++ b/lnrpc/signrpc/signer_server.go @@ -5,18 +5,20 @@ package signrpc import ( "bytes" "context" + "crypto/sha256" "fmt" "io/ioutil" "os" "path/filepath" "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnrpc" - + "github.com/lightningnetwork/lnd/lnwire" "google.golang.org/grpc" "gopkg.in/macaroon-bakery.v2/bakery" ) @@ -37,6 +39,10 @@ var ( Entity: "signer", Action: "generate", }, + { + Entity: "signer", + Action: "read", + }, } // macPermissions maps RPC calls to the permissions they require. @@ -49,6 +55,18 @@ var ( Entity: "signer", Action: "generate", }}, + "/signrpc.Signer/SignMessage": {{ + Entity: "signer", + Action: "generate", + }}, + "/signrpc.Signer/VerifyMessage": {{ + Entity: "signer", + Action: "read", + }}, + "/signrpc.Signer/DeriveSharedKey": {{ + Entity: "signer", + Action: "generate", + }}, } // DefaultSignerMacFilename is the default name of the signer macaroon @@ -303,7 +321,7 @@ func (s *Server) SignOutputRaw(ctx context.Context, in *SignReq) (*SignResp, err return nil, err } - resp.RawSigs[i] = sig + resp.RawSigs[i] = sig.Serialize() } return resp, nil @@ -355,8 +373,9 @@ func (s *Server) ComputeInputScript(ctx context.Context, Value: signDesc.Output.Value, PkScript: signDesc.Output.PkScript, }, - HashType: txscript.SigHashType(signDesc.Sighash), - SigHashes: sigHashCache, + HashType: txscript.SigHashType(signDesc.Sighash), + SigHashes: sigHashCache, + InputIndex: int(signDesc.InputIndex), }) } @@ -383,3 +402,144 @@ func (s *Server) ComputeInputScript(ctx context.Context, return resp, nil } + +// 
SignMessage signs a message with the key specified in the key locator. The +// returned signature is fixed-size LN wire format encoded. +func (s *Server) SignMessage(ctx context.Context, + in *SignMessageReq) (*SignMessageResp, error) { + + if in.Msg == nil { + return nil, fmt.Errorf("a message to sign MUST be passed in") + } + if in.KeyLoc == nil { + return nil, fmt.Errorf("a key locator MUST be passed in") + } + + // Derive the private key we'll be using for signing. + keyLocator := keychain.KeyLocator{ + Family: keychain.KeyFamily(in.KeyLoc.KeyFamily), + Index: uint32(in.KeyLoc.KeyIndex), + } + privKey, err := s.cfg.KeyRing.DerivePrivKey(keychain.KeyDescriptor{ + KeyLocator: keyLocator, + }) + if err != nil { + return nil, fmt.Errorf("can't derive private key: %v", err) + } + + // The signature is over the sha256 hash of the message. + digest := chainhash.HashB(in.Msg) + + // Create the raw ECDSA signature first and convert it to the final wire + // format after. + sig, err := privKey.Sign(digest) + if err != nil { + return nil, fmt.Errorf("can't sign the hash: %v", err) + } + wireSig, err := lnwire.NewSigFromSignature(sig) + if err != nil { + return nil, fmt.Errorf("can't convert to wire format: %v", err) + } + return &SignMessageResp{ + Signature: wireSig.ToSignatureBytes(), + }, nil +} + +// VerifyMessage verifies a signature over a message using the public key +// provided. The signature must be fixed-size LN wire format encoded. 
+func (s *Server) VerifyMessage(ctx context.Context, + in *VerifyMessageReq) (*VerifyMessageResp, error) { + + if in.Msg == nil { + return nil, fmt.Errorf("a message to verify MUST be passed in") + } + if in.Signature == nil { + return nil, fmt.Errorf("a signature to verify MUST be passed " + + "in") + } + if in.Pubkey == nil { + return nil, fmt.Errorf("a pubkey to verify MUST be passed in") + } + pubkey, err := btcec.ParsePubKey(in.Pubkey, btcec.S256()) + if err != nil { + return nil, fmt.Errorf("unable to parse pubkey: %v", err) + } + + // The signature must be fixed-size LN wire format encoded. + wireSig, err := lnwire.NewSigFromRawSignature(in.Signature) + if err != nil { + return nil, fmt.Errorf("failed to decode signature: %v", err) + } + sig, err := wireSig.ToSignature() + if err != nil { + return nil, fmt.Errorf("failed to convert from wire format: %v", + err) + } + + // The signature is over the sha256 hash of the message. + digest := chainhash.HashB(in.Msg) + valid := sig.Verify(digest, pubkey) + return &VerifyMessageResp{ + Valid: valid, + }, nil +} + +// DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key +// derivation between the ephemeral public key in the request and the node's +// key specified in the key_loc parameter (or the node's identity private key +// if no key locator is specified): +// P_shared = privKeyNode * ephemeralPubkey +// The resulting shared public key is serialized in the compressed format and +// hashed with sha256, resulting in the final key length of 256bit. 
+func (s *Server) DeriveSharedKey(_ context.Context, in *SharedKeyRequest) ( + *SharedKeyResponse, error) { + + if len(in.EphemeralPubkey) != 33 { + return nil, fmt.Errorf("ephemeral pubkey must be " + + "serialized in compressed format") + } + ephemeralPubkey, err := btcec.ParsePubKey( + in.EphemeralPubkey, btcec.S256(), + ) + if err != nil { + return nil, fmt.Errorf("unable to parse pubkey: %v", err) + } + + // By default, use the node identity private key. + locator := keychain.KeyLocator{ + Family: keychain.KeyFamilyNodeKey, + Index: 0, + } + if in.KeyLoc != nil { + locator.Family = keychain.KeyFamily(in.KeyLoc.KeyFamily) + locator.Index = uint32(in.KeyLoc.KeyIndex) + } + + // Derive our node's private key from the key ring. + idPrivKey, err := s.cfg.KeyRing.DerivePrivKey(keychain.KeyDescriptor{ + KeyLocator: locator, + }) + if err != nil { + err := fmt.Errorf("unable to derive node private key: %v", err) + log.Error(err) + return nil, err + } + idPrivKey.Curve = btcec.S256() + + // Derive the shared key using ECDH and hashing the serialized + // compressed shared point. + sharedKeyHash := ecdh(ephemeralPubkey, idPrivKey) + return &SharedKeyResponse{SharedKey: sharedKeyHash}, nil +} + +// ecdh performs an ECDH operation between pub and priv. The returned value is +// the sha256 of the compressed shared point. 
+func ecdh(pub *btcec.PublicKey, priv *btcec.PrivateKey) []byte { + s := &btcec.PublicKey{} + x, y := btcec.S256().ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + s.X = x + s.Y = y + + h := sha256.Sum256(s.SerializeCompressed()) + return h[:] +} diff --git a/lnrpc/verrpc/driver.go b/lnrpc/verrpc/driver.go new file mode 100644 index 0000000000..db250f7d0d --- /dev/null +++ b/lnrpc/verrpc/driver.go @@ -0,0 +1,25 @@ +package verrpc + +import ( + "fmt" + + "github.com/lightningnetwork/lnd/lnrpc" +) + +func init() { + subServer := &lnrpc.SubServerDriver{ + SubServerName: subServerName, + New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer, + lnrpc.MacaroonPerms, error) { + + return &Server{}, macPermissions, nil + }, + } + + // We'll register ourselves as a sub-RPC server within the global lnrpc + // package namespace. + if err := lnrpc.RegisterSubServer(subServer); err != nil { + panic(fmt.Sprintf("failed to register sub server driver '%s': %v", + subServerName, err)) + } +} diff --git a/lnrpc/verrpc/log.go b/lnrpc/verrpc/log.go new file mode 100644 index 0000000000..fb57daa212 --- /dev/null +++ b/lnrpc/verrpc/log.go @@ -0,0 +1,32 @@ +package verrpc + +import ( + "github.com/btcsuite/btclog" + "github.com/lightningnetwork/lnd/build" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log btclog.Logger + +// Subsystem defines the logging code for this subsystem. +const Subsystem = "VRPC" + +// The default amount of logging is none. +func init() { + UseLogger(build.NewSubLogger(Subsystem, nil)) +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until UseLogger is called. +func DisableLog() { + UseLogger(btclog.Disabled) +} + +// UseLogger uses a specified Logger to output package logging info. +// This should be used in preference to SetLogWriter if the caller is also +// using btclog. 
+func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/lnrpc/verrpc/server.go b/lnrpc/verrpc/server.go new file mode 100644 index 0000000000..d11c61d1f7 --- /dev/null +++ b/lnrpc/verrpc/server.go @@ -0,0 +1,75 @@ +package verrpc + +import ( + "context" + + "github.com/lightningnetwork/lnd/build" + "google.golang.org/grpc" + "gopkg.in/macaroon-bakery.v2/bakery" +) + +const subServerName = "VersionRPC" + +var macPermissions = map[string][]bakery.Op{ + "/verrpc.Versioner/GetVersion": {{ + Entity: "info", + Action: "read", + }}, +} + +// Server is an rpc server that supports querying for information about the +// running binary. +type Server struct{} + +// Start launches any helper goroutines required for the rpcServer to function. +// +// NOTE: This is part of the lnrpc.SubServer interface. +func (s *Server) Start() error { + return nil +} + +// Stop signals any active goroutines for a graceful closure. +// +// NOTE: This is part of the lnrpc.SubServer interface. +func (s *Server) Stop() error { + return nil +} + +// Name returns a unique string representation of the sub-server. This can be +// used to identify the sub-server and also de-duplicate them. +// +// NOTE: This is part of the lnrpc.SubServer interface. +func (s *Server) Name() string { + return subServerName +} + +// RegisterWithRootServer will be called by the root gRPC server to direct a +// sub RPC server to register itself with the main gRPC root server. Until this +// is called, each sub-server won't be able to have requests routed towards it. +// +// NOTE: This is part of the lnrpc.SubServer interface. +func (s *Server) RegisterWithRootServer(grpcServer *grpc.Server) error { + RegisterVersionerServer(grpcServer, s) + + log.Debugf("Versioner RPC server successfully registered with root " + + "gRPC server") + + return nil +} + +// GetVersion returns information about the compiled binary. 
+func (s *Server) GetVersion(_ context.Context, + _ *VersionRequest) (*Version, error) { + + return &Version{ + Commit: build.Commit, + CommitHash: build.CommitHash, + Version: build.Version(), + AppMajor: uint32(build.AppMajor), + AppMinor: uint32(build.AppMinor), + AppPatch: uint32(build.AppPatch), + AppPreRelease: build.AppPreRelease, + BuildTags: build.Tags(), + GoVersion: build.GoVersion, + }, nil +} diff --git a/lnrpc/verrpc/verrpc.pb.go b/lnrpc/verrpc/verrpc.pb.go new file mode 100644 index 0000000000..00cb36427c --- /dev/null +++ b/lnrpc/verrpc/verrpc.pb.go @@ -0,0 +1,268 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: verrpc/verrpc.proto + +package verrpc + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type VersionRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionRequest) Reset() { *m = VersionRequest{} } +func (m *VersionRequest) String() string { return proto.CompactTextString(m) } +func (*VersionRequest) ProtoMessage() {} +func (*VersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_494312204cefa0e6, []int{0} +} + +func (m *VersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VersionRequest.Unmarshal(m, b) +} +func (m *VersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VersionRequest.Marshal(b, m, deterministic) +} +func (m *VersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionRequest.Merge(m, src) +} +func (m *VersionRequest) XXX_Size() int { + return xxx_messageInfo_VersionRequest.Size(m) +} +func (m *VersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionRequest proto.InternalMessageInfo + +type Version struct { + /// A verbose description of the daemon's commit. + Commit string `protobuf:"bytes,1,opt,name=commit,proto3" json:"commit,omitempty"` + /// The SHA1 commit hash that the daemon is compiled with. + CommitHash string `protobuf:"bytes,2,opt,name=commit_hash,json=commitHash,proto3" json:"commit_hash,omitempty"` + /// The semantic version. + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + /// The major application version. + AppMajor uint32 `protobuf:"varint,4,opt,name=app_major,json=appMajor,proto3" json:"app_major,omitempty"` + /// The minor application version. + AppMinor uint32 `protobuf:"varint,5,opt,name=app_minor,json=appMinor,proto3" json:"app_minor,omitempty"` + /// The application patch number. 
+ AppPatch uint32 `protobuf:"varint,6,opt,name=app_patch,json=appPatch,proto3" json:"app_patch,omitempty"` + /// The application pre-release modifier, possibly empty. + AppPreRelease string `protobuf:"bytes,7,opt,name=app_pre_release,json=appPreRelease,proto3" json:"app_pre_release,omitempty"` + /// The list of build tags that were supplied during compilation. + BuildTags []string `protobuf:"bytes,8,rep,name=build_tags,json=buildTags,proto3" json:"build_tags,omitempty"` + /// The version of go that compiled the executable. + GoVersion string `protobuf:"bytes,9,opt,name=go_version,json=goVersion,proto3" json:"go_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_494312204cefa0e6, []int{1} +} + +func (m *Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetCommit() string { + if m != nil { + return m.Commit + } + return "" +} + +func (m *Version) GetCommitHash() string { + if m != nil { + return m.CommitHash + } + return "" +} + +func (m *Version) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Version) GetAppMajor() uint32 { + if m != nil { + return m.AppMajor + } + return 0 +} + +func (m *Version) 
GetAppMinor() uint32 { + if m != nil { + return m.AppMinor + } + return 0 +} + +func (m *Version) GetAppPatch() uint32 { + if m != nil { + return m.AppPatch + } + return 0 +} + +func (m *Version) GetAppPreRelease() string { + if m != nil { + return m.AppPreRelease + } + return "" +} + +func (m *Version) GetBuildTags() []string { + if m != nil { + return m.BuildTags + } + return nil +} + +func (m *Version) GetGoVersion() string { + if m != nil { + return m.GoVersion + } + return "" +} + +func init() { + proto.RegisterType((*VersionRequest)(nil), "verrpc.VersionRequest") + proto.RegisterType((*Version)(nil), "verrpc.Version") +} + +func init() { proto.RegisterFile("verrpc/verrpc.proto", fileDescriptor_494312204cefa0e6) } + +var fileDescriptor_494312204cefa0e6 = []byte{ + // 300 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0x51, 0x4b, 0xf3, 0x30, + 0x14, 0x86, 0xd9, 0xf6, 0x7d, 0xdd, 0x72, 0x64, 0x4e, 0x22, 0x48, 0x50, 0xc4, 0xb1, 0x0b, 0xd9, + 0x85, 0xb4, 0xa0, 0xf8, 0x07, 0x76, 0xa3, 0x37, 0x82, 0x14, 0xf1, 0xc2, 0x9b, 0x92, 0x75, 0x87, + 0x34, 0xda, 0x26, 0x31, 0xc9, 0xe6, 0x6f, 0xf1, 0xdf, 0x4a, 0x93, 0xae, 0x43, 0xaf, 0x7a, 0x9e, + 0xf7, 0x29, 0xa7, 0xe5, 0x3d, 0x70, 0xba, 0x43, 0x6b, 0x4d, 0x99, 0xc5, 0x47, 0x6a, 0xac, 0xf6, + 0x9a, 0x26, 0x91, 0x16, 0x27, 0x70, 0xfc, 0x8a, 0xd6, 0x49, 0xad, 0x72, 0xfc, 0xdc, 0xa2, 0xf3, + 0x8b, 0xef, 0x21, 0x8c, 0xbb, 0x88, 0x9e, 0x41, 0x52, 0xea, 0xa6, 0x91, 0x9e, 0x0d, 0xe6, 0x83, + 0x25, 0xc9, 0x3b, 0xa2, 0x57, 0x70, 0x14, 0xa7, 0xa2, 0xe2, 0xae, 0x62, 0xc3, 0x20, 0x21, 0x46, + 0x8f, 0xdc, 0x55, 0x94, 0xc1, 0x78, 0x17, 0x77, 0xb0, 0x51, 0x90, 0x7b, 0xa4, 0x17, 0x40, 0xb8, + 0x31, 0x45, 0xc3, 0xdf, 0xb5, 0x65, 0xff, 0xe6, 0x83, 0xe5, 0x34, 0x9f, 0x70, 0x63, 0x9e, 0x5a, + 0xee, 0xa5, 0x54, 0xda, 0xb2, 0xff, 0x07, 0xd9, 0xf2, 0x5e, 0x1a, 0xee, 0xcb, 0x8a, 0x25, 0xbd, + 0x7c, 0x6e, 0x99, 0x5e, 0xc3, 0x2c, 0x48, 0x8b, 0x85, 0xc5, 0x1a, 0xb9, 0x43, 0x36, 0x0e, 
0x1f, + 0x9e, 0xb6, 0xaf, 0x58, 0xcc, 0x63, 0x48, 0x2f, 0x01, 0xd6, 0x5b, 0x59, 0x6f, 0x0a, 0xcf, 0x85, + 0x63, 0x93, 0xf9, 0x68, 0x49, 0x72, 0x12, 0x92, 0x17, 0x2e, 0x5c, 0xab, 0x85, 0x2e, 0xf6, 0xbf, + 0x4e, 0xc2, 0x06, 0x22, 0x74, 0xd7, 0xc7, 0xed, 0x0a, 0x48, 0x37, 0xa2, 0xa5, 0xf7, 0x00, 0x0f, + 0xe8, 0xfb, 0xaa, 0xd2, 0xae, 0xdf, 0xdf, 0x75, 0x9e, 0xcf, 0xfe, 0xe4, 0xab, 0xf4, 0xed, 0x46, + 0x48, 0x5f, 0x6d, 0xd7, 0x69, 0xa9, 0x9b, 0xac, 0x96, 0xa2, 0xf2, 0x4a, 0x2a, 0xa1, 0xd0, 0x7f, + 0x69, 0xfb, 0x91, 0xd5, 0x6a, 0x93, 0xd5, 0xea, 0x70, 0xaf, 0x75, 0x12, 0x0e, 0x76, 0xf7, 0x13, + 0x00, 0x00, 0xff, 0xff, 0x00, 0x3d, 0xb5, 0x81, 0xc7, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// VersionerClient is the client API for Versioner service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type VersionerClient interface { + GetVersion(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*Version, error) +} + +type versionerClient struct { + cc *grpc.ClientConn +} + +func NewVersionerClient(cc *grpc.ClientConn) VersionerClient { + return &versionerClient{cc} +} + +func (c *versionerClient) GetVersion(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*Version, error) { + out := new(Version) + err := c.cc.Invoke(ctx, "/verrpc.Versioner/GetVersion", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// VersionerServer is the server API for Versioner service. 
+type VersionerServer interface { + GetVersion(context.Context, *VersionRequest) (*Version, error) +} + +func RegisterVersionerServer(s *grpc.Server, srv VersionerServer) { + s.RegisterService(&_Versioner_serviceDesc, srv) +} + +func _Versioner_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VersionerServer).GetVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/verrpc.Versioner/GetVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VersionerServer).GetVersion(ctx, req.(*VersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Versioner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "verrpc.Versioner", + HandlerType: (*VersionerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetVersion", + Handler: _Versioner_GetVersion_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "verrpc/verrpc.proto", +} diff --git a/lnrpc/verrpc/verrpc.proto b/lnrpc/verrpc/verrpc.proto new file mode 100644 index 0000000000..1ec3a24119 --- /dev/null +++ b/lnrpc/verrpc/verrpc.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package verrpc; + +option go_package = "github.com/lightningnetwork/lnd/lnrpc/verrpc"; + +service Versioner { + rpc GetVersion (VersionRequest) returns (Version); +}; + +message VersionRequest { +}; + +message Version { + /// A verbose description of the daemon's commit. + string commit = 1; + + /// The SHA1 commit hash that the daemon is compiled with. + string commit_hash = 2; + + /// The semantic version. + string version = 3; + + /// The major application version. + uint32 app_major = 4; + + /// The minor application version. + uint32 app_minor = 5; + + /// The application patch number. 
+ uint32 app_patch = 6; + + /// The application pre-release modifier, possibly empty. + string app_pre_release = 7; + + /// The list of build tags that were supplied during compilation. + repeated string build_tags = 8; + + /// The version of go that compiled the executable. + string go_version = 9; +}; diff --git a/lnrpc/walletrpc/config_active.go b/lnrpc/walletrpc/config_active.go index 9804bc1193..aada57dcd7 100644 --- a/lnrpc/walletrpc/config_active.go +++ b/lnrpc/walletrpc/config_active.go @@ -5,6 +5,7 @@ package walletrpc import ( "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/macaroons" "github.com/lightningnetwork/lnd/sweep" ) @@ -30,7 +31,7 @@ type Config struct { // FeeEstimator is an instance of the primary fee estimator instance // the WalletKit will use to respond to fee estimation requests. - FeeEstimator lnwallet.FeeEstimator + FeeEstimator chainfee.Estimator // Wallet is the primary wallet that the WalletKit will use to proxy // any relevant requests to. diff --git a/lnrpc/walletrpc/walletkit.pb.go b/lnrpc/walletrpc/walletkit.pb.go index 9737602c58..ce1e081375 100644 --- a/lnrpc/walletrpc/walletkit.pb.go +++ b/lnrpc/walletrpc/walletkit.pb.go @@ -84,6 +84,10 @@ const ( //A witness type that allows us to sweep an output that sends to a nested P2SH //script that pays to a key solely under our control. WitnessType_NESTED_WITNESS_KEY_HASH WitnessType = 12 + // + //A witness type that allows us to spend our anchor on the commitment + //transaction. 
+ WitnessType_COMMITMENT_ANCHOR WitnessType = 13 ) var WitnessType_name = map[int32]string{ @@ -100,6 +104,7 @@ var WitnessType_name = map[int32]string{ 10: "HTLC_SECOND_LEVEL_REVOKE", 11: "WITNESS_KEY_HASH", 12: "NESTED_WITNESS_KEY_HASH", + 13: "COMMITMENT_ANCHOR", } var WitnessType_value = map[string]int32{ @@ -116,6 +121,7 @@ var WitnessType_value = map[string]int32{ "HTLC_SECOND_LEVEL_REVOKE": 10, "WITNESS_KEY_HASH": 11, "NESTED_WITNESS_KEY_HASH": 12, + "COMMITMENT_ANCHOR": 13, } func (x WitnessType) String() string { @@ -518,20 +524,28 @@ type PendingSweep struct { // The outpoint of the output we're attempting to sweep. Outpoint *lnrpc.OutPoint `protobuf:"bytes,1,opt,name=outpoint,proto3" json:"outpoint,omitempty"` // The witness type of the output we're attempting to sweep. - WitnessType WitnessType `protobuf:"varint,2,opt,name=witness_type,proto3,enum=walletrpc.WitnessType" json:"witness_type,omitempty"` + WitnessType WitnessType `protobuf:"varint,2,opt,name=witness_type,json=witnessType,proto3,enum=walletrpc.WitnessType" json:"witness_type,omitempty"` // The value of the output we're attempting to sweep. - AmountSat uint32 `protobuf:"varint,3,opt,name=amount_sat,proto3" json:"amount_sat,omitempty"` + AmountSat uint32 `protobuf:"varint,3,opt,name=amount_sat,json=amountSat,proto3" json:"amount_sat,omitempty"` // //The fee rate we'll use to sweep the output. The fee rate is only determined //once a sweeping transaction for the output is created, so it's possible for //this to be 0 before this. - SatPerByte uint32 `protobuf:"varint,4,opt,name=sat_per_byte,proto3" json:"sat_per_byte,omitempty"` + SatPerByte uint32 `protobuf:"varint,4,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` // The number of broadcast attempts we've made to sweep the output. 
- BroadcastAttempts uint32 `protobuf:"varint,5,opt,name=broadcast_attempts,proto3" json:"broadcast_attempts,omitempty"` + BroadcastAttempts uint32 `protobuf:"varint,5,opt,name=broadcast_attempts,json=broadcastAttempts,proto3" json:"broadcast_attempts,omitempty"` // //The next height of the chain at which we'll attempt to broadcast the //sweep transaction of the output. - NextBroadcastHeight uint32 `protobuf:"varint,6,opt,name=next_broadcast_height,proto3" json:"next_broadcast_height,omitempty"` + NextBroadcastHeight uint32 `protobuf:"varint,6,opt,name=next_broadcast_height,json=nextBroadcastHeight,proto3" json:"next_broadcast_height,omitempty"` + // The requested confirmation target for this output. + RequestedConfTarget uint32 `protobuf:"varint,8,opt,name=requested_conf_target,json=requestedConfTarget,proto3" json:"requested_conf_target,omitempty"` + // The requested fee rate, expressed in sat/byte, for this output. + RequestedSatPerByte uint32 `protobuf:"varint,9,opt,name=requested_sat_per_byte,json=requestedSatPerByte,proto3" json:"requested_sat_per_byte,omitempty"` + //* + //Whether this input must be force-swept. This means that it is swept even + //if it has a negative yield. 
+ Force bool `protobuf:"varint,7,opt,name=force,proto3" json:"force,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -604,6 +618,27 @@ func (m *PendingSweep) GetNextBroadcastHeight() uint32 { return 0 } +func (m *PendingSweep) GetRequestedConfTarget() uint32 { + if m != nil { + return m.RequestedConfTarget + } + return 0 +} + +func (m *PendingSweep) GetRequestedSatPerByte() uint32 { + if m != nil { + return m.RequestedSatPerByte + } + return 0 +} + +func (m *PendingSweep) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + type PendingSweepsRequest struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -638,7 +673,7 @@ var xxx_messageInfo_PendingSweepsRequest proto.InternalMessageInfo type PendingSweepsResponse struct { // //The set of outputs currently being swept by lnd's central batching engine. - PendingSweeps []*PendingSweep `protobuf:"bytes,1,rep,name=pending_sweeps,proto3" json:"pending_sweeps,omitempty"` + PendingSweeps []*PendingSweep `protobuf:"bytes,1,rep,name=pending_sweeps,json=pendingSweeps,proto3" json:"pending_sweeps,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -680,11 +715,15 @@ type BumpFeeRequest struct { // The input we're attempting to bump the fee of. Outpoint *lnrpc.OutPoint `protobuf:"bytes,1,opt,name=outpoint,proto3" json:"outpoint,omitempty"` // The target number of blocks that the input should be spent within. - TargetConf uint32 `protobuf:"varint,2,opt,name=target_conf,proto3" json:"target_conf,omitempty"` + TargetConf uint32 `protobuf:"varint,2,opt,name=target_conf,json=targetConf,proto3" json:"target_conf,omitempty"` // //The fee rate, expressed in sat/byte, that should be used to spend the input //with. 
- SatPerByte uint32 `protobuf:"varint,3,opt,name=sat_per_byte,proto3" json:"sat_per_byte,omitempty"` + SatPerByte uint32 `protobuf:"varint,3,opt,name=sat_per_byte,json=satPerByte,proto3" json:"sat_per_byte,omitempty"` + //* + //Whether this input must be force-swept. This means that it is swept even + //if it has a negative yield. + Force bool `protobuf:"varint,4,opt,name=force,proto3" json:"force,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -736,6 +775,13 @@ func (m *BumpFeeRequest) GetSatPerByte() uint32 { return 0 } +func (m *BumpFeeRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + type BumpFeeResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -788,68 +834,73 @@ func init() { func init() { proto.RegisterFile("walletrpc/walletkit.proto", fileDescriptor_6cc6942ac78249e5) } var fileDescriptor_6cc6942ac78249e5 = []byte{ - // 976 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xed, 0x6e, 0xe2, 0x46, - 0x14, 0x2d, 0x21, 0x61, 0xc3, 0x05, 0x12, 0x67, 0x08, 0x89, 0x97, 0xcd, 0x6e, 0xa8, 0xfb, 0x21, - 0xd4, 0x56, 0xa0, 0x66, 0xdb, 0xaa, 0x6a, 0x7f, 0xb4, 0x59, 0x70, 0x44, 0xc4, 0x87, 0xa9, 0xed, - 0x6c, 0xba, 0x55, 0xa5, 0x91, 0x81, 0x59, 0xb0, 0x00, 0xdb, 0x3b, 0x1e, 0x0a, 0xfc, 0x6d, 0x9f, - 0xa4, 0xaf, 0xd1, 0xa7, 0xab, 0x3c, 0xb6, 0xc9, 0x18, 0x92, 0x4a, 0xfd, 0x15, 0xe7, 0x9c, 0x73, - 0xcf, 0xdc, 0xb9, 0x33, 0x73, 0x04, 0x3c, 0x5f, 0x5a, 0xb3, 0x19, 0x61, 0xd4, 0x1b, 0xd6, 0xc3, - 0xaf, 0xa9, 0xcd, 0x6a, 0x1e, 0x75, 0x99, 0x8b, 0xb2, 0x1b, 0xaa, 0x9c, 0xa5, 0xde, 0x30, 0x44, - 0xcb, 0xa7, 0xbe, 0x3d, 0x76, 0x02, 0x79, 0xf0, 0x97, 0xd0, 0x10, 0x55, 0x7e, 0x81, 0x4c, 0x9b, - 0xac, 0x75, 0xf2, 0x01, 0x55, 0x41, 0x9a, 0x92, 0x35, 0x7e, 0x6f, 0x3b, 0x63, 0x42, 0xb1, 0x47, - 0x6d, 0x87, 0xc9, 0xa9, 0x4a, 0xaa, 0x7a, 0xa0, 0x1f, 0x4d, 0xc9, 0xfa, 0x86, 
0xc3, 0xfd, 0x00, - 0x45, 0x2f, 0x01, 0xb8, 0xd2, 0x9a, 0xdb, 0xb3, 0xb5, 0xbc, 0xc7, 0x35, 0xd9, 0x40, 0xc3, 0x01, - 0xa5, 0x00, 0xb9, 0xeb, 0xd1, 0x88, 0xea, 0xe4, 0xc3, 0x82, 0xf8, 0x4c, 0x51, 0x20, 0x1f, 0xfe, - 0xeb, 0x7b, 0xae, 0xe3, 0x13, 0x84, 0x60, 0xdf, 0x1a, 0x8d, 0x28, 0xf7, 0xce, 0xea, 0xfc, 0x5b, - 0xf9, 0x14, 0x72, 0x26, 0xb5, 0x1c, 0xdf, 0x1a, 0x32, 0xdb, 0x75, 0x50, 0x09, 0x32, 0x6c, 0x85, - 0x27, 0x64, 0xc5, 0x45, 0x79, 0xfd, 0x80, 0xad, 0x5a, 0x64, 0xa5, 0x7c, 0x07, 0xc7, 0xfd, 0xc5, - 0x60, 0x66, 0xfb, 0x93, 0x8d, 0xd9, 0x27, 0x50, 0xf0, 0x42, 0x08, 0x13, 0x4a, 0xdd, 0xd8, 0x35, - 0x1f, 0x81, 0x6a, 0x80, 0x29, 0xbf, 0x03, 0x32, 0x88, 0x33, 0xd2, 0x16, 0xcc, 0x5b, 0x30, 0x3f, - 0xea, 0x0b, 0x5d, 0x00, 0xf8, 0x16, 0xc3, 0x1e, 0xa1, 0x78, 0xba, 0xe4, 0x75, 0x69, 0xfd, 0xd0, - 0xb7, 0x58, 0x9f, 0xd0, 0xf6, 0x12, 0x55, 0xe1, 0x99, 0x1b, 0xea, 0xe5, 0xbd, 0x4a, 0xba, 0x9a, - 0xbb, 0x3a, 0xaa, 0x45, 0xf3, 0xab, 0x99, 0x2b, 0x6d, 0xc1, 0xf4, 0x98, 0x56, 0xbe, 0x82, 0x62, - 0xc2, 0x3d, 0xea, 0xac, 0x04, 0x19, 0x6a, 0x2d, 0x31, 0xdb, 0xec, 0x81, 0x5a, 0x4b, 0x73, 0xa5, - 0x7c, 0x0b, 0x48, 0xf5, 0x99, 0x3d, 0xb7, 0x18, 0xb9, 0x21, 0x24, 0xee, 0xe5, 0x12, 0x72, 0x43, - 0xd7, 0x79, 0x8f, 0x99, 0x45, 0xc7, 0x24, 0x1e, 0x3b, 0x04, 0x90, 0xc9, 0x11, 0xe5, 0x35, 0x14, - 0x13, 0x65, 0xd1, 0x22, 0xff, 0xb9, 0x07, 0xe5, 0xef, 0x3d, 0xc8, 0xf7, 0x89, 0x33, 0xb2, 0x9d, - 0xb1, 0xb1, 0x24, 0xc4, 0x43, 0x5f, 0xc2, 0x61, 0xd0, 0xb5, 0x1b, 0x1f, 0x6d, 0xee, 0xea, 0xb8, - 0x36, 0xe3, 0x7b, 0xd2, 0x16, 0xac, 0x1f, 0xc0, 0xfa, 0x46, 0x80, 0x7e, 0x80, 0xfc, 0xd2, 0x66, - 0x0e, 0xf1, 0x7d, 0xcc, 0xd6, 0x1e, 0xe1, 0xe7, 0x7c, 0x74, 0x75, 0x56, 0xdb, 0x5c, 0xae, 0xda, - 0x7d, 0x48, 0x9b, 0x6b, 0x8f, 0xe8, 0x09, 0x2d, 0x7a, 0x05, 0x60, 0xcd, 0xdd, 0x85, 0xc3, 0xb0, - 0x6f, 0x31, 0x39, 0x5d, 0x49, 0x55, 0x0b, 0xba, 0x80, 0x20, 0x05, 0xf2, 0x71, 0xdf, 0x83, 0x35, - 0x23, 0xf2, 0x3e, 0x57, 0x24, 0x30, 0x54, 0x03, 0x34, 0xa0, 0xae, 0x35, 0x1a, 0x5a, 0x3e, 0xc3, - 0x16, 0x63, 0x64, 
0xee, 0x31, 0x5f, 0x3e, 0xe0, 0xca, 0x47, 0x18, 0xf4, 0x0d, 0x94, 0x1c, 0xb2, - 0x62, 0xf8, 0x81, 0x9a, 0x10, 0x7b, 0x3c, 0x61, 0x72, 0x86, 0x97, 0x3c, 0x4e, 0x2a, 0x67, 0x70, - 0x2a, 0x8e, 0x28, 0xbe, 0x1d, 0xca, 0xaf, 0x50, 0xda, 0xc2, 0xa3, 0x91, 0xff, 0x04, 0x47, 0x5e, - 0x48, 0x60, 0x9f, 0x33, 0x72, 0x8a, 0xdf, 0x8f, 0x73, 0x61, 0x30, 0x62, 0xa5, 0xbe, 0x25, 0x57, - 0xfe, 0x4a, 0xc1, 0xd1, 0x9b, 0xc5, 0xdc, 0x13, 0x8e, 0xff, 0x7f, 0x9d, 0x4b, 0x05, 0x72, 0xe1, - 0x35, 0xc1, 0xc1, 0xfd, 0xe0, 0xc7, 0x52, 0xd0, 0x45, 0x68, 0x67, 0xba, 0xe9, 0xdd, 0xe9, 0x2a, - 0x27, 0x70, 0xbc, 0x69, 0x22, 0xdc, 0xd9, 0x17, 0x7f, 0xa6, 0x21, 0x27, 0x1c, 0x29, 0x2a, 0xc2, - 0xf1, 0x5d, 0xaf, 0xdd, 0xd3, 0xee, 0x7b, 0xf8, 0xfe, 0xd6, 0xec, 0xa9, 0x86, 0x21, 0x7d, 0x84, - 0x64, 0x38, 0x6d, 0x68, 0xdd, 0xee, 0xad, 0xd9, 0x55, 0x7b, 0x26, 0x36, 0x6f, 0xbb, 0x2a, 0xee, - 0x68, 0x8d, 0xb6, 0x94, 0x42, 0xe7, 0x50, 0x14, 0x98, 0x9e, 0x86, 0x9b, 0x6a, 0xe7, 0xfa, 0x9d, - 0xb4, 0x87, 0x4a, 0x70, 0x22, 0x10, 0xba, 0xfa, 0x56, 0x6b, 0xab, 0x52, 0x3a, 0xd0, 0xb7, 0xcc, - 0x4e, 0x03, 0x6b, 0x37, 0x37, 0xaa, 0xae, 0x36, 0x63, 0x62, 0x3f, 0x58, 0x82, 0x13, 0xd7, 0x8d, - 0x86, 0xda, 0x37, 0x1f, 0x98, 0x03, 0xf4, 0x19, 0x7c, 0x9c, 0x28, 0x09, 0x96, 0xd7, 0xee, 0x4c, - 0x6c, 0xa8, 0x0d, 0xad, 0xd7, 0xc4, 0x1d, 0xf5, 0xad, 0xda, 0x91, 0x32, 0xe8, 0x73, 0x50, 0x92, - 0x06, 0xc6, 0x5d, 0xa3, 0xa1, 0x1a, 0x46, 0x52, 0xf7, 0x0c, 0x5d, 0xc2, 0x8b, 0xad, 0x0e, 0xba, - 0x9a, 0xa9, 0xc6, 0xae, 0xd2, 0x21, 0xaa, 0xc0, 0xc5, 0x76, 0x27, 0x5c, 0x11, 0xf9, 0x49, 0x59, - 0x74, 0x01, 0x32, 0x57, 0x88, 0xce, 0x71, 0xbf, 0x80, 0x4e, 0x41, 0x8a, 0x26, 0x87, 0xdb, 0xea, - 0x3b, 0xdc, 0xba, 0x36, 0x5a, 0x52, 0x0e, 0xbd, 0x80, 0xf3, 0x9e, 0x6a, 0x04, 0x76, 0x3b, 0x64, - 0xfe, 0xea, 0x9f, 0x7d, 0xc8, 0xde, 0xf3, 0x8b, 0xd4, 0xb6, 0x83, 0x37, 0x58, 0x68, 0x12, 0x6a, - 0xff, 0x41, 0x7a, 0x64, 0xc5, 0xda, 0x64, 0x8d, 0x4e, 0x84, 0x5b, 0x16, 0xe6, 0x76, 0xf9, 0x6c, - 0x13, 0x4c, 0x6d, 0xb2, 0x6e, 0x12, 0x7f, 0x48, 0x6d, 
0x8f, 0xb9, 0x14, 0x7d, 0x0f, 0xd9, 0xb0, - 0x36, 0xa8, 0x2b, 0x8a, 0xa2, 0x8e, 0x3b, 0xb4, 0x98, 0x4b, 0x9f, 0xac, 0xfc, 0x11, 0x0e, 0x83, - 0xf5, 0x82, 0xd4, 0x46, 0xe2, 0x7b, 0x17, 0x52, 0xbd, 0x7c, 0xbe, 0x83, 0x47, 0xef, 0xa3, 0x05, - 0x28, 0x0a, 0x69, 0x31, 0xd1, 0x45, 0x1b, 0x01, 0x2f, 0x97, 0xc5, 0x57, 0xb3, 0x95, 0xed, 0x1d, - 0xc8, 0x09, 0xc1, 0x8a, 0x5e, 0x0a, 0xd2, 0xdd, 0x38, 0x2f, 0xbf, 0x7a, 0x8a, 0x7e, 0x70, 0x13, - 0x12, 0x34, 0xe1, 0xb6, 0x1b, 0xc8, 0x09, 0xb7, 0xc7, 0x82, 0x57, 0x87, 0x42, 0x22, 0x1e, 0xd0, - 0xe5, 0x13, 0xcf, 0x7f, 0xd3, 0x5f, 0xe5, 0x69, 0x41, 0xe4, 0xf9, 0x33, 0x3c, 0x8b, 0x9e, 0x24, - 0x7a, 0x2e, 0x88, 0x93, 0x59, 0x91, 0x98, 0xd8, 0xd6, 0x0b, 0x7e, 0xf3, 0xf5, 0x6f, 0xf5, 0xb1, - 0xcd, 0x26, 0x8b, 0x41, 0x6d, 0xe8, 0xce, 0xeb, 0xb3, 0x20, 0xe0, 0x1c, 0xdb, 0x19, 0x3b, 0x84, - 0x2d, 0x5d, 0x3a, 0xad, 0xcf, 0x9c, 0x51, 0x9d, 0xc7, 0x4a, 0x7d, 0x63, 0x31, 0xc8, 0xf0, 0x9f, - 0x01, 0xaf, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xd3, 0xc7, 0x77, 0x11, 0x4f, 0x08, 0x00, 0x00, + // 1055 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x6b, 0x6f, 0xe2, 0x46, + 0x14, 0x2d, 0x21, 0x21, 0x70, 0x81, 0xc4, 0x19, 0xf2, 0xf0, 0xb2, 0xd9, 0x86, 0xba, 0x0f, 0xa1, + 0x3e, 0x40, 0x4d, 0xd4, 0xaa, 0x0f, 0xa9, 0x2a, 0x21, 0x8e, 0x88, 0x20, 0x98, 0xda, 0xce, 0x46, + 0x5b, 0x55, 0x1a, 0x39, 0x30, 0x21, 0x56, 0xc0, 0xf6, 0x8e, 0x87, 0x02, 0x7f, 0xa4, 0xd2, 0xfe, + 0x95, 0xfe, 0xba, 0xca, 0xe3, 0x07, 0x63, 0x68, 0x2a, 0xf5, 0x53, 0xf0, 0x39, 0xe7, 0x1e, 0xdf, + 0xb9, 0x73, 0x7d, 0x6f, 0xe0, 0xd5, 0xdc, 0x9a, 0x4c, 0x08, 0xa3, 0xde, 0xb0, 0x19, 0xfe, 0x7a, + 0xb6, 0x59, 0xc3, 0xa3, 0x2e, 0x73, 0x51, 0x21, 0xa1, 0xaa, 0x05, 0xea, 0x0d, 0x43, 0xb4, 0x7a, + 0xe8, 0xdb, 0x63, 0x27, 0x90, 0x07, 0x7f, 0x09, 0x0d, 0x51, 0xe5, 0x37, 0xc8, 0x75, 0xc9, 0x52, + 0x27, 0xef, 0x51, 0x1d, 0xa4, 0x67, 0xb2, 0xc4, 0x8f, 0xb6, 0x33, 0x26, 0x14, 0x7b, 0xd4, 0x76, + 0x98, 0x9c, 0xa9, 0x65, 0xea, 0x3b, 0xfa, 
0xde, 0x33, 0x59, 0x5e, 0x73, 0x78, 0x10, 0xa0, 0xe8, + 0x0d, 0x00, 0x57, 0x5a, 0x53, 0x7b, 0xb2, 0x94, 0xb7, 0xb8, 0xa6, 0x10, 0x68, 0x38, 0xa0, 0x94, + 0xa1, 0xd8, 0x1a, 0x8d, 0xa8, 0x4e, 0xde, 0xcf, 0x88, 0xcf, 0x14, 0x05, 0x4a, 0xe1, 0xa3, 0xef, + 0xb9, 0x8e, 0x4f, 0x10, 0x82, 0x6d, 0x6b, 0x34, 0xa2, 0xdc, 0xbb, 0xa0, 0xf3, 0xdf, 0xca, 0x67, + 0x50, 0x34, 0xa9, 0xe5, 0xf8, 0xd6, 0x90, 0xd9, 0xae, 0x83, 0x8e, 0x20, 0xc7, 0x16, 0xf8, 0x89, + 0x2c, 0xb8, 0xa8, 0xa4, 0xef, 0xb0, 0x45, 0x87, 0x2c, 0x94, 0xef, 0x61, 0x7f, 0x30, 0x7b, 0x98, + 0xd8, 0xfe, 0x53, 0x62, 0xf6, 0x29, 0x94, 0xbd, 0x10, 0xc2, 0x84, 0x52, 0x37, 0x76, 0x2d, 0x45, + 0xa0, 0x1a, 0x60, 0xca, 0x1f, 0x80, 0x0c, 0xe2, 0x8c, 0xb4, 0x19, 0xf3, 0x66, 0xcc, 0x8f, 0xf2, + 0x42, 0xa7, 0x00, 0xbe, 0xc5, 0xb0, 0x47, 0x28, 0x7e, 0x9e, 0xf3, 0xb8, 0xac, 0x9e, 0xf7, 0x2d, + 0x36, 0x20, 0xb4, 0x3b, 0x47, 0x75, 0xd8, 0x75, 0x43, 0xbd, 0xbc, 0x55, 0xcb, 0xd6, 0x8b, 0xe7, + 0x7b, 0x8d, 0xa8, 0x7e, 0x0d, 0x73, 0xa1, 0xcd, 0x98, 0x1e, 0xd3, 0xca, 0xd7, 0x50, 0x49, 0xb9, + 0x47, 0x99, 0x1d, 0x41, 0x8e, 0x5a, 0x73, 0xcc, 0x92, 0x33, 0x50, 0x6b, 0x6e, 0x2e, 0x94, 0xef, + 0x00, 0xa9, 0x3e, 0xb3, 0xa7, 0x16, 0x23, 0xd7, 0x84, 0xc4, 0xb9, 0x9c, 0x41, 0x71, 0xe8, 0x3a, + 0x8f, 0x98, 0x59, 0x74, 0x4c, 0xe2, 0xb2, 0x43, 0x00, 0x99, 0x1c, 0x51, 0x2e, 0xa0, 0x92, 0x0a, + 0x8b, 0x5e, 0xf2, 0x9f, 0x67, 0x50, 0x3e, 0x64, 0xa1, 0x34, 0x20, 0xce, 0xc8, 0x76, 0xc6, 0xc6, + 0x9c, 0x10, 0x0f, 0x7d, 0x05, 0xf9, 0x20, 0x6b, 0x37, 0xbe, 0xda, 0xe2, 0xf9, 0x7e, 0x63, 0xc2, + 0xcf, 0xa4, 0xcd, 0xd8, 0x20, 0x80, 0xf5, 0x44, 0x80, 0x7e, 0x84, 0xd2, 0xdc, 0x66, 0x0e, 0xf1, + 0x7d, 0xcc, 0x96, 0x1e, 0xe1, 0xf7, 0xbc, 0x77, 0x7e, 0xdc, 0x48, 0x9a, 0xab, 0x71, 0x1f, 0xd2, + 0xe6, 0xd2, 0x23, 0x7a, 0x71, 0xbe, 0x7a, 0x08, 0x1a, 0xc4, 0x9a, 0xba, 0x33, 0x87, 0x61, 0xdf, + 0x62, 0x72, 0xb6, 0x96, 0xa9, 0x97, 0xf5, 0x42, 0x88, 0x18, 0x16, 0x43, 0x35, 0x28, 0xc5, 0x59, + 0x3f, 0x2c, 0x19, 0x91, 0xb7, 0xb9, 0x00, 0xc2, 0xbc, 0x2f, 0x97, 0x8c, 0xa0, 
0x6f, 0x00, 0x3d, + 0x50, 0xd7, 0x1a, 0x0d, 0x2d, 0x9f, 0x61, 0x8b, 0x31, 0x32, 0xf5, 0x98, 0x2f, 0xef, 0x70, 0xdd, + 0x41, 0xc2, 0xb4, 0x22, 0x02, 0x9d, 0xc3, 0x91, 0x43, 0x16, 0x0c, 0xaf, 0x62, 0x9e, 0x88, 0x3d, + 0x7e, 0x62, 0x72, 0x8e, 0x47, 0x54, 0x02, 0xf2, 0x32, 0xe6, 0x3a, 0x9c, 0x0a, 0x62, 0x68, 0x58, + 0x7d, 0x32, 0xc2, 0x62, 0xf1, 0xf3, 0x61, 0x4c, 0x42, 0xb6, 0x93, 0x5b, 0x40, 0x17, 0x70, 0xbc, + 0x8a, 0x49, 0x1d, 0xa1, 0xb0, 0x16, 0x64, 0xac, 0xce, 0x72, 0x08, 0x3b, 0x8f, 0x2e, 0x1d, 0x12, + 0x79, 0xb7, 0x96, 0xa9, 0xe7, 0xf5, 0xf0, 0x41, 0x39, 0x86, 0x43, 0xf1, 0x6a, 0xe2, 0xae, 0x54, + 0xee, 0xe1, 0x68, 0x0d, 0x8f, 0xae, 0xfa, 0x17, 0xd8, 0xf3, 0x42, 0x02, 0xfb, 0x9c, 0x91, 0x33, + 0xbc, 0x2f, 0x4f, 0x84, 0x0b, 0x11, 0x23, 0xf5, 0xb2, 0x27, 0xfa, 0x28, 0x7f, 0x65, 0x60, 0xef, + 0x72, 0x36, 0xf5, 0x84, 0xae, 0xfb, 0x5f, 0xed, 0x70, 0x06, 0xc5, 0xb0, 0x40, 0xbc, 0x58, 0xbc, + 0x1b, 0xca, 0x3a, 0x84, 0x50, 0x50, 0xa2, 0x8d, 0x5b, 0xcd, 0x6e, 0xdc, 0x6a, 0x52, 0x89, 0x6d, + 0xb1, 0x12, 0x07, 0xb0, 0x9f, 0xe4, 0x15, 0x9e, 0xf5, 0xcb, 0x0f, 0x59, 0x28, 0x0a, 0xcd, 0x85, + 0x2a, 0xb0, 0x7f, 0xd7, 0xef, 0xf6, 0xb5, 0xfb, 0x3e, 0xbe, 0xbf, 0x31, 0xfb, 0xaa, 0x61, 0x48, + 0x1f, 0x21, 0x19, 0x0e, 0xdb, 0xda, 0xed, 0xed, 0x8d, 0x79, 0xab, 0xf6, 0x4d, 0x6c, 0xde, 0xdc, + 0xaa, 0xb8, 0xa7, 0xb5, 0xbb, 0x52, 0x06, 0x9d, 0x40, 0x45, 0x60, 0xfa, 0x1a, 0xbe, 0x52, 0x7b, + 0xad, 0x77, 0xd2, 0x16, 0x3a, 0x82, 0x03, 0x81, 0xd0, 0xd5, 0xb7, 0x5a, 0x57, 0x95, 0xb2, 0x81, + 0xbe, 0x63, 0xf6, 0xda, 0x58, 0xbb, 0xbe, 0x56, 0x75, 0xf5, 0x2a, 0x26, 0xb6, 0x83, 0x57, 0x70, + 0xa2, 0xd5, 0x6e, 0xab, 0x03, 0x73, 0xc5, 0xec, 0xa0, 0xcf, 0xe1, 0x93, 0x54, 0x48, 0xf0, 0x7a, + 0xed, 0xce, 0xc4, 0x86, 0xda, 0xd6, 0xfa, 0x57, 0xb8, 0xa7, 0xbe, 0x55, 0x7b, 0x52, 0x0e, 0x7d, + 0x01, 0x4a, 0xda, 0xc0, 0xb8, 0x6b, 0xb7, 0x55, 0xc3, 0x48, 0xeb, 0x76, 0xd1, 0x19, 0xbc, 0x5e, + 0xcb, 0xe0, 0x56, 0x33, 0xd5, 0xd8, 0x55, 0xca, 0xa3, 0x1a, 0x9c, 0xae, 0x67, 0xc2, 0x15, 0x91, + 0x9f, 0x54, 0x40, 
0xa7, 0x20, 0x73, 0x85, 0xe8, 0x1c, 0xe7, 0x0b, 0xe8, 0x10, 0xa4, 0xa8, 0x72, + 0xb8, 0xab, 0xbe, 0xc3, 0x9d, 0x96, 0xd1, 0x91, 0x8a, 0xe8, 0x35, 0x9c, 0xf4, 0x55, 0x23, 0xb0, + 0xdb, 0x20, 0x4b, 0x6b, 0xc5, 0x6a, 0xf5, 0xdb, 0x1d, 0x4d, 0x97, 0xca, 0xe7, 0x7f, 0x6f, 0x43, + 0xe1, 0x9e, 0x77, 0x5c, 0xd7, 0x66, 0xe8, 0x27, 0x28, 0x5f, 0x11, 0x6a, 0xff, 0x49, 0xfa, 0x64, + 0xc1, 0xba, 0x64, 0x89, 0x0e, 0x84, 0x76, 0x0c, 0x17, 0x4b, 0xf5, 0x38, 0x99, 0x9c, 0x5d, 0xb2, + 0xbc, 0x22, 0xfe, 0x90, 0xda, 0x1e, 0x73, 0x29, 0xfa, 0x01, 0x0a, 0x61, 0x6c, 0x10, 0x57, 0x11, + 0x45, 0x3d, 0x77, 0x68, 0x31, 0x97, 0xbe, 0x18, 0xf9, 0x33, 0xe4, 0x83, 0xf7, 0x05, 0x6b, 0x05, + 0x89, 0x03, 0x49, 0x58, 0x3b, 0xd5, 0x93, 0x0d, 0x3c, 0xfa, 0x90, 0x3a, 0x80, 0xa2, 0x2d, 0x22, + 0xae, 0x1c, 0xd1, 0x46, 0xc0, 0xab, 0x55, 0xf1, 0xf3, 0x5a, 0x5b, 0x3e, 0x3d, 0x28, 0x0a, 0x93, + 0x1f, 0xbd, 0x11, 0xa4, 0x9b, 0xfb, 0xa6, 0xfa, 0xf1, 0x4b, 0xf4, 0xca, 0x4d, 0x18, 0xf1, 0x29, + 0xb7, 0xcd, 0x8d, 0x91, 0x72, 0xfb, 0xb7, 0xcd, 0xa0, 0x43, 0x39, 0x35, 0x47, 0xd0, 0xd9, 0x0b, + 0x73, 0x22, 0xc9, 0xaf, 0xf6, 0xb2, 0x20, 0xf2, 0xfc, 0x15, 0x76, 0xa3, 0x2f, 0x15, 0xbd, 0x12, + 0xc4, 0xe9, 0xa9, 0x92, 0xaa, 0xd8, 0xda, 0x87, 0x7d, 0xf9, 0xed, 0xef, 0xcd, 0xb1, 0xcd, 0x9e, + 0x66, 0x0f, 0x8d, 0xa1, 0x3b, 0x6d, 0x4e, 0x82, 0x41, 0xec, 0xd8, 0xce, 0xd8, 0x21, 0x6c, 0xee, + 0xd2, 0xe7, 0xe6, 0xc4, 0x19, 0x35, 0xf9, 0x00, 0x6a, 0x26, 0x16, 0x0f, 0x39, 0xfe, 0x7f, 0xca, + 0xc5, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf1, 0x3f, 0xcd, 0xa5, 0xf0, 0x08, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/lnrpc/walletrpc/walletkit.proto b/lnrpc/walletrpc/walletkit.proto index 96441025b2..96f2ce0678 100644 --- a/lnrpc/walletrpc/walletkit.proto +++ b/lnrpc/walletrpc/walletkit.proto @@ -22,7 +22,7 @@ message KeyReq { int32 key_family = 2; } -message AddrRequest{ +message AddrRequest { // No fields, as we always give out a p2wkh address. 
} message AddrResponse { @@ -164,33 +164,51 @@ enum WitnessType { script that pays to a key solely under our control. */ NESTED_WITNESS_KEY_HASH = 12; + + /* + A witness type that allows us to spend our anchor on the commitment + transaction. + */ + COMMITMENT_ANCHOR = 13; } message PendingSweep { // The outpoint of the output we're attempting to sweep. - lnrpc.OutPoint outpoint = 1 [json_name = "outpoint"]; + lnrpc.OutPoint outpoint = 1; // The witness type of the output we're attempting to sweep. - WitnessType witness_type = 2 [json_name = "witness_type"]; + WitnessType witness_type = 2; // The value of the output we're attempting to sweep. - uint32 amount_sat = 3 [json_name = "amount_sat"]; + uint32 amount_sat = 3; /* The fee rate we'll use to sweep the output. The fee rate is only determined once a sweeping transaction for the output is created, so it's possible for this to be 0 before this. */ - uint32 sat_per_byte = 4 [json_name = "sat_per_byte"]; + uint32 sat_per_byte = 4; // The number of broadcast attempts we've made to sweep the output. - uint32 broadcast_attempts = 5 [json_name = "broadcast_attempts"]; + uint32 broadcast_attempts = 5; /* The next height of the chain at which we'll attempt to broadcast the sweep transaction of the output. */ - uint32 next_broadcast_height = 6 [json_name = "next_broadcast_height"]; + uint32 next_broadcast_height = 6; + + // The requested confirmation target for this output. + uint32 requested_conf_target = 8; + + // The requested fee rate, expressed in sat/byte, for this output. + uint32 requested_sat_per_byte = 9; + + /** + Whether this input must be force-swept. This means that it is swept even + if it has a negative yield. + */ + bool force = 7; } message PendingSweepsRequest { @@ -200,21 +218,27 @@ message PendingSweepsResponse { /* The set of outputs currently being swept by lnd's central batching engine. 
*/ - repeated PendingSweep pending_sweeps = 1 [json_name = "pending_sweeps"]; + repeated PendingSweep pending_sweeps = 1; } message BumpFeeRequest { // The input we're attempting to bump the fee of. - lnrpc.OutPoint outpoint = 1 [json_name = "outpoint"]; + lnrpc.OutPoint outpoint = 1; // The target number of blocks that the input should be spent within. - uint32 target_conf = 2 [json_name = "target_conf"]; + uint32 target_conf = 2; /* The fee rate, expressed in sat/byte, that should be used to spend the input with. */ - uint32 sat_per_byte = 3 [json_name = "sat_per_byte"]; + uint32 sat_per_byte = 3; + + /** + Whether this input must be force-swept. This means that it is swept even + if it has a negative yield. + */ + bool force = 4; } message BumpFeeResponse { @@ -226,18 +250,18 @@ service WalletKit { (account in BIP43) specified. This method should return the next external child within this branch. */ - rpc DeriveNextKey(KeyReq) returns (signrpc.KeyDescriptor); + rpc DeriveNextKey (KeyReq) returns (signrpc.KeyDescriptor); /** DeriveKey attempts to derive an arbitrary key specified by the passed - KeyLocator. + KeyLocator. */ - rpc DeriveKey(signrpc.KeyLocator) returns (signrpc.KeyDescriptor); + rpc DeriveKey (signrpc.KeyLocator) returns (signrpc.KeyDescriptor); /** NextAddr returns the next unused address within the wallet. */ - rpc NextAddr(AddrRequest) returns (AddrResponse); + rpc NextAddr (AddrRequest) returns (AddrResponse); /** PublishTransaction attempts to publish the passed transaction to the @@ -245,21 +269,21 @@ service WalletKit { attempt to re-broadcast the transaction on start up, until it enters the chain. */ - rpc PublishTransaction(Transaction) returns (PublishResponse); + rpc PublishTransaction (Transaction) returns (PublishResponse); /** SendOutputs is similar to the existing sendmany call in Bitcoind, and allows the caller to create a transaction that sends to several outputs at once. 
This is ideal when wanting to batch create a set of transactions. */ - rpc SendOutputs(SendOutputsRequest) returns (SendOutputsResponse); + rpc SendOutputs (SendOutputsRequest) returns (SendOutputsResponse); /** EstimateFee attempts to query the internal fee estimator of the wallet to determine the fee (in sat/kw) to attach to a transaction in order to achieve the confirmation target. */ - rpc EstimateFee(EstimateFeeRequest) returns (EstimateFeeResponse); + rpc EstimateFee (EstimateFeeRequest) returns (EstimateFeeResponse); /* PendingSweeps returns lists of on-chain outputs that lnd is currently @@ -271,7 +295,7 @@ service WalletKit { remain supported. This is an advanced API that depends on the internals of the UtxoSweeper, so things may change. */ - rpc PendingSweeps(PendingSweepsRequest) returns (PendingSweepsResponse); + rpc PendingSweeps (PendingSweepsRequest) returns (PendingSweepsResponse); /* BumpFee bumps the fee of an arbitrary input within a transaction. This RPC @@ -300,5 +324,5 @@ service WalletKit { fee preference being provided. For now, the responsibility of ensuring that the new fee preference is sufficient is delegated to the user. */ - rpc BumpFee(BumpFeeRequest) returns (BumpFeeResponse); + rpc BumpFee (BumpFeeRequest) returns (BumpFeeResponse); } diff --git a/lnrpc/walletrpc/walletkit_server.go b/lnrpc/walletrpc/walletkit_server.go index 023782f62a..edfeb35cc1 100644 --- a/lnrpc/walletrpc/walletkit_server.go +++ b/lnrpc/walletrpc/walletkit_server.go @@ -19,6 +19,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/signrpc" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/sweep" "google.golang.org/grpc" "gopkg.in/macaroon-bakery.v2/bakery" @@ -303,7 +304,7 @@ func (w *WalletKit) SendOutputs(ctx context.Context, // Now that we have the outputs mapped, we can request that the wallet // attempt to create this transaction. 
tx, err := w.cfg.Wallet.SendOutputs( - outputsToCreate, lnwallet.SatPerKWeight(req.SatPerKw), + outputsToCreate, chainfee.SatPerKWeight(req.SatPerKw), ) if err != nil { return nil, err @@ -389,6 +390,8 @@ func (w *WalletKit) PendingSweeps(ctx context.Context, witnessType = WitnessType_WITNESS_KEY_HASH case input.NestedWitnessKeyHash: witnessType = WitnessType_NESTED_WITNESS_KEY_HASH + case input.CommitmentAnchor: + witnessType = WitnessType_COMMITMENT_ANCHOR default: log.Warnf("Unhandled witness type %v for input %v", pendingInput.WitnessType, pendingInput.OutPoint) @@ -403,6 +406,9 @@ func (w *WalletKit) PendingSweeps(ctx context.Context, broadcastAttempts := uint32(pendingInput.BroadcastAttempts) nextBroadcastHeight := uint32(pendingInput.NextBroadcastHeight) + requestedFee := pendingInput.Params.Fee + requestedFeeRate := uint32(requestedFee.FeeRate.FeePerKVByte() / 1000) + rpcPendingSweeps = append(rpcPendingSweeps, &PendingSweep{ Outpoint: op, WitnessType: witnessType, @@ -410,6 +416,9 @@ func (w *WalletKit) PendingSweeps(ctx context.Context, SatPerByte: satPerByte, BroadcastAttempts: broadcastAttempts, NextBroadcastHeight: nextBroadcastHeight, + RequestedSatPerByte: requestedFeeRate, + RequestedConfTarget: requestedFee.ConfTarget, + Force: pendingInput.Params.Force, }) } @@ -468,7 +477,7 @@ func (w *WalletKit) BumpFee(ctx context.Context, } // Construct the request's fee preference. - satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() + satPerKw := chainfee.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() feePreference := sweep.FeePreference{ ConfTarget: uint32(in.TargetConf), FeeRate: satPerKw, @@ -479,7 +488,12 @@ func (w *WalletKit) BumpFee(ctx context.Context, // bump its fee, which will result in a replacement transaction (RBF) // being broadcast. If it is not aware of the input however, // lnwallet.ErrNotMine is returned. 
- _, err = w.cfg.Sweeper.BumpFee(*op, feePreference) + params := sweep.ParamsUpdate{ + Fee: feePreference, + Force: in.Force, + } + + _, err = w.cfg.Sweeper.UpdateParams(*op, params) switch err { case nil: return &BumpFeeResponse{}, nil @@ -535,7 +549,7 @@ func (w *WalletKit) BumpFee(ctx context.Context, } input := input.NewBaseInput(op, witnessType, signDesc, uint32(currentHeight)) - if _, err = w.cfg.Sweeper.SweepInput(input, feePreference); err != nil { + if _, err = w.cfg.Sweeper.SweepInput(input, sweep.Params{Fee: feePreference}); err != nil { return nil, err } diff --git a/lnrpc/watchtowerrpc/watchtower.pb.go b/lnrpc/watchtowerrpc/watchtower.pb.go index 2a010f1ca1..3b19e6ee48 100644 --- a/lnrpc/watchtowerrpc/watchtower.pb.go +++ b/lnrpc/watchtowerrpc/watchtower.pb.go @@ -150,7 +150,7 @@ const _ = grpc.SupportPackageIsVersion4 type WatchtowerClient interface { //* lncli: tower info //GetInfo returns general information concerning the companion watchtower - //including it's public key and URIs where the server is currently + //including its public key and URIs where the server is currently //listening for clients. GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) } @@ -176,7 +176,7 @@ func (c *watchtowerClient) GetInfo(ctx context.Context, in *GetInfoRequest, opts type WatchtowerServer interface { //* lncli: tower info //GetInfo returns general information concerning the companion watchtower - //including it's public key and URIs where the server is currently + //including its public key and URIs where the server is currently //listening for clients. 
GetInfo(context.Context, *GetInfoRequest) (*GetInfoResponse, error) } diff --git a/lnrpc/watchtowerrpc/watchtower.proto b/lnrpc/watchtowerrpc/watchtower.proto index 71c7648636..818807ef23 100644 --- a/lnrpc/watchtowerrpc/watchtower.proto +++ b/lnrpc/watchtowerrpc/watchtower.proto @@ -5,24 +5,24 @@ package watchtowerrpc; option go_package = "github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc"; service Watchtower { - /** lncli: tower info - GetInfo returns general information concerning the companion watchtower - including it's public key and URIs where the server is currently - listening for clients. - */ - rpc GetInfo(GetInfoRequest) returns (GetInfoResponse); + /** lncli: tower info + GetInfo returns general information concerning the companion watchtower + including its public key and URIs where the server is currently + listening for clients. + */ + rpc GetInfo (GetInfoRequest) returns (GetInfoResponse); } -message GetInfoRequest{ +message GetInfoRequest { } message GetInfoResponse { - /// The public key of the watchtower. - bytes pubkey = 1 [json_name = "pubkey"]; + /// The public key of the watchtower. + bytes pubkey = 1; - /// The listening addresses of the watchtower. - repeated string listeners = 2 [json_name = "listeners"]; + /// The listening addresses of the watchtower. + repeated string listeners = 2; - /// The URIs of the watchtower. - repeated string uris = 3 [json_name = "uris" ]; + /// The URIs of the watchtower. + repeated string uris = 3; } diff --git a/lnrpc/wtclientrpc/wtclient.pb.go b/lnrpc/wtclientrpc/wtclient.pb.go index f99dbc6e32..3ccff8d3a8 100644 --- a/lnrpc/wtclientrpc/wtclient.pb.go +++ b/lnrpc/wtclientrpc/wtclient.pb.go @@ -189,7 +189,7 @@ type GetTowerInfoRequest struct { // The identifying public key of the watchtower to retrieve information for. Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"` // Whether we should include sessions with the watchtower in the response. 
- IncludeSessions bool `protobuf:"varint,2,opt,name=include_sessions,proto3" json:"include_sessions,omitempty"` + IncludeSessions bool `protobuf:"varint,2,opt,name=include_sessions,json=includeSessions,proto3" json:"include_sessions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -238,17 +238,17 @@ type TowerSession struct { // //The total number of successful backups that have been made to the //watchtower session. - NumBackups uint32 `protobuf:"varint,1,opt,name=num_backups,proto3" json:"num_backups,omitempty"` + NumBackups uint32 `protobuf:"varint,1,opt,name=num_backups,json=numBackups,proto3" json:"num_backups,omitempty"` // //The total number of backups in the session that are currently pending to be //acknowledged by the watchtower. - NumPendingBackups uint32 `protobuf:"varint,2,opt,name=num_pending_backups,proto3" json:"num_pending_backups,omitempty"` + NumPendingBackups uint32 `protobuf:"varint,2,opt,name=num_pending_backups,json=numPendingBackups,proto3" json:"num_pending_backups,omitempty"` // The maximum number of backups allowed by the watchtower session. - MaxBackups uint32 `protobuf:"varint,3,opt,name=max_backups,proto3" json:"max_backups,omitempty"` + MaxBackups uint32 `protobuf:"varint,3,opt,name=max_backups,json=maxBackups,proto3" json:"max_backups,omitempty"` // //The fee rate, in satoshis per vbyte, that will be used by the watchtower for //the justice transaction in the event of a channel breach. - SweepSatPerByte uint32 `protobuf:"varint,4,opt,name=sweep_sat_per_byte,proto3" json:"sweep_sat_per_byte,omitempty"` + SweepSatPerByte uint32 `protobuf:"varint,4,opt,name=sweep_sat_per_byte,json=sweepSatPerByte,proto3" json:"sweep_sat_per_byte,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -313,9 +313,9 @@ type Tower struct { // The list of addresses the watchtower is reachable over. 
Addresses []string `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"` // Whether the watchtower is currently a candidate for new sessions. - ActiveSessionCandidate bool `protobuf:"varint,3,opt,name=active_session_candidate,proto3" json:"active_session_candidate,omitempty"` + ActiveSessionCandidate bool `protobuf:"varint,3,opt,name=active_session_candidate,json=activeSessionCandidate,proto3" json:"active_session_candidate,omitempty"` // The number of sessions that have been negotiated with the watchtower. - NumSessions uint32 `protobuf:"varint,4,opt,name=num_sessions,proto3" json:"num_sessions,omitempty"` + NumSessions uint32 `protobuf:"varint,4,opt,name=num_sessions,json=numSessions,proto3" json:"num_sessions,omitempty"` // The list of sessions that have been negotiated with the watchtower. Sessions []*TowerSession `protobuf:"bytes,5,rep,name=sessions,proto3" json:"sessions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -385,7 +385,7 @@ func (m *Tower) GetSessions() []*TowerSession { type ListTowersRequest struct { // Whether we should include sessions with the watchtower in the response. - IncludeSessions bool `protobuf:"varint,1,opt,name=include_sessions,proto3" json:"include_sessions,omitempty"` + IncludeSessions bool `protobuf:"varint,1,opt,name=include_sessions,json=includeSessions,proto3" json:"include_sessions,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -498,19 +498,19 @@ type StatsResponse struct { // //The total number of backups made to all active and exhausted watchtower //sessions. - NumBackups uint32 `protobuf:"varint,1,opt,name=num_backups,proto3" json:"num_backups,omitempty"` + NumBackups uint32 `protobuf:"varint,1,opt,name=num_backups,json=numBackups,proto3" json:"num_backups,omitempty"` // //The total number of backups that are pending to be acknowledged by all //active and exhausted watchtower sessions. 
- NumPendingBackups uint32 `protobuf:"varint,2,opt,name=num_pending_backups,proto3" json:"num_pending_backups,omitempty"` + NumPendingBackups uint32 `protobuf:"varint,2,opt,name=num_pending_backups,json=numPendingBackups,proto3" json:"num_pending_backups,omitempty"` // //The total number of backups that all active and exhausted watchtower //sessions have failed to acknowledge. - NumFailedBackups uint32 `protobuf:"varint,3,opt,name=num_failed_backups,proto3" json:"num_failed_backups,omitempty"` + NumFailedBackups uint32 `protobuf:"varint,3,opt,name=num_failed_backups,json=numFailedBackups,proto3" json:"num_failed_backups,omitempty"` // The total number of new sessions made to watchtowers. - NumSessionsAcquired uint32 `protobuf:"varint,4,opt,name=num_sessions_acquired,proto3" json:"num_sessions_acquired,omitempty"` + NumSessionsAcquired uint32 `protobuf:"varint,4,opt,name=num_sessions_acquired,json=numSessionsAcquired,proto3" json:"num_sessions_acquired,omitempty"` // The total number of watchtower sessions that have been exhausted. - NumSessionsExhausted uint32 `protobuf:"varint,5,opt,name=num_sessions_exhausted,proto3" json:"num_sessions_exhausted,omitempty"` + NumSessionsExhausted uint32 `protobuf:"varint,5,opt,name=num_sessions_exhausted,json=numSessionsExhausted,proto3" json:"num_sessions_exhausted,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -611,11 +611,11 @@ type PolicyResponse struct { // //The maximum number of updates each session we negotiate with watchtowers //should allow. - MaxUpdates uint32 `protobuf:"varint,1,opt,name=max_updates,proto3" json:"max_updates,omitempty"` + MaxUpdates uint32 `protobuf:"varint,1,opt,name=max_updates,json=maxUpdates,proto3" json:"max_updates,omitempty"` // //The fee rate, in satoshis per vbyte, that will be used by watchtowers for //justice transactions in response to channel breaches. 
- SweepSatPerByte uint32 `protobuf:"varint,2,opt,name=sweep_sat_per_byte,proto3" json:"sweep_sat_per_byte,omitempty"` + SweepSatPerByte uint32 `protobuf:"varint,2,opt,name=sweep_sat_per_byte,json=sweepSatPerByte,proto3" json:"sweep_sat_per_byte,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -679,47 +679,50 @@ func init() { func init() { proto.RegisterFile("wtclientrpc/wtclient.proto", fileDescriptor_b5f4e7d95a641af2) } var fileDescriptor_b5f4e7d95a641af2 = []byte{ - // 634 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcb, 0x6e, 0xd3, 0x40, - 0x14, 0x95, 0x13, 0x12, 0xd2, 0x9b, 0xf4, 0xc1, 0xad, 0x5a, 0x19, 0x53, 0x68, 0xe4, 0x55, 0xd4, - 0x45, 0x02, 0x2d, 0xb0, 0x60, 0x01, 0x94, 0x22, 0x2a, 0x24, 0x90, 0x2a, 0x17, 0x09, 0xc1, 0xc6, - 0xf2, 0x63, 0x9a, 0x8c, 0xea, 0x8c, 0x5d, 0xcf, 0xb8, 0x69, 0x97, 0xfc, 0x13, 0xbf, 0xc0, 0x1f, - 0xf0, 0x41, 0xc8, 0xe3, 0x47, 0xc6, 0x8d, 0x2d, 0x16, 0x88, 0x5d, 0xe6, 0x9c, 0x93, 0x33, 0x37, - 0xf7, 0x9e, 0xdc, 0x01, 0x63, 0x21, 0xbc, 0x80, 0x12, 0x26, 0xe2, 0xc8, 0x9b, 0x14, 0x9f, 0xc7, - 0x51, 0x1c, 0x8a, 0x10, 0xfb, 0x0a, 0x67, 0x9e, 0xc0, 0xe6, 0xb1, 0xef, 0x7f, 0x09, 0x17, 0x24, - 0xb6, 0xc8, 0x55, 0x42, 0xb8, 0xc0, 0x5d, 0xe8, 0x46, 0x89, 0x7b, 0x49, 0x6e, 0x75, 0x6d, 0xa8, - 0x8d, 0x06, 0x56, 0x7e, 0x42, 0x1d, 0xee, 0x3b, 0xbe, 0x1f, 0x13, 0xce, 0xf5, 0xd6, 0x50, 0x1b, - 0xad, 0x59, 0xc5, 0xd1, 0x44, 0xd8, 0x5a, 0x9a, 0xf0, 0x28, 0x64, 0x9c, 0x98, 0x1f, 0x00, 0x2d, - 0x32, 0x0f, 0xaf, 0xc9, 0x3f, 0x7a, 0xef, 0xc0, 0x76, 0xc5, 0x27, 0xb7, 0xff, 0x06, 0xdb, 0xa7, - 0x44, 0x48, 0xec, 0x23, 0xbb, 0x08, 0xff, 0xe6, 0x7f, 0x00, 0x5b, 0x94, 0x79, 0x41, 0xe2, 0x13, - 0x9b, 0x13, 0xce, 0x69, 0xc8, 0xb2, 0x8b, 0x7a, 0xd6, 0x0a, 0x6e, 0xfe, 0xd4, 0x60, 0x20, 0x8d, - 0xcf, 0x33, 0x04, 0x87, 0xd0, 0x67, 0xc9, 0xdc, 0x76, 0x1d, 0xef, 0x32, 0x89, 0xb8, 0x74, 0x5e, - 0xb7, 0x54, 0x08, 0x9f, 0xc2, 
0x76, 0x7a, 0x8c, 0x08, 0xf3, 0x29, 0x9b, 0x96, 0xca, 0x96, 0x54, - 0xd6, 0x51, 0xa9, 0xe7, 0xdc, 0xb9, 0x29, 0x95, 0xed, 0xcc, 0x53, 0x81, 0x70, 0x0c, 0xc8, 0x17, - 0x84, 0x44, 0x36, 0x77, 0x84, 0x1d, 0x91, 0xd8, 0x76, 0x6f, 0x05, 0xd1, 0xef, 0x49, 0x61, 0x0d, - 0x63, 0xfe, 0xd6, 0xa0, 0x23, 0xcb, 0x6e, 0x6c, 0xc2, 0x1e, 0xac, 0xe5, 0x5d, 0x25, 0x69, 0x6d, - 0xed, 0xd1, 0x9a, 0xb5, 0x04, 0xf0, 0x15, 0xe8, 0x8e, 0x27, 0xe8, 0x75, 0xd9, 0x09, 0xdb, 0x73, - 0x98, 0x4f, 0x7d, 0x47, 0x10, 0x59, 0x5e, 0xcf, 0x6a, 0xe4, 0xd1, 0x84, 0x41, 0xfa, 0x23, 0xcb, - 0xd6, 0x66, 0x55, 0x56, 0x30, 0x7c, 0x01, 0xbd, 0x92, 0xef, 0x0c, 0xdb, 0xa3, 0xfe, 0xe1, 0xc3, - 0xb1, 0x92, 0xc4, 0xb1, 0xda, 0x72, 0xab, 0x94, 0x9a, 0x6f, 0xe0, 0xc1, 0x27, 0xca, 0xb3, 0x49, - 0xf3, 0x62, 0xcc, 0x75, 0xe3, 0xd4, 0x1a, 0xc6, 0xf9, 0x16, 0x50, 0x35, 0xc8, 0xf2, 0x83, 0x07, - 0xd0, 0x15, 0x12, 0xd1, 0x35, 0x59, 0x0b, 0xae, 0xd6, 0x62, 0xe5, 0x0a, 0x73, 0x03, 0x06, 0xe7, - 0xc2, 0x11, 0xc5, 0xed, 0xe6, 0x8f, 0x16, 0xac, 0xe7, 0x40, 0xee, 0xf6, 0x3f, 0x12, 0x32, 0x06, - 0x4c, 0xe1, 0x0b, 0x87, 0x06, 0xc4, 0xbf, 0x13, 0x94, 0x1a, 0x06, 0x9f, 0xc3, 0x8e, 0xda, 0x6f, - 0xdb, 0xf1, 0xae, 0x12, 0x1a, 0x13, 0x3f, 0x1f, 0x46, 0x3d, 0x89, 0x2f, 0x61, 0xb7, 0x42, 0x90, - 0x9b, 0x99, 0x93, 0x70, 0x41, 0x7c, 0xbd, 0x23, 0xbf, 0xd6, 0xc0, 0x9a, 0x9b, 0xb0, 0x7e, 0x16, - 0x06, 0xd4, 0xbb, 0x2d, 0x9a, 0xe2, 0xc2, 0x46, 0x01, 0x2c, 0x9b, 0x92, 0xe6, 0x39, 0x89, 0xd2, - 0x88, 0x94, 0x4d, 0x51, 0xa0, 0x86, 0x88, 0xb7, 0x9a, 0x22, 0x7e, 0xf8, 0xab, 0x0d, 0x5b, 0x5f, - 0x1d, 0xe1, 0xcd, 0xe4, 0x60, 0x4e, 0xe4, 0xb8, 0xf0, 0x14, 0x7a, 0xc5, 0xf2, 0xc1, 0xbd, 0xca, - 0x14, 0xef, 0x2c, 0x36, 0xe3, 0x71, 0x03, 0x9b, 0xd7, 0x7b, 0x06, 0x7d, 0x65, 0xd3, 0xe0, 0x7e, - 0x45, 0xbd, 0xba, 0xcb, 0x8c, 0x61, 0xb3, 0x20, 0x77, 0xfc, 0x0c, 0xb0, 0x8c, 0x1e, 0x3e, 0xa9, - 0xe8, 0x57, 0x42, 0x6d, 0xec, 0x37, 0xf2, 0xb9, 0xdd, 0x7b, 0x18, 0xa8, 0x3b, 0x0f, 0xab, 0x05, - 0xd4, 0xac, 0x43, 0xa3, 0x26, 0xd5, 0xf8, 0x1a, 0x3a, 0x32, 0xbc, 
0x58, 0xfd, 0xfb, 0xa9, 0x09, - 0x37, 0x8c, 0x3a, 0x2a, 0xaf, 0xe2, 0x18, 0xba, 0xd9, 0xa0, 0xb1, 0xaa, 0xaa, 0xc4, 0xc1, 0x78, - 0x54, 0xcb, 0x65, 0x16, 0xef, 0x8e, 0xbe, 0x3f, 0x9b, 0x52, 0x31, 0x4b, 0xdc, 0xb1, 0x17, 0xce, - 0x27, 0x01, 0x9d, 0xce, 0x04, 0xa3, 0x6c, 0xca, 0x88, 0x58, 0x84, 0xf1, 0xe5, 0x24, 0x60, 0xfe, - 0x24, 0x60, 0xea, 0xcb, 0x15, 0x47, 0x9e, 0xdb, 0x95, 0xaf, 0xd7, 0xd1, 0x9f, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xdd, 0x33, 0x97, 0x54, 0xdb, 0x06, 0x00, 0x00, + // 682 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x95, 0x1b, 0x12, 0xd2, 0x49, 0xda, 0xa4, 0x1b, 0x5a, 0x19, 0x53, 0x48, 0xf0, 0x29, 0x7c, + 0x28, 0x11, 0x2d, 0x48, 0x9c, 0x2a, 0xda, 0x42, 0x2b, 0x24, 0x90, 0x22, 0x17, 0x04, 0xe2, 0x80, + 0xb5, 0xb1, 0xb7, 0x89, 0x55, 0x7b, 0xed, 0x7a, 0xd7, 0x4d, 0xf2, 0xa3, 0xf8, 0x19, 0xfc, 0x00, + 0xfe, 0x0d, 0x47, 0xe4, 0xf5, 0xda, 0xb1, 0x1b, 0x47, 0x1c, 0xe0, 0x16, 0xcf, 0x7b, 0xfb, 0x3c, + 0x7e, 0xf3, 0x32, 0x0b, 0xda, 0x8c, 0x5b, 0xae, 0x43, 0x28, 0x0f, 0x03, 0x6b, 0x98, 0xfe, 0x1e, + 0x04, 0xa1, 0xcf, 0x7d, 0xd4, 0xc8, 0x61, 0xfa, 0x29, 0xb4, 0x8e, 0x6d, 0xfb, 0x93, 0x3f, 0x23, + 0xa1, 0x41, 0xae, 0x23, 0xc2, 0x38, 0xda, 0x83, 0x5a, 0x10, 0x8d, 0xaf, 0xc8, 0x42, 0x55, 0x7a, + 0x4a, 0xbf, 0x69, 0xc8, 0x27, 0xa4, 0xc2, 0x5d, 0x6c, 0xdb, 0x21, 0x61, 0x4c, 0xdd, 0xe8, 0x29, + 0xfd, 0x4d, 0x23, 0x7d, 0xd4, 0x11, 0xb4, 0x97, 0x22, 0x2c, 0xf0, 0x29, 0x23, 0xfa, 0x19, 0x20, + 0x83, 0x78, 0xfe, 0x0d, 0xf9, 0x47, 0xed, 0x5d, 0xe8, 0x14, 0x74, 0xa4, 0xfc, 0x57, 0xe8, 0x9c, + 0x13, 0x2e, 0x6a, 0xef, 0xe9, 0xa5, 0xff, 0x37, 0xfd, 0x27, 0xd0, 0x76, 0xa8, 0xe5, 0x46, 0x36, + 0x31, 0x19, 0x61, 0xcc, 0xf1, 0x69, 0xf2, 0xa2, 0xba, 0xd1, 0x92, 0xf5, 0x0b, 0x59, 0xd6, 0x7f, + 0x28, 0xd0, 0x14, 0xba, 0xb2, 0x82, 0xba, 0xd0, 0xa0, 0x91, 0x67, 0x8e, 0xb1, 0x75, 0x15, 0x05, + 0x4c, 0x08, 0x6f, 0x19, 0x40, 0x23, 0xef, 0x24, 0xa9, 0xa0, 0x01, 0x74, 0x62, 0x42, 0x40, 
0xa8, + 0xed, 0xd0, 0x49, 0x46, 0xdc, 0x10, 0xc4, 0x1d, 0x1a, 0x79, 0xa3, 0x04, 0x49, 0xf9, 0x5d, 0x68, + 0x78, 0x78, 0x9e, 0xf1, 0x2a, 0x89, 0xa0, 0x87, 0xe7, 0x29, 0xe1, 0x19, 0x20, 0x36, 0x23, 0x24, + 0x30, 0x19, 0xe6, 0x66, 0x40, 0x42, 0x73, 0xbc, 0xe0, 0x44, 0xbd, 0x23, 0x78, 0x2d, 0x81, 0x5c, + 0x60, 0x3e, 0x22, 0xe1, 0xc9, 0x82, 0x13, 0xfd, 0x97, 0x02, 0x55, 0xd1, 0xef, 0xda, 0x8f, 0xdf, + 0x87, 0x4d, 0xe9, 0x26, 0x89, 0xbb, 0xaa, 0xf4, 0x37, 0x8d, 0x65, 0x01, 0xbd, 0x06, 0x15, 0x5b, + 0xdc, 0xb9, 0xc9, 0x9c, 0x31, 0x2d, 0x4c, 0x6d, 0xc7, 0xc6, 0x9c, 0x88, 0xd6, 0xea, 0xc6, 0x5e, + 0x82, 0x4b, 0x3f, 0x4e, 0x53, 0x14, 0x3d, 0x86, 0x66, 0xfc, 0xdd, 0x99, 0xa1, 0x49, 0x83, 0xb1, + 0x59, 0xa9, 0x99, 0xe8, 0x15, 0xd4, 0x33, 0xb8, 0xda, 0xab, 0xf4, 0x1b, 0x07, 0xf7, 0x07, 0xb9, + 0xf8, 0x0d, 0xf2, 0x46, 0x1b, 0x19, 0x55, 0x3f, 0x82, 0x9d, 0x0f, 0x0e, 0x4b, 0xc6, 0xcb, 0xd2, + 0xd9, 0x96, 0xcd, 0x50, 0x29, 0x9f, 0xe1, 0x1b, 0x40, 0xf9, 0xf3, 0x49, 0x66, 0xd0, 0x53, 0xa8, + 0x71, 0x51, 0x51, 0x15, 0xd1, 0x0a, 0x5a, 0x6d, 0xc5, 0x90, 0x0c, 0x7d, 0x1b, 0x9a, 0x17, 0x1c, + 0xf3, 0xf4, 0xe5, 0xfa, 0x6f, 0x05, 0xb6, 0x64, 0x41, 0xaa, 0xfd, 0xf7, 0x58, 0x3c, 0x07, 0x14, + 0xf3, 0x2f, 0xb1, 0xe3, 0x12, 0xfb, 0x56, 0x3a, 0xda, 0x34, 0xf2, 0xce, 0x04, 0x90, 0xb2, 0x0f, + 0x60, 0x37, 0x6f, 0xbe, 0x89, 0xad, 0xeb, 0xc8, 0x09, 0x89, 0x2d, 0xa7, 0xd0, 0xc9, 0x4d, 0xe1, + 0x58, 0x42, 0xe8, 0x25, 0xec, 0x15, 0xce, 0x90, 0xf9, 0x14, 0x47, 0x8c, 0x13, 0x5b, 0xad, 0x8a, + 0x43, 0xf7, 0x72, 0x87, 0xde, 0xa5, 0x98, 0xde, 0x82, 0xad, 0x91, 0xef, 0x3a, 0xd6, 0x22, 0xf5, + 0xe2, 0x3b, 0x6c, 0xa7, 0x85, 0xa5, 0x17, 0x71, 0xa2, 0xa3, 0x20, 0xce, 0x45, 0xe6, 0x85, 0x87, + 0xe7, 0x9f, 0x93, 0xca, 0x9a, 0x44, 0x6f, 0x94, 0x26, 0xfa, 0xe0, 0x67, 0x05, 0xda, 0x5f, 0x30, + 0xb7, 0xa6, 0x62, 0x16, 0xa7, 0x62, 0x42, 0xe8, 0x1c, 0xea, 0xe9, 0x8e, 0x41, 0xfb, 0x85, 0xc1, + 0xdd, 0xda, 0x5f, 0xda, 0xc3, 0x35, 0xa8, 0xec, 0x75, 0x04, 0x8d, 0xdc, 0x42, 0x41, 0xdd, 0x02, + 0x7b, 0x75, 0x65, 0x69, 0xbd, 
0xf5, 0x04, 0xa9, 0xf8, 0x11, 0x60, 0x99, 0x36, 0xf4, 0xa8, 0xc0, + 0x5f, 0x89, 0xb1, 0xd6, 0x5d, 0x8b, 0x4b, 0xb9, 0xb7, 0xd0, 0xcc, 0xaf, 0x36, 0x54, 0x6c, 0xa0, + 0x64, 0xeb, 0x69, 0x25, 0x41, 0x46, 0x47, 0x50, 0x15, 0x79, 0x45, 0xc5, 0x3f, 0x5c, 0x3e, 0xd4, + 0x9a, 0x56, 0x06, 0xc9, 0x2e, 0x8e, 0xa1, 0x96, 0x0c, 0x19, 0x15, 0x59, 0x85, 0x28, 0x68, 0x0f, + 0x4a, 0xb1, 0x44, 0xe2, 0xe4, 0xf0, 0xdb, 0x8b, 0x89, 0xc3, 0xa7, 0xd1, 0x78, 0x60, 0xf9, 0xde, + 0xd0, 0x75, 0x26, 0x53, 0x4e, 0x1d, 0x3a, 0xa1, 0x84, 0xcf, 0xfc, 0xf0, 0x6a, 0xe8, 0x52, 0x7b, + 0xe8, 0xd2, 0xfc, 0x05, 0x15, 0x06, 0xd6, 0xb8, 0x26, 0x2e, 0xa9, 0xc3, 0x3f, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x5d, 0xba, 0x03, 0x17, 0xc2, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/lnrpc/wtclientrpc/wtclient.proto b/lnrpc/wtclientrpc/wtclient.proto index fa2910e938..d264d4b170 100644 --- a/lnrpc/wtclientrpc/wtclient.proto +++ b/lnrpc/wtclientrpc/wtclient.proto @@ -6,10 +6,10 @@ option go_package = "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"; message AddTowerRequest { // The identifying public key of the watchtower to add. - bytes pubkey = 1 [json_name = "pubkey"]; + bytes pubkey = 1; // A network address the watchtower is reachable over. - string address = 2 [json_name = "address"]; + string address = 2; } message AddTowerResponse { @@ -17,14 +17,14 @@ message AddTowerResponse { message RemoveTowerRequest { // The identifying public key of the watchtower to remove. - bytes pubkey = 1 [json_name = "pubkey"]; + bytes pubkey = 1; /* If set, then the record for this address will be removed, indicating that is is stale. Otherwise, the watchtower will no longer be used for future session negotiations and backups. 
*/ - string address = 2 [json_name = "address"]; + string address = 2; } message RemoveTowerResponse { @@ -32,10 +32,10 @@ message RemoveTowerResponse { message GetTowerInfoRequest { // The identifying public key of the watchtower to retrieve information for. - bytes pubkey = 1 [json_name = "pubkey"]; + bytes pubkey = 1; // Whether we should include sessions with the watchtower in the response. - bool include_sessions = 2 [json_name = "include_sessions"]; + bool include_sessions = 2; } message TowerSession { @@ -43,49 +43,49 @@ message TowerSession { The total number of successful backups that have been made to the watchtower session. */ - uint32 num_backups = 1 [json_name = "num_backups"]; + uint32 num_backups = 1; /* The total number of backups in the session that are currently pending to be acknowledged by the watchtower. */ - uint32 num_pending_backups = 2 [json_name = "num_pending_backups"]; + uint32 num_pending_backups = 2; // The maximum number of backups allowed by the watchtower session. - uint32 max_backups = 3 [json_name = "max_backups"]; + uint32 max_backups = 3; /* The fee rate, in satoshis per vbyte, that will be used by the watchtower for the justice transaction in the event of a channel breach. */ - uint32 sweep_sat_per_byte = 4 [json_name = "sweep_sat_per_byte"]; + uint32 sweep_sat_per_byte = 4; } message Tower { // The identifying public key of the watchtower. - bytes pubkey = 1 [json_name = "pubkey"]; + bytes pubkey = 1; // The list of addresses the watchtower is reachable over. - repeated string addresses = 2 [json_name = "addresses"]; + repeated string addresses = 2; // Whether the watchtower is currently a candidate for new sessions. - bool active_session_candidate = 3 [json_name = "active_session_candidate"]; + bool active_session_candidate = 3; // The number of sessions that have been negotiated with the watchtower. 
- uint32 num_sessions = 4 [json_name = "num_sessions"]; + uint32 num_sessions = 4; // The list of sessions that have been negotiated with the watchtower. - repeated TowerSession sessions = 5 [json_name = "sessions"]; + repeated TowerSession sessions = 5; } message ListTowersRequest { // Whether we should include sessions with the watchtower in the response. - bool include_sessions = 1 [json_name = "include_sessions"]; + bool include_sessions = 1; } message ListTowersResponse { // The list of watchtowers available for new backups. - repeated Tower towers = 1 [json_name = "towers"]; + repeated Tower towers = 1; } message StatsRequest { @@ -96,25 +96,25 @@ message StatsResponse { The total number of backups made to all active and exhausted watchtower sessions. */ - uint32 num_backups = 1 [json_name = "num_backups"]; + uint32 num_backups = 1; /* The total number of backups that are pending to be acknowledged by all active and exhausted watchtower sessions. */ - uint32 num_pending_backups = 2 [json_name = "num_pending_backups"]; + uint32 num_pending_backups = 2; /* The total number of backups that all active and exhausted watchtower sessions have failed to acknowledge. */ - uint32 num_failed_backups = 3 [json_name = "num_failed_backups"]; + uint32 num_failed_backups = 3; // The total number of new sessions made to watchtowers. - uint32 num_sessions_acquired = 4 [json_name = "num_sessions_acquired"]; + uint32 num_sessions_acquired = 4; // The total number of watchtower sessions that have been exhausted. - uint32 num_sessions_exhausted = 5 [json_name = "num_sessions_exhausted"]; + uint32 num_sessions_exhausted = 5; } message PolicyRequest { @@ -125,13 +125,13 @@ message PolicyResponse { The maximum number of updates each session we negotiate with watchtowers should allow. 
*/ - uint32 max_updates = 1 [json_name = "max_updates"]; + uint32 max_updates = 1; /* The fee rate, in satoshis per vbyte, that will be used by watchtowers for justice transactions in response to channel breaches. */ - uint32 sweep_sat_per_byte = 2 [json_name = "sweep_sat_per_byte"]; + uint32 sweep_sat_per_byte = 2; } service WatchtowerClient { @@ -141,7 +141,7 @@ service WatchtowerClient { any new addresses included will be considered when dialing it for session negotiations and backups. */ - rpc AddTower(AddTowerRequest) returns (AddTowerResponse); + rpc AddTower (AddTowerRequest) returns (AddTowerResponse); /* RemoveTower removes a watchtower from being considered for future session @@ -149,17 +149,17 @@ service WatchtowerClient { again. If an address is provided, then this RPC only serves as a way of removing the address from the watchtower instead. */ - rpc RemoveTower(RemoveTowerRequest) returns (RemoveTowerResponse); + rpc RemoveTower (RemoveTowerRequest) returns (RemoveTowerResponse); - // ListTowers returns the list of watchtowers registered with the client. - rpc ListTowers(ListTowersRequest) returns (ListTowersResponse); + // ListTowers returns the list of watchtowers registered with the client. + rpc ListTowers (ListTowersRequest) returns (ListTowersResponse); // GetTowerInfo retrieves information for a registered watchtower. - rpc GetTowerInfo(GetTowerInfoRequest) returns (Tower); + rpc GetTowerInfo (GetTowerInfoRequest) returns (Tower); // Stats returns the in-memory statistics of the client since startup. - rpc Stats(StatsRequest) returns (StatsResponse); + rpc Stats (StatsRequest) returns (StatsResponse); // Policy returns the active watchtower client policy configuration. 
- rpc Policy(PolicyRequest) returns (PolicyResponse); + rpc Policy (PolicyRequest) returns (PolicyResponse); } diff --git a/lntest/harness.go b/lntest/harness.go index 3f742c7552..045fd4780e 100644 --- a/lntest/harness.go +++ b/lntest/harness.go @@ -13,17 +13,17 @@ import ( "sync" "time" - "google.golang.org/grpc/grpclog" - "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/integration/rpctest" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" + "google.golang.org/grpc/grpclog" ) // DefaultCSV is the CSV delay (remotedelay) we will start our test nodes with. @@ -35,6 +35,10 @@ const DefaultCSV = 4 type NetworkHarness struct { netParams *chaincfg.Params + // lndBinary is the full path to the lnd binary that was specifically + // compiled with all required itest flags. + lndBinary string + // Miner is a reference to a running full node that can be used to create // new blocks on the network. Miner *rpctest.Harness @@ -68,7 +72,9 @@ type NetworkHarness struct { // TODO(roasbeef): add option to use golang's build library to a binary of the // current repo. 
This will save developers from having to manually `go install` // within the repo each time before changes -func NewNetworkHarness(r *rpctest.Harness, b BackendConfig) (*NetworkHarness, error) { +func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) ( + *NetworkHarness, error) { + n := NetworkHarness{ activeNodes: make(map[int]*HarnessNode), nodesByPub: make(map[string]*HarnessNode), @@ -79,6 +85,7 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig) (*NetworkHarness, er Miner: r, BackendCfg: b, quit: make(chan struct{}), + lndBinary: lndBinary, } go n.networkWatcher() return &n, nil @@ -343,7 +350,7 @@ func (n *NetworkHarness) RestoreNodeWithSeed(name string, extraArgs []string, func (n *NetworkHarness) newNode(name string, extraArgs []string, hasSeed bool, password []byte) (*HarnessNode, error) { - node, err := newNode(nodeConfig{ + node, err := newNode(NodeConfig{ Name: name, HasSeed: hasSeed, Password: password, @@ -361,14 +368,14 @@ func (n *NetworkHarness) newNode(name string, extraArgs []string, n.activeNodes[node.NodeID] = node n.mtx.Unlock() - if err := node.start(n.lndErrorChan); err != nil { + if err := node.start(n.lndBinary, n.lndErrorChan); err != nil { return nil, err } // If this node is to have a seed, it will need to be unlocked or // initialized via rpc. Delay registering it with the network until it // can be driven via an unlocked rpc connection. - if node.cfg.HasSeed { + if node.Cfg.HasSeed { return node, nil } @@ -395,7 +402,7 @@ func (n *NetworkHarness) connect(ctx context.Context, tryconnect: if _, err := a.ConnectPeer(ctx, req); err != nil { // If the chain backend is still syncing, retry. 
- if strings.Contains(err.Error(), "still syncing") { + if err == lnd.ErrServerNotActive { select { case <-time.After(100 * time.Millisecond): goto tryconnect @@ -431,7 +438,7 @@ func (n *NetworkHarness) EnsureConnected(ctx context.Context, a, b *HarnessNode) req := &lnrpc.ConnectPeerRequest{ Addr: &lnrpc.LightningAddress{ Pubkey: bInfo.IdentityPubkey, - Host: b.cfg.P2PAddr(), + Host: b.Cfg.P2PAddr(), }, } @@ -536,7 +543,7 @@ func (n *NetworkHarness) ConnectNodes(ctx context.Context, a, b *HarnessNode) er req := &lnrpc.ConnectPeerRequest{ Addr: &lnrpc.LightningAddress{ Pubkey: bobInfo.IdentityPubkey, - Host: b.cfg.P2PAddr(), + Host: b.Cfg.P2PAddr(), }, } @@ -612,20 +619,20 @@ func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error, } } - if err := node.start(n.lndErrorChan); err != nil { + if err := node.start(n.lndBinary, n.lndErrorChan); err != nil { return err } // If the node doesn't have a password set, then we can exit here as we // don't need to unlock it. - if len(node.cfg.Password) == 0 { + if len(node.Cfg.Password) == 0 { return nil } // Otherwise, we'll unlock the wallet, then complete the final steps // for the node initialization process. 
unlockReq := &lnrpc.UnlockWalletRequest{ - WalletPassword: node.cfg.Password, + WalletPassword: node.Cfg.Password, } if len(chanBackups) != 0 { unlockReq.ChannelBackups = chanBackups[0] @@ -643,7 +650,7 @@ func (n *NetworkHarness) SuspendNode(node *HarnessNode) (func() error, error) { } restart := func() error { - return node.start(n.lndErrorChan) + return node.start(n.lndBinary, n.lndErrorChan) } return restart, nil @@ -687,13 +694,13 @@ func saveProfilesPage(node *HarnessNode) error { resp, err := http.Get( fmt.Sprintf( "http://localhost:%d/debug/pprof/goroutine?debug=1", - node.cfg.ProfilePort, + node.Cfg.ProfilePort, ), ) if err != nil { return fmt.Errorf("Failed to get profile page "+ "(node_id=%d, name=%s): %v\n", - node.NodeID, node.cfg.Name, err) + node.NodeID, node.Cfg.Name, err) } defer resp.Body.Close() @@ -701,11 +708,11 @@ func saveProfilesPage(node *HarnessNode) error { if err != nil { return fmt.Errorf("Failed to read profile page "+ "(node_id=%d, name=%s): %v\n", - node.NodeID, node.cfg.Name, err) + node.NodeID, node.Cfg.Name, err) } fileName := fmt.Sprintf( - "pprof-%d-%s-%s.log", node.NodeID, node.cfg.Name, + "pprof-%d-%s-%s.log", node.NodeID, node.Cfg.Name, hex.EncodeToString(node.PubKey[:logPubKeyBytes]), ) @@ -713,7 +720,7 @@ func saveProfilesPage(node *HarnessNode) error { if err != nil { return fmt.Errorf("Failed to create file for profile page "+ "(node_id=%d, name=%s): %v\n", - node.NodeID, node.cfg.Name, err) + node.NodeID, node.Cfg.Name, err) } defer logFile.Close() @@ -721,7 +728,7 @@ func saveProfilesPage(node *HarnessNode) error { if err != nil { return fmt.Errorf("Failed to save profile page "+ "(node_id=%d, name=%s): %v\n", - node.NodeID, node.cfg.Name, err) + node.NodeID, node.Cfg.Name, err) } return nil } @@ -844,6 +851,10 @@ type OpenChannelParams struct { // MinHtlc is the htlc_minimum_msat value set when opening the channel. 
MinHtlc lnwire.MilliSatoshi + + // FundingShim is an optional funding shim that the caller can specify + // in order to modify the channel funding workflow. + FundingShim *lnrpc.FundingShim } // OpenChannel attempts to open a channel between srcNode and destNode with the @@ -879,6 +890,7 @@ func (n *NetworkHarness) OpenChannel(ctx context.Context, MinConfs: minConfs, SpendUnconfirmed: p.SpendUnconfirmed, MinHtlcMsat: int64(p.MinHtlc), + FundingShim: p.FundingShim, } respStream, err := srcNode.OpenChannel(ctx, openReq) @@ -1195,28 +1207,25 @@ func (n *NetworkHarness) AssertChannelExists(ctx context.Context, req := &lnrpc.ListChannelsRequest{} - var predErr error - pred := func() bool { + return wait.NoError(func() error { resp, err := node.ListChannels(ctx, req) if err != nil { - predErr = fmt.Errorf("unable fetch node's channels: %v", err) - return false + return fmt.Errorf("unable fetch node's channels: %v", err) } for _, channel := range resp.Channels { if channel.ChannelPoint == chanPoint.String() { - return channel.Active - } + if channel.Active { + return nil + } + return fmt.Errorf("channel %s inactive", + chanPoint) + } } - return false - } - if err := wait.Predicate(pred, time.Second*15); err != nil { - return fmt.Errorf("channel not found: %v", predErr) - } - - return nil + return fmt.Errorf("channel %s not found", chanPoint) + }, 15*time.Second) } // DumpLogs reads the current logs generated by the passed node, and returns @@ -1225,7 +1234,7 @@ func (n *NetworkHarness) AssertChannelExists(ctx context.Context, // Logs from lightning node being generated with delay - you should // add time.Sleep() in order to get all logs. 
func (n *NetworkHarness) DumpLogs(node *HarnessNode) (string, error) { - logFile := fmt.Sprintf("%v/simnet/lnd.log", node.cfg.LogDir) + logFile := fmt.Sprintf("%v/simnet/lnd.log", node.Cfg.LogDir) buf, err := ioutil.ReadFile(logFile) if err != nil { @@ -1322,7 +1331,7 @@ func (n *NetworkHarness) sendCoins(ctx context.Context, amt btcutil.Amount, err = wait.NoError(func() error { // Since neutrino doesn't support unconfirmed outputs, skip // this check. - if target.cfg.BackendCfg.Name() == "neutrino" { + if target.Cfg.BackendCfg.Name() == "neutrino" { return nil } diff --git a/lntest/itest/lnd_mpp_test.go b/lntest/itest/lnd_mpp_test.go new file mode 100644 index 0000000000..0b015b5919 --- /dev/null +++ b/lntest/itest/lnd_mpp_test.go @@ -0,0 +1,398 @@ +// +build rpctest + +package itest + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/routing/route" +) + +// testSendToRouteMultiPath tests that we are able to successfully route a +// payment using multiple shards across different paths, by using SendToRoute. +func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + + ctx := newMppTestContext(t, net) + defer ctx.shutdownNodes() + + // To ensure the payment goes through seperate paths, we'll set a + // channel size that can only carry one shard at a time. We'll divide + // the payment into 3 shards. + const ( + paymentAmt = btcutil.Amount(300000) + shardAmt = paymentAmt / 3 + chanAmt = shardAmt * 3 / 2 + ) + + // Set up a network with three different paths Alice <-> Bob. 
+ // _ Eve _ + // / \ + // Alice -- Carol ---- Bob + // \ / + // \__ Dave ____/ + // + ctx.openChannel(ctx.carol, ctx.bob, chanAmt) + ctx.openChannel(ctx.dave, ctx.bob, chanAmt) + ctx.openChannel(ctx.alice, ctx.dave, chanAmt) + ctx.openChannel(ctx.eve, ctx.bob, chanAmt) + ctx.openChannel(ctx.carol, ctx.eve, chanAmt) + + // Since the channel Alice-> Carol will have to carry two + // shards, we make it larger. + ctx.openChannel(ctx.alice, ctx.carol, chanAmt+shardAmt) + + defer ctx.closeChannels() + + ctx.waitForChannels() + + // Make Bob create an invoice for Alice to pay. + payReqs, rHashes, invoices, err := createPayReqs( + net.Bob, paymentAmt, 1, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) + } + + rHash := rHashes[0] + payReq := payReqs[0] + + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + decodeResp, err := net.Bob.DecodePayReq( + ctxt, &lnrpc.PayReqString{PayReq: payReq}, + ) + if err != nil { + t.Fatalf("decode pay req: %v", err) + } + + payAddr := decodeResp.PaymentAddr + + // Helper function for Alice to build a route from pubkeys. + buildRoute := func(amt btcutil.Amount, hops []*lntest.HarnessNode) ( + *lnrpc.Route, error) { + + rpcHops := make([][]byte, 0, len(hops)) + for _, hop := range hops { + k := hop.PubKeyStr + pubkey, err := route.NewVertexFromStr(k) + if err != nil { + return nil, fmt.Errorf("error parsing %v: %v", + k, err) + } + rpcHops = append(rpcHops, pubkey[:]) + } + + req := &routerrpc.BuildRouteRequest{ + AmtMsat: int64(amt * 1000), + FinalCltvDelta: lnd.DefaultBitcoinTimeLockDelta, + HopPubkeys: rpcHops, + } + + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + routeResp, err := net.Alice.RouterClient.BuildRoute(ctxt, req) + if err != nil { + return nil, err + } + + return routeResp.Route, nil + } + + // We'll send shards along three routes from Alice. 
+ sendRoutes := [][]*lntest.HarnessNode{ + {ctx.carol, ctx.bob}, + {ctx.dave, ctx.bob}, + {ctx.carol, ctx.eve, ctx.bob}, + } + + responses := make(chan *routerrpc.SendToRouteResponse, len(sendRoutes)) + for _, hops := range sendRoutes { + // Build a route for the specified hops. + r, err := buildRoute(shardAmt, hops) + if err != nil { + t.Fatalf("unable to build route: %v", err) + } + + // Set the MPP records to indicate this is a payment shard. + hop := r.Hops[len(r.Hops)-1] + hop.TlvPayload = true + hop.MppRecord = &lnrpc.MPPRecord{ + PaymentAddr: payAddr, + TotalAmtMsat: int64(paymentAmt * 1000), + } + + // Send the shard. + sendReq := &routerrpc.SendToRouteRequest{ + PaymentHash: rHash, + Route: r, + } + + // We'll send all shards in their own goroutine, since SendToRoute will + // block as long as the payment is in flight. + go func() { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + resp, err := net.Alice.RouterClient.SendToRoute(ctxt, sendReq) + if err != nil { + t.Fatalf("unable to send payment: %v", err) + } + + responses <- resp + }() + } + + // Wait for all responses to be back, and check that they all + // succeeded. + for range sendRoutes { + var resp *routerrpc.SendToRouteResponse + select { + case resp = <-responses: + case <-time.After(defaultTimeout): + t.Fatalf("response not received") + } + + if resp.Failure != nil { + t.Fatalf("received payment failure : %v", resp.Failure) + } + + // All shards should come back with the preimage. + if !bytes.Equal(resp.Preimage, invoices[0].RPreimage) { + t.Fatalf("preimage doesn't match") + } + } + + // assertNumHtlcs is a helper that checks the node's latest payment, + // and asserts it was split into num shards. 
+ assertNumHtlcs := func(node *lntest.HarnessNode, num int) { + req := &lnrpc.ListPaymentsRequest{ + IncludeIncomplete: true, + } + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + paymentsResp, err := node.ListPayments(ctxt, req) + if err != nil { + t.Fatalf("error when obtaining payments: %v", + err) + } + + payments := paymentsResp.Payments + if len(payments) == 0 { + t.Fatalf("no payments found") + } + + payment := payments[len(payments)-1] + htlcs := payment.Htlcs + if len(htlcs) == 0 { + t.Fatalf("no htlcs") + } + + succeeded := 0 + for _, htlc := range htlcs { + if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED { + succeeded++ + } + } + + if succeeded != num { + t.Fatalf("expected %v succussful HTLCs, got %v", num, + succeeded) + } + } + + // assertSettledInvoice checks that the invoice for the given payment + // hash is settled, and has been paid using num HTLCs. + assertSettledInvoice := func(node *lntest.HarnessNode, rhash []byte, + num int) { + + found := false + offset := uint64(0) + for !found { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + invoicesResp, err := node.ListInvoices( + ctxt, &lnrpc.ListInvoiceRequest{ + IndexOffset: offset, + }, + ) + if err != nil { + t.Fatalf("error when obtaining payments: %v", + err) + } + + if len(invoicesResp.Invoices) == 0 { + break + } + + for _, inv := range invoicesResp.Invoices { + if !bytes.Equal(inv.RHash, rhash) { + continue + } + + // Assert that the amount paid to the invoice is + // correct. 
+ if inv.AmtPaidSat != int64(paymentAmt) { + t.Fatalf("incorrect payment amt for "+ + "invoicewant: %d, got %d", + paymentAmt, inv.AmtPaidSat) + } + + if inv.State != lnrpc.Invoice_SETTLED { + t.Fatalf("Invoice not settled: %v", + inv.State) + } + + if len(inv.Htlcs) != num { + t.Fatalf("expected invoice to be "+ + "settled with %v HTLCs, had %v", + num, len(inv.Htlcs)) + } + + found = true + break + } + + offset = invoicesResp.LastIndexOffset + } + + if !found { + t.Fatalf("invoice not found") + } + } + + // Finally check that the payment shows up with three settled HTLCs in + // Alice's list of payments... + assertNumHtlcs(net.Alice, 3) + + // ...and in Bob's list of paid invoices. + assertSettledInvoice(net.Bob, rHash, 3) +} + +type mppTestContext struct { + t *harnessTest + net *lntest.NetworkHarness + + // Keep a list of all our active channels. + networkChans []*lnrpc.ChannelPoint + closeChannelFuncs []func() + + alice, bob, carol, dave, eve *lntest.HarnessNode + nodes []*lntest.HarnessNode +} + +func newMppTestContext(t *harnessTest, + net *lntest.NetworkHarness) *mppTestContext { + + ctxb := context.Background() + + // Create a five-node context consisting of Alice, Bob and three new + // nodes. + carol, err := net.NewNode("carol", nil) + if err != nil { + t.Fatalf("unable to create carol: %v", err) + } + + dave, err := net.NewNode("dave", nil) + if err != nil { + t.Fatalf("unable to create dave: %v", err) + } + + eve, err := net.NewNode("eve", nil) + if err != nil { + t.Fatalf("unable to create eve: %v", err) + } + + // Connect nodes to ensure propagation of channels. 
+ nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol, dave, eve} + for i := 0; i < len(nodes); i++ { + for j := i + 1; j < len(nodes); j++ { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + if err := net.EnsureConnected(ctxt, nodes[i], nodes[j]); err != nil { + t.Fatalf("unable to connect nodes: %v", err) + } + } + } + + ctx := mppTestContext{ + t: t, + net: net, + alice: net.Alice, + bob: net.Bob, + carol: carol, + dave: dave, + eve: eve, + nodes: nodes, + } + + return &ctx +} + +// openChannel is a helper to open a channel from->to. +func (c *mppTestContext) openChannel(from, to *lntest.HarnessNode, chanSize btcutil.Amount) { + ctxb := context.Background() + + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + err := c.net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, from) + if err != nil { + c.t.Fatalf("unable to send coins : %v", err) + } + + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + chanPoint := openChannelAndAssert( + ctxt, c.t, c.net, from, to, + lntest.OpenChannelParams{ + Amt: chanSize, + }, + ) + + c.closeChannelFuncs = append(c.closeChannelFuncs, func() { + ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert( + ctxt, c.t, c.net, from, chanPoint, false, + ) + }) + + c.networkChans = append(c.networkChans, chanPoint) +} + +func (c *mppTestContext) closeChannels() { + for _, f := range c.closeChannelFuncs { + f() + } +} + +func (c *mppTestContext) shutdownNodes() { + shutdownAndAssert(c.net, c.t, c.carol) + shutdownAndAssert(c.net, c.t, c.dave) + shutdownAndAssert(c.net, c.t, c.eve) +} + +func (c *mppTestContext) waitForChannels() { + ctxb := context.Background() + + // Wait for all nodes to have seen all channels. 
+ for _, chanPoint := range c.networkChans { + for _, node := range c.nodes { + txid, err := lnd.GetChanPointFundingTxid(chanPoint) + if err != nil { + c.t.Fatalf("unable to get txid: %v", err) + } + point := wire.OutPoint{ + Hash: *txid, + Index: chanPoint.OutputIndex, + } + + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) + if err != nil { + c.t.Fatalf("(%d): timeout waiting for "+ + "channel(%s) open: %v", + node.NodeID, point, err) + } + } + } +} diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go index 0b155c9776..16ac398763 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -18,17 +18,19 @@ import ( ) // testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if -// we're forced to go to chain with an incoming HTLC, then when we find out the -// preimage via the witness beacon, we properly settle the HTLC on-chain in -// order to ensure we don't lose any funds. -func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) { +// we force close a channel with an incoming HTLC, and later find out the +// preimage via the witness beacon, we properly settle the HTLC on-chain using +// the HTLC success transaction in order to ensure we don't lose any funds. +func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, + alice, bob *lntest.HarnessNode, c commitType) { + ctxb := context.Background() // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, false, + t, net, alice, bob, false, c, ) // Clean up carol's node when the test finishes. 
@@ -59,7 +61,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -73,7 +75,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // At this point, all 3 nodes should now have an active channel with // the created HTLC pending on all of them. var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { @@ -94,18 +96,25 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // At this point, Bob decides that he wants to exit the channel // immediately, so he force closes his commitment transaction. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - bobForceClose := closeChannelAndAssert(ctxt, t, net, net.Bob, - aliceChanPoint, true) + bobForceClose := closeChannelAndAssertType(ctxt, t, net, bob, + aliceChanPoint, c == commitTypeAnchors, true) - // Alice will sweep her output immediately. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + // Alice will sweep her commitment output immediately. If there are + // anchors, Alice will also sweep hers. + expectedTxes := 1 + if c == commitTypeAnchors { + expectedTxes = 2 + } + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) if err != nil { t.Fatalf("unable to find alice's sweep tx in miner mempool: %v", err) } // Suspend Bob to force Carol to go to chain. 
- restartBob, err := net.SuspendNode(net.Bob) + restartBob, err := net.SuspendNode(bob) if err != nil { t.Fatalf("unable to suspend bob: %v", err) } @@ -133,8 +142,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) t.Fatalf("unable to generate blocks") } - // Carol's commitment transaction should now be in the mempool. - txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout) + // Carol's commitment transaction should now be in the mempool. If there + // is an anchor, Carol will sweep that too. + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) if err != nil { t.Fatalf("transactions not found in mempool: %v", err) } @@ -147,63 +159,61 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) Index: bobChanPoint.OutputIndex, } - // The tx should be spending from the funding transaction, - commitHash := txids[0] - tx1, err := net.Miner.Node.GetRawTransaction(commitHash) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint { - t.Fatalf("commit transaction not spending fundingtx: %v", - spew.Sdump(tx1)) - } + // Look up the closing transaction. It should be spending from the + // funding transaction, + closingTx := getSpendingTxInMempool( + t, net.Miner.Node, minerMempoolTimeout, carolFundingPoint, + ) + closingTxid := closingTx.TxHash() - // Mine a block that should confirm the commit tx. - block := mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("expected 2 transactions in block, got %v", - len(block.Transactions)) + // Mine a block that should confirm the commit tx, the anchor if present + // and the coinbase. 
+ block := mineBlocks(t, net, 1, expectedTxes)[0] + if len(block.Transactions) != expectedTxes+1 { + t.Fatalf("expected %v transactions in block, got %v", + expectedTxes+1, len(block.Transactions)) } - assertTxInBlock(t, block, commitHash) + assertTxInBlock(t, block, &closingTxid) // Restart bob again. if err := restartBob(); err != nil { t.Fatalf("unable to restart bob: %v", err) } - // After the force close transacion is mined, Carol should broadcast - // her second level HTLC transacion. Bob will broadcast a sweep tx to - // sweep his output in the channel with Carol. He can do this - // immediately, as the output is not timelocked since Carol was the one - // force closing. - commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2, - minerMempoolTimeout) + // After the force close transacion is mined, Carol should broadcast her + // second level HTLC transacion. Bob will broadcast a sweep tx to sweep + // his output in the channel with Carol. He can do this immediately, as + // the output is not timelocked since Carol was the one force closing. + // If there are anchors on the commitment, Bob will also sweep his + // anchor. + expectedTxes = 2 + if c == commitTypeAnchors { + expectedTxes = 3 + } + txes, err := getNTxsFromMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) if err != nil { t.Fatalf("transactions not found in mempool: %v", err) } // Both Carol's second level transaction and Bob's sweep should be // spending from the commitment transaction. - for _, txid := range commitSpends { - tx, err := net.Miner.Node.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } + assertAllTxesSpendFrom(t, txes, closingTxid) - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash { - t.Fatalf("tx did not spend from commitment tx") - } + // At this point we suspend Alice to make sure she'll handle the + // on-chain settle after a restart. 
+ restartAlice, err := net.SuspendNode(alice) + if err != nil { + t.Fatalf("unable to suspend alice: %v", err) } // Mine a block to confirm the two transactions (+ the coinbase). - block = mineBlocks(t, net, 1, 2)[0] - if len(block.Transactions) != 3 { + block = mineBlocks(t, net, 1, expectedTxes)[0] + if len(block.Transactions) != expectedTxes+1 { t.Fatalf("expected 3 transactions in block, got %v", len(block.Transactions)) } - for _, txid := range commitSpends { - assertTxInBlock(t, block, txid) - } // Keep track of the second level tx maturity. carolSecondLevelCSV := uint32(defaultCSV) @@ -232,7 +242,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) pendingChansRequest := &lnrpc.PendingChannelsRequest{} err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -285,6 +295,12 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) bobSecondLevelCSV := uint32(defaultCSV) carolSecondLevelCSV-- + // Now that the preimage from Bob has hit the chain, restart Alice to + // ensure she'll pick it up. + if err := restartAlice(); err != nil { + t.Fatalf("unable to restart alice: %v", err) + } + // If we then mine 3 additional blocks, Carol's second level tx should // mature, and she can pull the funds from it with a sweep tx. 
if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil { @@ -324,7 +340,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -339,7 +355,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) } req := &lnrpc.ListChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanInfo, err := net.Bob.ListChannels(ctxt, req) + chanInfo, err := bob.ListChannels(ctxt, req) if err != nil { predErr = fmt.Errorf("unable to query for open "+ "channels: %v", err) @@ -393,31 +409,14 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) if err != nil { t.Fatalf(predErr.Error()) } -} -// waitForInvoiceAccepted waits until the specified invoice moved to the -// accepted state by the node. -func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode, - payHash lntypes.Hash) { - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - invoiceUpdates, err := node.SubscribeSingleInvoice(ctx, - &invoicesrpc.SubscribeSingleInvoiceRequest{ - RHash: payHash[:], - }, + // Finally, check that the Alice's payment is correctly marked + // succeeded. 
+ ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) + err = checkPaymentStatus( + ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, ) if err != nil { - t.Fatalf("subscribe single invoice: %v", err) - } - - for { - update, err := invoiceUpdates.Recv() - if err != nil { - t.Fatalf("invoice update err: %v", err) - } - if update.State == lnrpc.Invoice_ACCEPTED { - break - } + t.Fatalf(err.Error()) } } diff --git a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go new file mode 100644 index 0000000000..1a7feed46d --- /dev/null +++ b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go @@ -0,0 +1,289 @@ +// +build rpctest + +package itest + +import ( + "context" + "fmt" + "time" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" +) + +// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the +// outgoing HTLC is about to time out, then we'll go to chain in order to claim +// it using the HTLC timeout transaction. Any dust HTLC's should be immediately +// canceled backwards. Once the timeout has been reached, then we should sweep +// it on-chain, and cancel the HTLC backwards. +func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, + alice, bob *lntest.HarnessNode, c commitType) { + + ctxb := context.Background() + + // First, we'll create a three hop network: Alice -> Bob -> Carol, with + // Carol refusing to actually settle or directly cancel any HTLC's + // self. + aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( + t, net, alice, bob, true, c, + ) + + // Clean up carol's node when the test finishes. 
+ defer shutdownAndAssert(net, t, carol) + + time.Sleep(time.Second * 1) + + // Now that our channels are set up, we'll send two HTLC's from Alice + // to Carol. The first HTLC will be universally considered "dust", + // while the second will be a proper fully valued HTLC. + const ( + dustHtlcAmt = btcutil.Amount(100) + htlcAmt = btcutil.Amount(30000) + finalCltvDelta = 40 + ) + + ctx, cancel := context.WithCancel(ctxb) + defer cancel() + + alicePayStream, err := alice.SendPayment(ctx) + if err != nil { + t.Fatalf("unable to create payment stream for alice: %v", err) + } + + // We'll create two random payment hashes unknown to carol, then send + // each of them by manually specifying the HTLC details. + carolPubKey := carol.PubKey[:] + dustPayHash := makeFakePayHash(t) + payHash := makeFakePayHash(t) + err = alicePayStream.Send(&lnrpc.SendRequest{ + Dest: carolPubKey, + Amt: int64(dustHtlcAmt), + PaymentHash: dustPayHash, + FinalCltvDelta: finalCltvDelta, + }) + if err != nil { + t.Fatalf("unable to send alice htlc: %v", err) + } + err = alicePayStream.Send(&lnrpc.SendRequest{ + Dest: carolPubKey, + Amt: int64(htlcAmt), + PaymentHash: payHash, + FinalCltvDelta: finalCltvDelta, + }) + if err != nil { + t.Fatalf("unable to send alice htlc: %v", err) + } + + // Verify that all nodes in the path now have two HTLC's with the + // proper parameters. + var predErr error + nodes := []*lntest.HarnessNode{alice, bob, carol} + err = wait.Predicate(func() bool { + predErr = assertActiveHtlcs(nodes, dustPayHash, payHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", predErr) + } + + // We'll now mine enough blocks to trigger Bob's broadcast of his + // commitment transaction due to the fact that the HTLC is about to + // timeout. With the default outgoing broadcast delta of zero, this will + // be the same height as the htlc expiry height. 
+ numBlocks := padCLTV( + uint32(finalCltvDelta - lnd.DefaultOutgoingBroadcastDelta), + ) + if _, err := net.Miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + // Bob's force close transaction should now be found in the mempool. If + // there are anchors, we also expect Bob's anchor sweep. + expectedTxes := 1 + if c == commitTypeAnchors { + expectedTxes = 2 + } + + bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) + if err != nil { + t.Fatalf("unable to get txid: %v", err) + } + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) + if err != nil { + t.Fatalf("unable to find closing txid: %v", err) + } + closeTx := getSpendingTxInMempool( + t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ + Hash: *bobFundingTxid, + Index: bobChanPoint.OutputIndex, + }, + ) + closeTxid := closeTx.TxHash() + + // Mine a block to confirm the closing transaction. + mineBlocks(t, net, 1, expectedTxes) + + // At this point, Bob should have canceled backwards the dust HTLC + // that we sent earlier. This means Alice should now only have a single + // HTLC on her channel. + nodes = []*lntest.HarnessNode{alice} + err = wait.Predicate(func() bool { + predErr = assertActiveHtlcs(nodes, payHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", predErr) + } + + // With the closing transaction confirmed, we should expect Bob's HTLC + // timeout transaction to be broadcast due to the expiry being reached. + // If there are anchors, we also expect Carol's anchor sweep now. + txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's htlc timeout tx: %v", err) + } + + // Lookup the timeout transaction that is expected to spend from the + // closing tx. We distinguish it from a possibly anchor sweep by value. 
+ var htlcTimeout *chainhash.Hash + for _, tx := range txes { + prevOp := tx.TxIn[0].PreviousOutPoint + if prevOp.Hash != closeTxid { + t.Fatalf("tx not spending from closing tx") + } + + // Assume that the timeout tx doesn't spend an output of exactly + // the size of the anchor. + if closeTx.TxOut[prevOp.Index].Value != anchorSize { + hash := tx.TxHash() + htlcTimeout = &hash + } + } + if htlcTimeout == nil { + t.Fatalf("htlc timeout tx not found in mempool") + } + + // We'll mine the remaining blocks in order to generate the sweep + // transaction of Bob's commitment output. + mineBlocks(t, net, defaultCSV, expectedTxes) + + // Check that the sweep spends from the mined commitment. + txes, err = getNTxsFromMempool(net.Miner.Node, 1, minerMempoolTimeout) + if err != nil { + t.Fatalf("sweep not found: %v", err) + } + assertAllTxesSpendFrom(t, txes, closeTxid) + + // Bob's pending channel report should show that he has a commitment + // output awaiting sweeping, and also that there's an outgoing HTLC + // output pending. + pendingChansRequest := &lnrpc.PendingChannelsRequest{} + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest) + if err != nil { + t.Fatalf("unable to query for pending channels: %v", err) + } + + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + t.Fatalf("bob should have pending for close chan but doesn't") + } + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if forceCloseChan.LimboBalance == 0 { + t.Fatalf("bob should have nonzero limbo balance instead "+ + "has: %v", forceCloseChan.LimboBalance) + } + if len(forceCloseChan.PendingHtlcs) == 0 { + t.Fatalf("bob should have pending htlc but doesn't") + } + + // Now we'll mine an additional block, which should confirm Bob's commit + // sweep. This block should also prompt Bob to broadcast their second + // layer sweep due to the CSV on the HTLC timeout output. 
+ mineBlocks(t, net, 1, 1) + assertSpendingTxInMempool( + t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ + Hash: *htlcTimeout, + Index: 0, + }, + ) + + // The block should have confirmed Bob's HTLC timeout transaction. + // Therefore, at this point, there should be no active HTLC's on the + // commitment transaction from Alice -> Bob. + nodes = []*lntest.HarnessNode{alice} + err = wait.Predicate(func() bool { + predErr = assertNumActiveHtlcs(nodes, 0) + if predErr != nil { + return false + } + return true + }, time.Second*15) + if err != nil { + t.Fatalf("alice's channel still has active htlc's: %v", predErr) + } + + // At this point, Bob should show that the pending HTLC has advanced to + // the second stage and is to be swept. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest) + if err != nil { + t.Fatalf("unable to query for pending channels: %v", err) + } + forceCloseChan = pendingChanResp.PendingForceClosingChannels[0] + if forceCloseChan.PendingHtlcs[0].Stage != 2 { + t.Fatalf("bob's htlc should have advanced to the second stage: %v", err) + } + + // Next, we'll mine a final block that should confirm the second-layer + // sweeping transaction. + if _, err := net.Miner.Node.Generate(1); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + // Once this transaction has been confirmed, Bob should detect that he + // no longer has any pending channels. 
+ err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) != 0 { + predErr = fmt.Errorf("bob still has pending "+ + "channels but shouldn't: %v", + spew.Sdump(pendingChanResp)) + return false + } + + return true + + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // Coop close channel, expect no anchors. + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssertType( + ctxt, t, net, alice, aliceChanPoint, false, + false, + ) +} diff --git a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go index fc497aec1a..d7335311e1 100644 --- a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go @@ -7,7 +7,6 @@ import ( "fmt" "time" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd" @@ -20,17 +19,20 @@ import ( // testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the // receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC -// off-chain, then it goes on chain to claim the HTLC. In this scenario, the -// node that sent the outgoing HTLC should extract the preimage from the sweep -// transaction, and finish settling the HTLC backwards into the route. -func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) { +// off-chain, then it goes on chain to claim the HTLC uing the HTLC success +// transaction. 
In this scenario, the node that sent the outgoing HTLC should +// extract the preimage from the sweep transaction, and finish settling the +// HTLC backwards into the route. +func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, + alice, bob *lntest.HarnessNode, c commitType) { + ctxb := context.Background() // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, false, + t, net, alice, bob, false, c, ) // Clean up carol's node when the test finishes. @@ -61,7 +63,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -75,7 +77,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // At this point, all 3 nodes should now have an active channel with // the created HTLC pending on all of them. var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { @@ -93,7 +95,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // hop logic. waitForInvoiceAccepted(t, carol, payHash) - restartBob, err := net.SuspendNode(net.Bob) + restartBob, err := net.SuspendNode(bob) if err != nil { t.Fatalf("unable to suspend bob: %v", err) } @@ -123,8 +125,15 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) } // At this point, Carol should broadcast her active commitment - // transaction in order to go to the chain and sweep her HTLC. 
- txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout) + // transaction in order to go to the chain and sweep her HTLC. If there + // are anchors, Carol also sweeps hers. + expectedTxes := 1 + if c == commitTypeAnchors { + expectedTxes = 2 + } + txes, err := getNTxsFromMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) if err != nil { t.Fatalf("expected transaction not found in mempool: %v", err) } @@ -141,20 +150,13 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // The commitment transaction should be spending from the funding // transaction. - commitHash := txids[0] - tx, err := net.Miner.Node.GetRawTransaction(commitHash) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - commitTx := tx.MsgTx() - - if commitTx.TxIn[0].PreviousOutPoint != carolFundingPoint { - t.Fatalf("commit transaction not spending from expected "+ - "outpoint: %v", spew.Sdump(commitTx)) - } + closingTx := getSpendingTxInMempool( + t, net.Miner.Node, minerMempoolTimeout, carolFundingPoint, + ) + closingTxid := closingTx.TxHash() // Confirm the commitment. - mineBlocks(t, net, 1, 1) + mineBlocks(t, net, 1, expectedTxes) // Restart bob again. if err := restartBob(); err != nil { @@ -164,30 +166,21 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // After the force close transaction is mined, Carol should broadcast // her second level HTLC transaction. Bob will broadcast a sweep tx to // sweep his output in the channel with Carol. When Bob notices Carol's - // second level transaction in the mempool, he will extract the - // preimage and settle the HTLC back off-chain. - secondLevelHashes, err := waitForNTxsInMempool(net.Miner.Node, 2, - minerMempoolTimeout) + // second level transaction in the mempool, he will extract the preimage + // and settle the HTLC back off-chain. Bob will also sweep his anchor, + // if present. 
+ expectedTxes = 2 + if c == commitTypeAnchors { + expectedTxes = 3 + } + txes, err = getNTxsFromMempool(net.Miner.Node, + expectedTxes, minerMempoolTimeout) if err != nil { t.Fatalf("transactions not found in mempool: %v", err) } - // Carol's second level transaction should be spending from - // the commitment transaction. - var secondLevelHash *chainhash.Hash - for _, txid := range secondLevelHashes { - tx, err := net.Miner.Node.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash == *commitHash { - secondLevelHash = txid - } - } - if secondLevelHash == nil { - t.Fatalf("Carol's second level tx not found") - } + // All transactions should be spending from the commitment transaction. + assertAllTxesSpendFrom(t, txes, closingTxid) // We'll now mine an additional block which should confirm both the // second layer transactions. @@ -230,7 +223,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // Once the second-level transaction confirmed, Bob should have // extracted the preimage from the chain, and sent it back to Alice, // clearing the HTLC off-chain. - nodes = []*lntest.HarnessNode{net.Alice} + nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { @@ -298,8 +291,21 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) "%d sat", invoiceAmt, invoice.AmtPaidSat) } + // Finally, check that the Alice's payment is correctly marked + // succeeded. + ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) + err = checkPaymentStatus( + ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, + ) + if err != nil { + t.Fatalf(err.Error()) + } + // We'll close out the channel between Alice and Bob, then shutdown // carol to conclude the test. 
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) + closeChannelAndAssertType( + ctxt, t, net, alice, aliceChanPoint, + false, false, + ) } diff --git a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go index 14894e99cc..c73a066bf3 100644 --- a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go @@ -20,15 +20,18 @@ import ( // testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario, // if the remote party goes to chain while we have an incoming HTLC, then when // we found out the preimage via the witness beacon, we properly settle the -// HTLC on-chain in order to ensure that we don't lose any funds. -func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest) { +// HTLC directly on-chain using the preimage in order to ensure that we don't +// lose any funds. +func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest, + alice, bob *lntest.HarnessNode, c commitType) { + ctxb := context.Background() // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, false, + t, net, alice, bob, false, c, ) // Clean up carol's node when the test finishes. 
@@ -58,7 +61,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -72,7 +75,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // At this point, all 3 nodes should now have an active channel with // the created HTLC pending on all of them. var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { @@ -94,12 +97,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // immediately force close the channel by broadcast her commitment // transaction. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - aliceForceClose := closeChannelAndAssert(ctxt, t, net, net.Alice, - aliceChanPoint, true) + aliceForceClose := closeChannelAndAssertType(ctxt, t, net, alice, + aliceChanPoint, c == commitTypeAnchors, true) // Wait for the channel to be marked pending force close. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForChannelPendingForceClose(ctxt, net.Alice, aliceChanPoint) + err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint) if err != nil { t.Fatalf("channel not pending force close: %v", err) } @@ -111,14 +114,19 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest t.Fatalf("unable to generate blocks: %v", err) } - // Alice should now sweep her funds. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + // Alice should now sweep her funds. If there are anchors, Alice should + // also sweep hers. 
+ expectedTxes := 1 + if c == commitTypeAnchors { + expectedTxes = 2 + } + _, err = waitForNTxsInMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout) if err != nil { t.Fatalf("unable to find sweeping tx in mempool: %v", err) } // Suspend bob, so Carol is forced to go on chain. - restartBob, err := net.SuspendNode(net.Bob) + restartBob, err := net.SuspendNode(bob) if err != nil { t.Fatalf("unable to suspend bob: %v", err) } @@ -146,10 +154,13 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest t.Fatalf("unable to generate blocks") } - // Carol's commitment transaction should now be in the mempool. - txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout) + // Carol's commitment transaction should now be in the mempool. If there + // are anchors, Carol also sweeps her anchor. + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) + t.Fatalf("unable to find carol's txes: %v", err) } bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) if err != nil { @@ -160,63 +171,51 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest Index: bobChanPoint.OutputIndex, } - // The transaction should be spending from the funding transaction - commitHash := txids[0] - tx1, err := net.Miner.Node.GetRawTransaction(commitHash) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint { - t.Fatalf("commit transaction not spending fundingtx: %v", - spew.Sdump(tx1)) - } + // The closing transaction should be spending from the funding + // transaction. + closingTx := getSpendingTxInMempool( + t, net.Miner.Node, minerMempoolTimeout, carolFundingPoint, + ) + closingTxid := closingTx.TxHash() - // Mine a block, which should contain the commitment. 
- block := mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("expected 2 transactions in block, got %v", - len(block.Transactions)) + // Mine a block, which should contain: the commitment, possibly an + // anchor sweep and the coinbase tx. + block := mineBlocks(t, net, 1, expectedTxes)[0] + if len(block.Transactions) != expectedTxes+1 { + t.Fatalf("expected %v transactions in block, got %v", + expectedTxes, len(block.Transactions)) } - assertTxInBlock(t, block, commitHash) + assertTxInBlock(t, block, &closingTxid) // Restart bob again. if err := restartBob(); err != nil { t.Fatalf("unable to restart bob: %v", err) } - // After the force close transacion is mined, Carol should broadcast - // her second level HTLC transacion. Bob will broadcast a sweep tx to - // sweep his output in the channel with Carol. He can do this - // immediately, as the output is not timelocked since Carol was the one - // force closing. - commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2, + // After the force close transacion is mined, Carol should broadcast her + // second level HTLC transacion. Bob will broadcast a sweep tx to sweep + // his output in the channel with Carol. He can do this immediately, as + // the output is not timelocked since Carol was the one force closing. + // If there are anchors, Bob should also sweep his. + expectedTxes = 2 + if c == commitTypeAnchors { + expectedTxes = 3 + } + txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout) if err != nil { t.Fatalf("transactions not found in mempool: %v", err) } - // Both Carol's second level transaction and Bob's sweep should be - // spending from the commitment transaction. 
- for _, txid := range commitSpends {
- tx, err := net.Miner.Node.GetRawTransaction(txid)
- if err != nil {
- t.Fatalf("unable to get txn: %v", err)
- }
-
- if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
- t.Fatalf("tx did not spend from commitment tx")
- }
- }
+ // All transactions should be spending from the commitment transaction.
+ assertAllTxesSpendFrom(t, txes, closingTxid)
 
 	// Mine a block to confirm the two transactions (+ coinbase).
- block = mineBlocks(t, net, 1, 2)[0]
- if len(block.Transactions) != 3 {
+ block = mineBlocks(t, net, 1, expectedTxes)[0]
+ if len(block.Transactions) != expectedTxes+1 {
 		t.Fatalf("expected 3 transactions in block, got %v",
 			len(block.Transactions))
 	}
 
- for _, txid := range commitSpends {
- assertTxInBlock(t, block, txid)
- }
 
 	// Keep track of the second level tx maturity.
 	carolSecondLevelCSV := uint32(defaultCSV)
@@ -255,7 +254,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
 	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
 	err = wait.Predicate(func() bool {
 		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
- pendingChanResp, err := net.Bob.PendingChannels(
+ pendingChanResp, err := bob.PendingChannels(
 			ctxt, pendingChansRequest,
 		)
 		if err != nil {
@@ -332,4 +331,14 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
 		t.Fatalf("expected invoice to be settled with %d sat, got "+
 			"%d sat", invoiceAmt, invoice.AmtPaidSat)
 	}
+
+ // Finally, check that Alice's payment is correctly marked
+ // succeeded.
+ ctxt, _ = context.WithTimeout(ctxt, defaultTimeout)
+ err = checkPaymentStatus(
+ ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
+ )
+ if err != nil {
+ t.Fatalf(err.Error())
+ }
 }
diff --git a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go
new file mode 100644
index 0000000000..d51d2c3d2c
--- /dev/null
+++ b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go
@@ -0,0 +1,293 @@
+// +build rpctest
+
+package itest
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/btcsuite/btcutil"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/lightningnetwork/lnd/lnrpc"
+ "github.com/lightningnetwork/lnd/lntest"
+ "github.com/lightningnetwork/lnd/lntest/wait"
+)
+
+// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
+// scenario, if the node that extended the HTLC to the final node closes their
+// commitment on-chain early, then it eventually recognizes this HTLC as one
+// that's timed out. At this point, the node should timeout the HTLC using the
+// HTLC timeout transaction, then cancel it backwards as normal.
+func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
+ t *harnessTest, alice, bob *lntest.HarnessNode, c commitType) {
+
+ ctxb := context.Background()
+
+ // First, we'll create a three hop network: Alice -> Bob -> Carol, with
+ // Carol refusing to actually settle or directly cancel any HTLCs
+ // itself.
+ aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
+ t, net, alice, bob, true, c,
+ )
+
+ // Clean up carol's node when the test finishes.
+ defer shutdownAndAssert(net, t, carol)
+
+ // With our channels set up, we'll then send a single HTLC from Alice
+ // to Carol. As Carol is in hodl mode, she won't settle this HTLC which
+ // opens up the basis for our tests.
+ const ( + finalCltvDelta = 40 + htlcAmt = btcutil.Amount(30000) + ) + ctx, cancel := context.WithCancel(ctxb) + defer cancel() + + alicePayStream, err := alice.SendPayment(ctx) + if err != nil { + t.Fatalf("unable to create payment stream for alice: %v", err) + } + + // We'll now send a single HTLC across our multi-hop network. + carolPubKey := carol.PubKey[:] + payHash := makeFakePayHash(t) + err = alicePayStream.Send(&lnrpc.SendRequest{ + Dest: carolPubKey, + Amt: int64(htlcAmt), + PaymentHash: payHash, + FinalCltvDelta: finalCltvDelta, + }) + if err != nil { + t.Fatalf("unable to send alice htlc: %v", err) + } + + // Once the HTLC has cleared, all channels in our mini network should + // have the it locked in. + var predErr error + nodes := []*lntest.HarnessNode{alice, bob, carol} + err = wait.Predicate(func() bool { + predErr = assertActiveHtlcs(nodes, payHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", err) + } + + // Now that all parties have the HTLC locked in, we'll immediately + // force close the Bob -> Carol channel. This should trigger contract + // resolution mode for both of them. + ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssertType( + ctxt, t, net, bob, bobChanPoint, c == commitTypeAnchors, true, + ) + + // At this point, Bob should have a pending force close channel as he + // just went to chain. 
+ pendingChansRequest := &lnrpc.PendingChannelsRequest{} + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := bob.PendingChannels(ctxt, + pendingChansRequest) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending for " + + "close chan but doesn't") + return false + } + + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if forceCloseChan.LimboBalance == 0 { + predErr = fmt.Errorf("bob should have nonzero limbo "+ + "balance instead has: %v", + forceCloseChan.LimboBalance) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // We'll mine defaultCSV blocks in order to generate the sweep + // transaction of Bob's funding output. If there are anchors, mine + // Carol's anchor sweep too. + if c == commitTypeAnchors { + _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find carol's anchor sweep tx: %v", err) + } + } + + if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's funding output sweep tx: %v", err) + } + + // We'll now mine enough blocks for the HTLC to expire. After this, Bob + // should hand off the now expired HTLC output to the utxo nursery. + numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1)) + if _, err := net.Miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + // Bob's pending channel report should show that he has a single HTLC + // that's now in stage one. 
+ err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending force " + + "close chan but doesn't") + return false + } + + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if len(forceCloseChan.PendingHtlcs) != 1 { + predErr = fmt.Errorf("bob should have pending htlc " + + "but doesn't") + return false + } + if forceCloseChan.PendingHtlcs[0].Stage != 1 { + predErr = fmt.Errorf("bob's htlc should have "+ + "advanced to the first stage: %v", err) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) + } + + // We should also now find a transaction in the mempool, as Bob should + // have broadcast his second layer timeout transaction. + timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's htlc timeout tx: %v", err) + } + + // Next, we'll mine an additional block. This should serve to confirm + // the second layer timeout transaction. + block := mineBlocks(t, net, 1, 1)[0] + assertTxInBlock(t, block, timeoutTx) + + // With the second layer timeout transaction confirmed, Bob should have + // canceled backwards the HTLC that carol sent. + nodes = []*lntest.HarnessNode{alice} + err = wait.Predicate(func() bool { + predErr = assertNumActiveHtlcs(nodes, 0) + if predErr != nil { + return false + } + return true + }, time.Second*15) + if err != nil { + t.Fatalf("alice's channel still has active htlc's: %v", predErr) + } + + // Additionally, Bob should now show that HTLC as being advanced to the + // second stage. 
+ err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending for " + + "close chan but doesn't") + return false + } + + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if len(forceCloseChan.PendingHtlcs) != 1 { + predErr = fmt.Errorf("bob should have pending htlc " + + "but doesn't") + return false + } + if forceCloseChan.PendingHtlcs[0].Stage != 2 { + predErr = fmt.Errorf("bob's htlc should have "+ + "advanced to the second stage: %v", err) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) + } + + // We'll now mine 4 additional blocks. This should be enough for Bob's + // CSV timelock to expire and the sweeping transaction of the HTLC to be + // broadcast. + if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { + t.Fatalf("unable to mine blocks: %v", err) + } + + sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's htlc sweep tx: %v", err) + } + + // We'll then mine a final block which should confirm this second layer + // sweep transaction. + block = mineBlocks(t, net, 1, 1)[0] + assertTxInBlock(t, block, sweepTx) + + // At this point, Bob should no longer show any channels as pending + // close. 
+ err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) != 0 { + predErr = fmt.Errorf("bob still has pending channels "+ + "but shouldn't: %v", spew.Sdump(pendingChanResp)) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // Coop close, no anchors. + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssertType( + ctxt, t, net, alice, aliceChanPoint, false, false, + ) +} diff --git a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go new file mode 100644 index 0000000000..ebff918781 --- /dev/null +++ b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go @@ -0,0 +1,252 @@ +// +build rpctest + +package itest + +import ( + "context" + "fmt" + "time" + + "github.com/btcsuite/btcutil" + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" +) + +// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a +// multi-hop HTLC, and the final destination of the HTLC force closes the +// channel, then we properly timeout the HTLC directly on *their* commitment +// transaction once the timeout has expired. Once we sweep the transaction, we +// should also cancel back the initial HTLC. 
+func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, + t *harnessTest, alice, bob *lntest.HarnessNode, c commitType) { + + ctxb := context.Background() + + // First, we'll create a three hop network: Alice -> Bob -> Carol, with + // Carol refusing to actually settle or directly cancel any HTLC's + // self. + aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( + t, net, alice, bob, true, c, + ) + + // Clean up carol's node when the test finishes. + defer shutdownAndAssert(net, t, carol) + + // With our channels set up, we'll then send a single HTLC from Alice + // to Carol. As Carol is in hodl mode, she won't settle this HTLC which + // opens up the base for out tests. + const ( + finalCltvDelta = 40 + htlcAmt = btcutil.Amount(30000) + ) + + ctx, cancel := context.WithCancel(ctxb) + defer cancel() + + alicePayStream, err := alice.SendPayment(ctx) + if err != nil { + t.Fatalf("unable to create payment stream for alice: %v", err) + } + + // We'll now send a single HTLC across our multi-hop network. + carolPubKey := carol.PubKey[:] + payHash := makeFakePayHash(t) + err = alicePayStream.Send(&lnrpc.SendRequest{ + Dest: carolPubKey, + Amt: int64(htlcAmt), + PaymentHash: payHash, + FinalCltvDelta: finalCltvDelta, + }) + if err != nil { + t.Fatalf("unable to send alice htlc: %v", err) + } + + // Once the HTLC has cleared, all the nodes in our mini network should + // show that the HTLC has been locked in. + var predErr error + nodes := []*lntest.HarnessNode{alice, bob, carol} + err = wait.Predicate(func() bool { + predErr = assertActiveHtlcs(nodes, payHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", predErr) + } + + // At this point, we'll now instruct Carol to force close the + // transaction. This will let us exercise that Bob is able to sweep the + // expired HTLC on Carol's version of the commitment transaction. 
If + // Carol has an anchor, it will be swept too. + ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssertType( + ctxt, t, net, carol, bobChanPoint, c == commitTypeAnchors, + true, + ) + + // At this point, Bob should have a pending force close channel as + // Carol has gone directly to chain. + pendingChansRequest := &lnrpc.PendingChannelsRequest{} + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for "+ + "pending channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending " + + "force close channels but doesn't") + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // Bob can sweep his output immediately. If there is an anchor, Bob will + // sweep that as well. + expectedTxes := 1 + if c == commitTypeAnchors { + expectedTxes = 2 + } + + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) + if err != nil { + t.Fatalf("failed to find txes in miner mempool: %v", err) + } + + // Next, we'll mine enough blocks for the HTLC to expire. At this + // point, Bob should hand off the output to his internal utxo nursery, + // which will broadcast a sweep transaction. + numBlocks := padCLTV(finalCltvDelta - 1) + if _, err := net.Miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + // If we check Bob's pending channel report, it should show that he has + // a single HTLC that's now in the second stage, as skip the initial + // first stage since this is a direct HTLC. 
+ err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending for " + + "close chan but doesn't") + return false + } + + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if len(forceCloseChan.PendingHtlcs) != 1 { + predErr = fmt.Errorf("bob should have pending htlc " + + "but doesn't") + return false + } + if forceCloseChan.PendingHtlcs[0].Stage != 2 { + predErr = fmt.Errorf("bob's htlc should have "+ + "advanced to the second stage: %v", err) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) + } + + // Bob's sweeping transaction should now be found in the mempool at + // this point. + sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + // If Bob's transaction isn't yet in the mempool, then due to + // internal message passing and the low period between blocks + // being mined, it may have been detected as a late + // registration. As a result, we'll mine another block and + // repeat the check. If it doesn't go through this time, then + // we'll fail. + // TODO(halseth): can we use waitForChannelPendingForceClose to + // avoid this hack? + if _, err := net.Miner.Node.Generate(1); err != nil { + t.Fatalf("unable to generate block: %v", err) + } + sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's sweeping transaction: "+ + "%v", err) + } + } + + // If we mine an additional block, then this should confirm Bob's + // transaction which sweeps the direct HTLC output. 
+ block := mineBlocks(t, net, 1, 1)[0] + assertTxInBlock(t, block, sweepTx) + + // Now that the sweeping transaction has been confirmed, Bob should + // cancel back that HTLC. As a result, Alice should not know of any + // active HTLC's. + nodes = []*lntest.HarnessNode{alice} + err = wait.Predicate(func() bool { + predErr = assertNumActiveHtlcs(nodes, 0) + if predErr != nil { + return false + } + return true + }, time.Second*15) + if err != nil { + t.Fatalf("alice's channel still has active htlc's: %v", predErr) + } + + // Now we'll check Bob's pending channel report. Since this was Carol's + // commitment, he doesn't have to wait for any CSV delays. As a result, + // he should show no additional pending transactions. + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) != 0 { + predErr = fmt.Errorf("bob still has pending channels "+ + "but shouldn't: %v", spew.Sdump(pendingChanResp)) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // We'll close out the test by closing the channel from Alice to Bob, + // and then shutting down the new node we created as its no longer + // needed. Coop close, no anchors. 
+ ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssertType( + ctxt, t, net, alice, aliceChanPoint, false, + false, + ) +} diff --git a/lntest/itest/lnd_multi-hop_test.go b/lntest/itest/lnd_multi-hop_test.go new file mode 100644 index 0000000000..ec73e18770 --- /dev/null +++ b/lntest/itest/lnd_multi-hop_test.go @@ -0,0 +1,315 @@ +// +build rpctest + +package itest + +import ( + "context" + "fmt" + "testing" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntypes" +) + +func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { + + type testCase struct { + name string + test func(net *lntest.NetworkHarness, t *harnessTest, alice, + bob *lntest.HarnessNode, c commitType) + } + + subTests := []testCase{ + { + // bob: outgoing our commit timeout + // carol: incoming their commit watch and see timeout + name: "local force close immediate expiry", + test: testMultiHopHtlcLocalTimeout, + }, + { + // bob: outgoing watch and see, they sweep on chain + // carol: incoming our commit, know preimage + name: "receiver chain claim", + test: testMultiHopReceiverChainClaim, + }, + { + // bob: outgoing our commit watch and see timeout + // carol: incoming their commit watch and see timeout + name: "local force close on-chain htlc timeout", + test: testMultiHopLocalForceCloseOnChainHtlcTimeout, + }, + { + // bob: outgoing their commit watch and see timeout + // carol: incoming our commit watch and see timeout + name: "remote force close on-chain htlc timeout", + test: testMultiHopRemoteForceCloseOnChainHtlcTimeout, + }, + { + // bob: outgoing our commit watch and see, they sweep on chain + // bob: incoming our commit watch and learn preimage + // carol: incoming their commit know preimage + name: "local 
chain claim", + test: testMultiHopHtlcLocalChainClaim, + }, + { + // bob: outgoing their commit watch and see, they sweep on chain + // bob: incoming their commit watch and learn preimage + // carol: incoming our commit know preimage + name: "remote chain claim", + test: testMultiHopHtlcRemoteChainClaim, + }, + } + + commitTypes := []commitType{ + commitTypeLegacy, + commitTypeAnchors, + } + + for _, commitType := range commitTypes { + testName := fmt.Sprintf("committype=%v", commitType.String()) + + success := t.t.Run(testName, func(t *testing.T) { + ht := newHarnessTest(t, net) + + args := commitType.Args() + alice, err := net.NewNode("Alice", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, ht, alice) + + bob, err := net.NewNode("Bob", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, ht, bob) + + ctxb := context.Background() + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, alice, bob); err != nil { + t.Fatalf("unable to connect alice to bob: %v", err) + } + + for _, subTest := range subTests { + subTest := subTest + + success := ht.t.Run(subTest.name, func(t *testing.T) { + ht := newHarnessTest(t, net) + + subTest.test(net, ht, alice, bob, commitType) + }) + if !success { + return + } + } + }) + if !success { + return + } + } +} + +// waitForInvoiceAccepted waits until the specified invoice moved to the +// accepted state by the node. 
+func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode, + payHash lntypes.Hash) { + + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + invoiceUpdates, err := node.SubscribeSingleInvoice(ctx, + &invoicesrpc.SubscribeSingleInvoiceRequest{ + RHash: payHash[:], + }, + ) + if err != nil { + t.Fatalf("subscribe single invoice: %v", err) + } + + for { + update, err := invoiceUpdates.Recv() + if err != nil { + t.Fatalf("invoice update err: %v", err) + } + if update.State == lnrpc.Invoice_ACCEPTED { + break + } + } +} + +// checkPaymentStatus asserts that the given node list a payment with the given +// preimage has the expected status. +func checkPaymentStatus(ctxt context.Context, node *lntest.HarnessNode, + preimage lntypes.Preimage, status lnrpc.Payment_PaymentStatus) error { + + req := &lnrpc.ListPaymentsRequest{ + IncludeIncomplete: true, + } + paymentsResp, err := node.ListPayments(ctxt, req) + if err != nil { + return fmt.Errorf("error when obtaining Alice payments: %v", + err) + } + + payHash := preimage.Hash() + var found bool + for _, p := range paymentsResp.Payments { + if p.PaymentHash != payHash.String() { + continue + } + + found = true + if p.Status != status { + return fmt.Errorf("expected payment status "+ + "%v, got %v", status, p.Status) + } + + switch status { + + // If this expected status is SUCCEEDED, we expect the final preimage. + case lnrpc.Payment_SUCCEEDED: + if p.PaymentPreimage != preimage.String() { + return fmt.Errorf("preimage doesn't match: %v vs %v", + p.PaymentPreimage, preimage.String()) + } + + // Otherwise we expect an all-zero preimage. 
+ default: + if p.PaymentPreimage != (lntypes.Preimage{}).String() { + return fmt.Errorf("expected zero preimage, got %v", + p.PaymentPreimage) + } + } + + } + + if !found { + return fmt.Errorf("payment with payment hash %v not found "+ + "in response", payHash) + } + + return nil +} + +func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, + alice, bob *lntest.HarnessNode, carolHodl bool, c commitType) ( + *lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) { + + ctxb := context.Background() + + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + err := net.EnsureConnected(ctxt, alice, bob) + if err != nil { + t.Fatalf("unable to connect peers: %v", err) + } + + // Make sure there are enough utxos for anchoring. + for i := 0; i < 2; i++ { + ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, alice) + if err != nil { + t.Fatalf("unable to send coins to Alice: %v", err) + } + + ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, bob) + if err != nil { + t.Fatalf("unable to send coins to Bob: %v", err) + } + } + + // We'll start the test by creating a channel between Alice and Bob, + // which will act as the first leg for out multi-hop HTLC. + const chanAmt = 1000000 + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + aliceChanPoint := openChannelAndAssert( + ctxt, t, net, alice, bob, + lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) + if err != nil { + t.Fatalf("alice didn't report channel: %v", err) + } + + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) + if err != nil { + t.Fatalf("bob didn't report channel: %v", err) + } + + // Next, we'll create a new node "carol" and have Bob connect to her. 
If + // the carolHodl flag is set, we'll make carol always hold onto the + // HTLC, this way it'll force Bob to go to chain to resolve the HTLC. + carolFlags := c.Args() + if carolHodl { + carolFlags = append(carolFlags, "--hodl.exit-settle") + } + carol, err := net.NewNode("Carol", carolFlags) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, bob, carol); err != nil { + t.Fatalf("unable to connect bob to carol: %v", err) + } + + // Make sure Carol has enough utxos for anchoring. Because the anchor by + // itself often doesn't meet the dust limit, a utxo from the wallet + // needs to be attached as an additional input. This can still lead to a + // positively-yielding transaction. + for i := 0; i < 2; i++ { + ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, carol) + if err != nil { + t.Fatalf("unable to send coins to Alice: %v", err) + } + } + + // We'll then create a channel from Bob to Carol. After this channel is + // open, our topology looks like: A -> B -> C. 
+ ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + bobChanPoint := openChannelAndAssert( + ctxt, t, net, bob, carol, + lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint) + if err != nil { + t.Fatalf("alice didn't report channel: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint) + if err != nil { + t.Fatalf("bob didn't report channel: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint) + if err != nil { + t.Fatalf("bob didn't report channel: %v", err) + } + + return aliceChanPoint, bobChanPoint, carol +} + +// assertAllTxesSpendFrom asserts that all txes in the list spend from the given +// tx. +func assertAllTxesSpendFrom(t *harnessTest, txes []*wire.MsgTx, + prevTxid chainhash.Hash) { + + for _, tx := range txes { + if tx.TxIn[0].PreviousOutPoint.Hash != prevTxid { + t.Fatalf("tx %v did not spend from %v", + tx.TxHash(), prevTxid) + } + } +} diff --git a/lntest/itest/lnd_send_multi_path_payment.go b/lntest/itest/lnd_send_multi_path_payment.go new file mode 100644 index 0000000000..227320001b --- /dev/null +++ b/lntest/itest/lnd_send_multi_path_payment.go @@ -0,0 +1,141 @@ +// +build rpctest + +package itest + +import ( + "context" + "encoding/hex" + + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lntest" +) + +// testSendMultiPathPayment tests that we are able to successfully route a +// payment using multiple shards across different paths. 
+func testSendMultiPathPayment(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + + ctx := newMppTestContext(t, net) + defer ctx.shutdownNodes() + + const paymentAmt = btcutil.Amount(300000) + + // Set up a network with three different paths Alice <-> Bob. Channel + // capacities are set such that the payment can only succeed if (at + // least) three paths are used. + // + // _ Eve _ + // / \ + // Alice -- Carol ---- Bob + // \ / + // \__ Dave ____/ + // + ctx.openChannel(ctx.carol, ctx.bob, 135000) + ctx.openChannel(ctx.alice, ctx.carol, 235000) + ctx.openChannel(ctx.dave, ctx.bob, 135000) + ctx.openChannel(ctx.alice, ctx.dave, 135000) + ctx.openChannel(ctx.eve, ctx.bob, 135000) + ctx.openChannel(ctx.carol, ctx.eve, 135000) + + defer ctx.closeChannels() + + ctx.waitForChannels() + + // Increase Dave's fee to make the test deterministic. Otherwise it + // would be unpredictable whether pathfinding would go through Charlie + // or Dave for the first shard. + _, err := ctx.dave.UpdateChannelPolicy( + context.Background(), + &lnrpc.PolicyUpdateRequest{ + Scope: &lnrpc.PolicyUpdateRequest_Global{Global: true}, + BaseFeeMsat: 500000, + FeeRate: 0.001, + TimeLockDelta: 40, + }, + ) + if err != nil { + t.Fatalf("dave policy update: %v", err) + } + // Our first test will be Alice paying Bob using a SendPayment call. + // Let Bob create an invoice for Alice to pay. + payReqs, rHashes, invoices, err := createPayReqs( + net.Bob, paymentAmt, 1, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) + } + + rHash := rHashes[0] + payReq := payReqs[0] + + payment := sendAndAssertSuccess( + t, net.Alice, + &routerrpc.SendPaymentRequest{ + PaymentRequest: payReq, + MaxParts: 10, + TimeoutSeconds: 60, + FeeLimitMsat: noFeeLimitMsat, + }, + ) + + // Make sure we got the preimage. 
+ if payment.PaymentPreimage != hex.EncodeToString(invoices[0].RPreimage) { + t.Fatalf("preimage doesn't match") + } + + // Check that Alice split the payment in at least three shards. Because + // the hand-off of the htlc to the link is asynchronous (via a mailbox), + // there is some non-determinism in the process. Depending on whether + // the new pathfinding round is started before or after the htlc is + // locked into the channel, different sharding may occur. Therefore we + // can only check if the number of shards isn't below the theoretical + // minimum. + succeeded := 0 + for _, htlc := range payment.Htlcs { + if htlc.Status == lnrpc.HTLCAttempt_SUCCEEDED { + succeeded++ + } + } + + const minExpectedShards = 3 + if succeeded < minExpectedShards { + t.Fatalf("expected at least %v shards, but got %v", + minExpectedShards, succeeded) + } + + // Make sure Bob show the invoice as settled for the full + // amount. + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + inv, err := ctx.bob.LookupInvoice( + ctxt, &lnrpc.PaymentHash{ + RHash: rHash, + }, + ) + if err != nil { + t.Fatalf("error when obtaining invoice: %v", err) + } + + if inv.AmtPaidSat != int64(paymentAmt) { + t.Fatalf("incorrect payment amt for invoice"+ + "want: %d, got %d", + paymentAmt, inv.AmtPaidSat) + } + + if inv.State != lnrpc.Invoice_SETTLED { + t.Fatalf("Invoice not settled: %v", inv.State) + } + + settled := 0 + for _, htlc := range inv.Htlcs { + if htlc.State == lnrpc.InvoiceHTLCState_SETTLED { + settled++ + } + + } + if settled != succeeded { + t.Fatalf("expected invoice to be settled "+ + "with %v HTLCs, had %v", succeeded, settled) + } +} diff --git a/lntest/itest/lnd_single_hop_invoice_test.go b/lntest/itest/lnd_single_hop_invoice_test.go new file mode 100644 index 0000000000..fa907d307c --- /dev/null +++ b/lntest/itest/lnd_single_hop_invoice_test.go @@ -0,0 +1,177 @@ +// +build rpctest + +package itest + +import ( + "bytes" + "context" + "time" + + "github.com/btcsuite/btcutil" + 
"github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/record" +) + +func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + + // Open a channel with 100k satoshis between Alice and Bob with Alice being + // the sole funder of the channel. + ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) + chanAmt := btcutil.Amount(100000) + chanPoint := openChannelAndAssert( + ctxt, t, net, net.Alice, net.Bob, + lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + + // Now that the channel is open, create an invoice for Bob which + // expects a payment of 1000 satoshis from Alice paid via a particular + // preimage. + const paymentAmt = 1000 + preimage := bytes.Repeat([]byte("A"), 32) + invoice := &lnrpc.Invoice{ + Memo: "testing", + RPreimage: preimage, + Value: paymentAmt, + } + ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) + invoiceResp, err := net.Bob.AddInvoice(ctxb, invoice) + if err != nil { + t.Fatalf("unable to add invoice: %v", err) + } + + // Wait for Alice to recognize and advertise the new channel generated + // above. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) + if err != nil { + t.Fatalf("alice didn't advertise channel before "+ + "timeout: %v", err) + } + err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint) + if err != nil { + t.Fatalf("bob didn't advertise channel before "+ + "timeout: %v", err) + } + + // With the invoice for Bob added, send a payment towards Alice paying + // to the above generated invoice. 
+ sendReq := &lnrpc.SendRequest{ + PaymentRequest: invoiceResp.PaymentRequest, + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + resp, err := net.Alice.SendPaymentSync(ctxt, sendReq) + if err != nil { + t.Fatalf("unable to send payment: %v", err) + } + + // Ensure we obtain the proper preimage in the response. + if resp.PaymentError != "" { + t.Fatalf("error when attempting recv: %v", resp.PaymentError) + } else if !bytes.Equal(preimage, resp.PaymentPreimage) { + t.Fatalf("preimage mismatch: expected %v, got %v", preimage, + resp.GetPaymentPreimage()) + } + + // Bob's invoice should now be found and marked as settled. + payHash := &lnrpc.PaymentHash{ + RHash: invoiceResp.RHash, + } + ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) + dbInvoice, err := net.Bob.LookupInvoice(ctxt, payHash) + if err != nil { + t.Fatalf("unable to lookup invoice: %v", err) + } + if !dbInvoice.Settled { + t.Fatalf("bob's invoice should be marked as settled: %v", + spew.Sdump(dbInvoice)) + } + + // With the payment completed all balance related stats should be + // properly updated. + err = wait.NoError( + assertAmountSent(paymentAmt, net.Alice, net.Bob), + 3*time.Second, + ) + if err != nil { + t.Fatalf(err.Error()) + } + + // Create another invoice for Bob, this time leaving off the preimage + // to one will be randomly generated. We'll test the proper + // encoding/decoding of the zpay32 payment requests. + invoice = &lnrpc.Invoice{ + Memo: "test3", + Value: paymentAmt, + } + ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) + invoiceResp, err = net.Bob.AddInvoice(ctxt, invoice) + if err != nil { + t.Fatalf("unable to add invoice: %v", err) + } + + // Next send another payment, but this time using a zpay32 encoded + // invoice rather than manually specifying the payment details. 
+ sendReq = &lnrpc.SendRequest{ + PaymentRequest: invoiceResp.PaymentRequest, + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + resp, err = net.Alice.SendPaymentSync(ctxt, sendReq) + if err != nil { + t.Fatalf("unable to send payment: %v", err) + } + if resp.PaymentError != "" { + t.Fatalf("error when attempting recv: %v", resp.PaymentError) + } + + // The second payment should also have succeeded, with the balances + // being update accordingly. + err = wait.NoError( + assertAmountSent(2*paymentAmt, net.Alice, net.Bob), + 3*time.Second, + ) + if err != nil { + t.Fatalf(err.Error()) + } + + // Next send a keysend payment. + keySendPreimage := lntypes.Preimage{3, 4, 5, 11} + keySendHash := keySendPreimage.Hash() + + sendReq = &lnrpc.SendRequest{ + Dest: net.Bob.PubKey[:], + Amt: paymentAmt, + FinalCltvDelta: 40, + PaymentHash: keySendHash[:], + DestCustomRecords: map[uint64][]byte{ + record.KeySendType: keySendPreimage[:], + }, + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + resp, err = net.Alice.SendPaymentSync(ctxt, sendReq) + if err != nil { + t.Fatalf("unable to send payment: %v", err) + } + if resp.PaymentError != "" { + t.Fatalf("error when attempting recv: %v", resp.PaymentError) + } + + // The keysend payment should also have succeeded, with the balances + // being update accordingly. 
+ err = wait.NoError( + assertAmountSent(3*paymentAmt, net.Alice, net.Bob), + 3*time.Second, + ) + if err != nil { + t.Fatalf(err.Error()) + } + + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) +} diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index f96241798e..b5e1eade46 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -32,14 +32,18 @@ import ( "github.com/go-errors/errors" "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/chanbackup" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lnrpc/signrpc" "github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc" "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc" "github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing" ) @@ -55,6 +59,9 @@ const ( minerMempoolTimeout = lntest.MinerMempoolTimeout channelOpenTimeout = lntest.ChannelOpenTimeout channelCloseTimeout = lntest.ChannelCloseTimeout + itestLndBinary = "../../lnd-itest" + anchorSize = 330 + noFeeLimitMsat = math.MaxInt64 ) // harnessTest wraps a regular testing.T providing enhanced error detection @@ -106,7 +113,6 @@ func (h *harnessTest) Fatalf(format string, a ...interface{}) { // RunTestCase executes a harness test case. Any errors or panics will be // represented as fatal. 
func (h *harnessTest) RunTestCase(testCase *testCase) { - h.testCase = testCase defer func() { h.testCase = nil @@ -200,6 +206,31 @@ func mineBlocks(t *harnessTest, net *lntest.NetworkHarness, return blocks } +// openChannelStream blocks until an OpenChannel request for a channel funding +// by alice succeeds. If it does, a stream client is returned to receive events +// about the opening channel. +func openChannelStream(ctx context.Context, t *harnessTest, + net *lntest.NetworkHarness, alice, bob *lntest.HarnessNode, + p lntest.OpenChannelParams) lnrpc.Lightning_OpenChannelClient { + + t.t.Helper() + + // Wait until we are able to fund a channel successfully. This wait + // prevents us from erroring out when trying to create a channel while + // the node is starting up. + var chanOpenUpdate lnrpc.Lightning_OpenChannelClient + err := wait.NoError(func() error { + var err error + chanOpenUpdate, err = net.OpenChannel(ctx, alice, bob, p) + return err + }, defaultTimeout) + if err != nil { + t.Fatalf("unable to open channel: %v", err) + } + + return chanOpenUpdate +} + // openChannelAndAssert attempts to open a channel with the specified // parameters extended from Alice to Bob. Additionally, two items are asserted // after the channel is considered open: the funding transaction should be @@ -209,12 +240,9 @@ func openChannelAndAssert(ctx context.Context, t *harnessTest, net *lntest.NetworkHarness, alice, bob *lntest.HarnessNode, p lntest.OpenChannelParams) *lnrpc.ChannelPoint { - chanOpenUpdate, err := net.OpenChannel( - ctx, alice, bob, p, - ) - if err != nil { - t.Fatalf("unable to open channel: %v", err) - } + t.t.Helper() + + chanOpenUpdate := openChannelStream(ctx, t, net, alice, bob, p) // Mine 6 blocks, then wait for Alice's node to notify us that the // channel has been opened. 
The funding transaction should be found @@ -263,6 +291,13 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest, net *lntest.NetworkHarness, node *lntest.HarnessNode, fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash { + return closeChannelAndAssertType(ctx, t, net, node, fundingChanPoint, false, force) +} + +func closeChannelAndAssertType(ctx context.Context, t *harnessTest, + net *lntest.NetworkHarness, node *lntest.HarnessNode, + fundingChanPoint *lnrpc.ChannelPoint, anchors, force bool) *chainhash.Hash { + // Fetch the current channel policy. If the channel is currently // enabled, we will register for graph notifications before closing to // assert that the node sends out a disabling update as a result of the @@ -296,7 +331,9 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest, ) } - return assertChannelClosed(ctx, t, net, node, fundingChanPoint, closeUpdates) + return assertChannelClosed( + ctx, t, net, node, fundingChanPoint, anchors, closeUpdates, + ) } // closeReorgedChannelAndAssert attempts to close a channel identified by the @@ -317,14 +354,16 @@ func closeReorgedChannelAndAssert(ctx context.Context, t *harnessTest, t.Fatalf("unable to close channel: %v", err) } - return assertChannelClosed(ctx, t, net, node, fundingChanPoint, closeUpdates) + return assertChannelClosed( + ctx, t, net, node, fundingChanPoint, false, closeUpdates, + ) } // assertChannelClosed asserts that the channel is properly cleaned up after // initiating a cooperative or local close. 
func assertChannelClosed(ctx context.Context, t *harnessTest, net *lntest.NetworkHarness, node *lntest.HarnessNode, - fundingChanPoint *lnrpc.ChannelPoint, + fundingChanPoint *lnrpc.ChannelPoint, anchors bool, closeUpdates lnrpc.Lightning_CloseChannelClient) *chainhash.Hash { txid, err := lnd.GetChanPointFundingTxid(fundingChanPoint) @@ -333,10 +372,33 @@ func assertChannelClosed(ctx context.Context, t *harnessTest, } chanPointStr := fmt.Sprintf("%v:%v", txid, fundingChanPoint.OutputIndex) + // If the channel appears in list channels, ensure that its state + // contains ChanStatusCoopBroadcasted. + ctxt, _ := context.WithTimeout(ctx, defaultTimeout) + listChansRequest := &lnrpc.ListChannelsRequest{} + listChansResp, err := node.ListChannels(ctxt, listChansRequest) + if err != nil { + t.Fatalf("unable to query for list channels: %v", err) + } + for _, channel := range listChansResp.Channels { + // Skip other channels. + if channel.ChannelPoint != chanPointStr { + continue + } + + // Assert that the channel is in coop broadcasted. + if !strings.Contains(channel.ChanStatusFlags, + channeldb.ChanStatusCoopBroadcasted.String()) { + t.Fatalf("channel not coop broadcasted, "+ + "got: %v", channel.ChanStatusFlags) + } + } + // At this point, the channel should now be marked as being in the // state of "waiting close". + ctxt, _ = context.WithTimeout(ctx, defaultTimeout) pendingChansRequest := &lnrpc.PendingChannelsRequest{} - pendingChanResp, err := node.PendingChannels(ctx, pendingChansRequest) + pendingChanResp, err := node.PendingChannels(ctxt, pendingChansRequest) if err != nil { t.Fatalf("unable to query for pending channels: %v", err) } @@ -353,8 +415,13 @@ func assertChannelClosed(ctx context.Context, t *harnessTest, // We'll now, generate a single block, wait for the final close status // update, then ensure that the closing transaction was included in the - // block. - block := mineBlocks(t, net, 1, 1)[0] + // block. 
If there are anchors, we also expect an anchor sweep. + expectedTxes := 1 + if anchors { + expectedTxes = 2 + } + + block := mineBlocks(t, net, 1, expectedTxes)[0] closingTxid, err := net.WaitForChannelClose(ctx, closeUpdates) if err != nil { @@ -574,22 +641,6 @@ func shutdownAndAssert(net *lntest.NetworkHarness, t *harnessTest, } } -// calcStaticFee calculates appropriate fees for commitment transactions. This -// function provides a simple way to allow test balance assertions to take fee -// calculations into account. -// -// TODO(bvu): Refactor when dynamic fee estimation is added. -// TODO(conner) remove code duplication -func calcStaticFee(numHTLCs int) btcutil.Amount { - const ( - commitWeight = btcutil.Amount(724) - htlcWeight = 172 - feePerKw = btcutil.Amount(50 * 1000 / 4) - ) - return feePerKw * (commitWeight + - btcutil.Amount(htlcWeight*numHTLCs)) / 1000 -} - // completePaymentRequests sends payments from a lightning node to complete all // payment requests. If the awaitResponse parameter is true, this function // does not return until all payments successfully complete without errors. @@ -765,7 +816,7 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { // method takes the expected value of Carol's balance when using the // given recovery window. Additionally, the caller can specify an action // to perform on the restored node before the node is shutdown. - restoreCheckBalance := func(expAmount int64, expectedNumUTXOs int, + restoreCheckBalance := func(expAmount int64, expectedNumUTXOs uint32, recoveryWindow int32, fn func(*lntest.HarnessNode)) { // Restore Carol, passing in the password, mnemonic, and @@ -791,13 +842,7 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to query wallet balance: %v", err) } - - // Verify that Carol's balance matches our expected - // amount. 
currBalance = resp.ConfirmedBalance - if expAmount != currBalance { - return false - } utxoReq := &lnrpc.ListUnspentRequest{ MaxConfs: math.MaxInt32, @@ -807,8 +852,13 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { if err != nil { t.Fatalf("unable to query utxos: %v", err) } + currNumUTXOs = uint32(len(utxoResp.Utxos)) - currNumUTXOs := len(utxoResp.Utxos) + // Verify that Carol's balance and number of UTXOs + // matches what's expected. + if expAmount != currBalance { + return false + } if currNumUTXOs != expectedNumUTXOs { return false } @@ -924,7 +974,155 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { // Ensure that using a recovery window of 20 succeeds with all UTXOs // found and the final balance reflected. - restoreCheckBalance(6*btcutil.SatoshiPerBitcoin, 6, 20, nil) + + // After these checks are done, we'll want to make sure we can also + // recover change address outputs. This is mainly motivated by a now + // fixed bug in the wallet in which change addresses could at times be + // created outside of the default key scopes. Recovery only used to be + // performed on the default key scopes, so ideally this test case + // would've caught the bug earlier. Carol has received 6 BTC so far from + // the miner, we'll send 5 back to ensure all of her UTXOs get spent to + // avoid fee discrepancies and a change output is formed. 
+ const minerAmt = 5 * btcutil.SatoshiPerBitcoin + const finalBalance = 6 * btcutil.SatoshiPerBitcoin + promptChangeAddr := func(node *lntest.HarnessNode) { + minerAddr, err := net.Miner.NewAddress() + if err != nil { + t.Fatalf("unable to create new miner address: %v", err) + } + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + resp, err := node.SendCoins(ctxt, &lnrpc.SendCoinsRequest{ + Addr: minerAddr.String(), + Amount: minerAmt, + }) + if err != nil { + t.Fatalf("unable to send coins to miner: %v", err) + } + txid, err := waitForTxInMempool( + net.Miner.Node, minerMempoolTimeout, + ) + if err != nil { + t.Fatalf("transaction not found in mempool: %v", err) + } + if resp.Txid != txid.String() { + t.Fatalf("txid mismatch: %v vs %v", resp.Txid, + txid.String()) + } + block := mineBlocks(t, net, 1, 1)[0] + assertTxInBlock(t, block, txid) + } + restoreCheckBalance(finalBalance, 6, 20, promptChangeAddr) + + // We should expect a static fee of 27750 satoshis for spending 6 inputs + // (3 P2WPKH, 3 NP2WPKH) to two P2WPKH outputs. Carol should therefore + // only have one UTXO present (the change output) of 6 - 5 - fee BTC. + const fee = 27750 + restoreCheckBalance(finalBalance-minerAmt-fee, 1, 21, nil) +} + +// commitType is a simple enum used to run though the basic funding flow with +// different commitment formats. +type commitType byte + +const ( + // commitTypeLegacy is the old school commitment type. + commitTypeLegacy commitType = iota + + // commiTypeTweakless is the commitment type where the remote key is + // static (non-tweaked). + commitTypeTweakless + + // commitTypeAnchors is the kind of commitment that has extra outputs + // used for anchoring down to commitment using CPFP. + commitTypeAnchors +) + +// String returns that name of the commitment type. 
+func (c commitType) String() string { + switch c { + case commitTypeLegacy: + return "legacy" + case commitTypeTweakless: + return "tweakless" + case commitTypeAnchors: + return "anchors" + default: + return "invalid" + } +} + +// Args returns the command line flag to supply to enable this commitment type. +func (c commitType) Args() []string { + switch c { + case commitTypeLegacy: + return []string{"--protocol.committweak"} + case commitTypeTweakless: + return []string{} + case commitTypeAnchors: + return []string{"--protocol.anchors"} + } + + return nil +} + +// calcStaticFee calculates appropriate fees for commitment transactions. This +// function provides a simple way to allow test balance assertions to take fee +// calculations into account. +func (c commitType) calcStaticFee(numHTLCs int) btcutil.Amount { + const htlcWeight = input.HTLCWeight + var ( + feePerKw = chainfee.SatPerKVByte(50000).FeePerKWeight() + commitWeight = input.CommitWeight + anchors = btcutil.Amount(0) + ) + + // The anchor commitment type is slightly heavier, and we must also add + // the value of the two anchors to the resulting fee the initiator + // pays. + if c == commitTypeAnchors { + commitWeight = input.AnchorCommitWeight + anchors = 2 * anchorSize + } + + return feePerKw.FeeForWeight(int64(commitWeight+htlcWeight*numHTLCs)) + + anchors +} + +// channelCommitType retrieves the active channel commitment type for the given +// chan point. 
+func channelCommitType(node *lntest.HarnessNode, + chanPoint *lnrpc.ChannelPoint) (commitType, error) { + + ctxb := context.Background() + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + + req := &lnrpc.ListChannelsRequest{} + channels, err := node.ListChannels(ctxt, req) + if err != nil { + return 0, fmt.Errorf("listchannels failed: %v", err) + } + + for _, c := range channels.Channels { + if c.ChannelPoint == txStr(chanPoint) { + switch c.CommitmentType { + + // If the anchor output size is non-zero, we are + // dealing with the anchor type. + case lnrpc.CommitmentType_ANCHORS: + return commitTypeAnchors, nil + + // StaticRemoteKey means it is tweakless, + case lnrpc.CommitmentType_STATIC_REMOTE_KEY: + return commitTypeTweakless, nil + + // Otherwise legacy. + default: + return commitTypeLegacy, nil + } + } + } + + return 0, fmt.Errorf("channel point %v not found", chanPoint) } // basicChannelFundingTest is a sub-test of the main testBasicChannelFunding @@ -932,8 +1130,8 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { // then return a function closure that should be called to assert proper // channel closure. 
func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness, - alice *lntest.HarnessNode, - bob *lntest.HarnessNode) (*lnrpc.Channel, *lnrpc.Channel, func(), error) { + alice *lntest.HarnessNode, bob *lntest.HarnessNode, + fundingShim *lnrpc.FundingShim) (*lnrpc.Channel, *lnrpc.Channel, func(), error) { chanAmt := lnd.MaxBtcFundingAmount pushAmt := btcutil.Amount(100000) @@ -949,8 +1147,9 @@ func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness, chanPoint := openChannelAndAssert( ctxt, t, net, alice, bob, lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, + Amt: chanAmt, + PushAmt: pushAmt, + FundingShim: fundingShim, }, ) @@ -966,6 +1165,12 @@ func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness, "channel: %v", err) } + cType, err := channelCommitType(alice, chanPoint) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to get channel "+ + "type: %v", err) + } + // With the channel open, ensure that the amount specified above has // properly been pushed to Bob. 
balReq := &lnrpc.ChannelBalanceRequest{} @@ -981,14 +1186,19 @@ func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness, return nil, nil, nil, fmt.Errorf("unable to get bobs's "+ "balance: %v", err) } - if aliceBal.Balance != int64(chanAmt-pushAmt-calcStaticFee(0)) { + + expBalanceAlice := chanAmt - pushAmt - cType.calcStaticFee(0) + aliceBalance := btcutil.Amount(aliceBal.Balance) + if aliceBalance != expBalanceAlice { return nil, nil, nil, fmt.Errorf("alice's balance is "+ "incorrect: expected %v got %v", - chanAmt-pushAmt-calcStaticFee(0), aliceBal) + expBalanceAlice, aliceBalance) } - if bobBal.Balance != int64(pushAmt) { + + bobBalance := btcutil.Amount(bobBal.Balance) + if bobBalance != pushAmt { return nil, nil, nil, fmt.Errorf("bob's balance is incorrect: "+ - "expected %v got %v", pushAmt, bobBal.Balance) + "expected %v got %v", pushAmt, bobBalance) } req := &lnrpc.ListChannelsRequest{} @@ -1022,19 +1232,24 @@ func testBasicChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() + // Run through the test with combinations of all the different + // commitment types. + allTypes := []commitType{ + commitTypeLegacy, + commitTypeTweakless, + commitTypeAnchors, + } + test: // We'll test all possible combinations of the feature bit presence // that both nodes can signal for this new channel type. We'll make a // new Carol+Dave for each test instance as well. - for _, carolTweakless := range []bool{true, false} { - for _, daveTweakless := range []bool{true, false} { + for _, carolCommitType := range allTypes { + for _, daveCommitType := range allTypes { // Based on the current tweak variable for Carol, we'll // preferentially signal the legacy commitment format. // We do the same for Dave shortly below. 
- var carolArgs []string - if !carolTweakless { - carolArgs = []string{"--legacyprotocol.committweak"} - } + carolArgs := carolCommitType.Args() carol, err := net.NewNode("Carol", carolArgs) if err != nil { t.Fatalf("unable to create new node: %v", err) @@ -1048,10 +1263,7 @@ test: t.Fatalf("unable to send coins to carol: %v", err) } - var daveArgs []string - if !daveTweakless { - daveArgs = []string{"--legacyprotocol.committweak"} - } + daveArgs := daveCommitType.Args() dave, err := net.NewNode("Dave", daveArgs) if err != nil { t.Fatalf("unable to create new node: %v", err) @@ -1066,35 +1278,70 @@ test: t.Fatalf("unable to connect peers: %v", err) } - testName := fmt.Sprintf("carol_tweak=%v,dave_tweak=%v", - carolTweakless, daveTweakless) + testName := fmt.Sprintf("carol_commit=%v,dave_commit=%v", + carolCommitType, daveCommitType) ht := t success := t.t.Run(testName, func(t *testing.T) { carolChannel, daveChannel, closeChan, err := basicChannelFundingTest( - ht, net, carol, dave, + ht, net, carol, dave, nil, ) if err != nil { t.Fatalf("failed funding flow: %v", err) } - tweaklessSignalled := carolTweakless && daveTweakless - tweaklessChans := (carolChannel.StaticRemoteKey && - daveChannel.StaticRemoteKey) + // Both nodes should report the same commitment + // type. + chansCommitType := carolChannel.CommitmentType + if daveChannel.CommitmentType != chansCommitType { + t.Fatalf("commit types don't match, "+ + "carol got %v, dave got %v", + carolChannel.CommitmentType, + daveChannel.CommitmentType, + ) + } + + // Now check that the commitment type reported + // by both nodes is what we expect. It will be + // the minimum of the two nodes' preference, in + // the order Legacy, Tweakless, Anchors. + expType := carolCommitType + + switch daveCommitType { + + // Dave supports anchors, type will be what + // Carol supports. + case commitTypeAnchors: + + // Dave only supports tweakless, channel will + // be downgraded to this type if Carol supports + // anchors. 
+ case commitTypeTweakless: + if expType == commitTypeAnchors { + expType = commitTypeTweakless + } + + // Dave only supoprts legacy type, channel will + // be downgraded to this type. + case commitTypeLegacy: + expType = commitTypeLegacy + + default: + t.Fatalf("invalid commit type %v", + daveCommitType) + } + + // Check that the signalled type matches what we + // expect. switch { - // If both sides signalled a tweakless channel, and the - // resulting channel doesn't reflect this, then this - // is a failed case. - case tweaklessSignalled && !tweaklessChans: - t.Fatalf("expected tweakless channnel, got " + - "non-tweaked channel") - - // If both sides didn't signal a tweakless - // channel, and the resulting channel is - // tweakless, and this is also a failed case. - case !tweaklessSignalled && tweaklessChans: - t.Fatalf("expected non-tweaked channel, got " + - "tweakless channel") + case expType == commitTypeAnchors && chansCommitType == lnrpc.CommitmentType_ANCHORS: + case expType == commitTypeTweakless && chansCommitType == lnrpc.CommitmentType_STATIC_REMOTE_KEY: + case expType == commitTypeLegacy && chansCommitType == lnrpc.CommitmentType_LEGACY: + + default: + t.Fatalf("expected nodes to signal "+ + "commit type %v, instead got "+ + "%v", expType, chansCommitType) } // As we've concluded this sub-test case we'll @@ -1165,23 +1412,20 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { // Now, we'll connect her to Alice so that they can open a channel // together. The funding flow should select Carol's unconfirmed output // as she doesn't have any other funds since it's a new node. 
+ ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { t.Fatalf("unable to connect dave to alice: %v", err) } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanOpenUpdate, err := net.OpenChannel( - ctxt, carol, net.Alice, + + chanOpenUpdate := openChannelStream( + ctxt, t, net, carol, net.Alice, lntest.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, SpendUnconfirmed: true, }, ) - if err != nil { - t.Fatalf("unable to open channel between carol and alice: %v", - err) - } // Confirm the channel and wait for it to be recognized by both // parties. Two transactions should be mined, the unconfirmed spend and @@ -1193,6 +1437,11 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("error while waiting for channel open: %v", err) } + cType, err := channelCommitType(net.Alice, chanPoint) + if err != nil { + t.Fatalf("unable to get channel type: %v", err) + } + // With the channel open, we'll check the balances on each side of the // channel as a sanity check to ensure things worked out as intended. 
balReq := &lnrpc.ChannelBalanceRequest{} @@ -1206,9 +1455,9 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { if err != nil { t.Fatalf("unable to get alice's balance: %v", err) } - if carolBal.Balance != int64(chanAmt-pushAmt-calcStaticFee(0)) { + if carolBal.Balance != int64(chanAmt-pushAmt-cType.calcStaticFee(0)) { t.Fatalf("carol's balance is incorrect: expected %v got %v", - chanAmt-pushAmt-calcStaticFee(0), carolBal) + chanAmt-pushAmt-cType.calcStaticFee(0), carolBal) } if aliceBal.Balance != int64(pushAmt) { t.Fatalf("alice's balance is incorrect: expected %v got %v", @@ -1220,6 +1469,103 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) { closeChannelAndAssert(ctxt, t, net, carol, chanPoint, false) } +// testPaymentFollowingChannelOpen tests that the channel transition from +// 'pending' to 'open' state does not cause any inconsistencies within other +// subsystems trying to update the channel state in the db. We follow this +// transition with a payment that updates the commitment state and verify that +// the pending state is up to date. +func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + + const paymentAmt = btcutil.Amount(100) + channelCapacity := btcutil.Amount(paymentAmt * 1000) + + // We first establish a channel between Alice and Bob. + ctxt, cancel := context.WithTimeout(ctxb, channelOpenTimeout) + defer cancel() + pendingUpdate, err := net.OpenPendingChannel( + ctxt, net.Alice, net.Bob, channelCapacity, 0, + ) + if err != nil { + t.Fatalf("unable to open channel: %v", err) + } + + // At this point, the channel's funding transaction will have been + // broadcast, but not confirmed. Alice and Bob's nodes + // should reflect this when queried via RPC. 
+ ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 1) + + // We are restarting Bob's node to let the link be created for the + // pending channel. + if err := net.RestartNode(net.Bob, nil); err != nil { + t.Fatalf("Bob restart failed: %v", err) + } + + // We ensure that Bob reconnects to Alice. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + if err := net.EnsureConnected(ctxt, net.Bob, net.Alice); err != nil { + t.Fatalf("peers unable to reconnect after restart: %v", err) + } + + // We mine six blocks for the channel to be confirmed. + _ = mineBlocks(t, net, 6, 1)[0] + + // We verify that the channel is open from both nodes' point of view. + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0) + + // With the channel open, we'll create invoices for Bob that Alice will + // pay to in order to advance the state of the channel. + bobPayReqs, _, _, err := createPayReqs( + net.Bob, paymentAmt, 1, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) + } + + // Send payment to Bob so that a channel update to disk will be + // executed. + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + _, err = net.Alice.SendPaymentSync( + ctxt, + &lnrpc.SendRequest{ + PaymentRequest: bobPayReqs[0], + }, + ) + if err != nil { + t.Fatalf("unable to create payment stream for alice: %v", err) + } + + // At this point we want to make sure the channel is opened and not + // pending. + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + res, err := net.Bob.ListChannels(ctxt, &lnrpc.ListChannelsRequest{}) + if err != nil { + t.Fatalf("unable to list bob channels: %v", err) + } + if len(res.Channels) == 0 { + t.Fatalf("bob list of channels is empty") + } + + // Finally, immediately close the channel. 
This function will also + // block until the channel is closed and will additionally assert the + // relevant channel closing post conditions. + chanPoint := &lnrpc.ChannelPoint{ + FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ + FundingTxidBytes: pendingUpdate.Txid, + }, + OutputIndex: pendingUpdate.OutputIndex, + } + ctxt, cancel = context.WithTimeout(ctxb, channelCloseTimeout) + defer cancel() + closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) +} + // txStr returns the string representation of the channel's funding transaction. func txStr(chanPoint *lnrpc.ChannelPoint) string { fundingTxID, err := lnd.GetChanPointFundingTxid(chanPoint) @@ -1625,8 +1971,9 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { // Alice knows about the channel policy of Carol and should therefore // not be able to find a path during routing. + expErr := channeldb.FailureReasonNoRoute.Error() if err == nil || - !strings.Contains(err.Error(), "unable to find a path") { + !strings.Contains(err.Error(), expErr) { t.Fatalf("expected payment to fail, instead got %v", err) } @@ -2173,13 +2520,46 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) { closeReorgedChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) } -// testDisconnectingTargetPeer performs a test which -// disconnects Alice-peer from Bob-peer and then re-connects them again +// testDisconnectingTargetPeer performs a test which disconnects Alice-peer from +// Bob-peer and then re-connects them again. We expect Alice to be able to +// disconnect at any point. func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() + // We'll start both nodes with a high backoff so that they don't + // reconnect automatically during our test. 
+ args := []string{ + "--minbackoff=1m", + "--maxbackoff=1m", + } + + alice, err := net.NewNode("Alice", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, alice) + + bob, err := net.NewNode("Bob", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, bob) + + // Start by connecting Alice and Bob with no channels. + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, alice, bob); err != nil { + t.Fatalf("unable to connect Alice's peer to Bob's: err %v", err) + } + // Check existing connection. - assertNumConnections(t, net.Alice, net.Bob, 1) + assertNumConnections(t, alice, bob, 1) + + // Give Alice some coins so she can fund a channel. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, alice) + if err != nil { + t.Fatalf("unable to send coins to carol: %v", err) + } chanAmt := lnd.MaxBtcFundingAmount pushAmt := btcutil.Amount(0) @@ -2187,30 +2567,31 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { // Create a new channel that requires 1 confs before it's considered // open, then broadcast the funding transaction const numConfs = 1 - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - pendingUpdate, err := net.OpenPendingChannel(ctxt, net.Alice, net.Bob, - chanAmt, pushAmt) + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + pendingUpdate, err := net.OpenPendingChannel( + ctxt, alice, bob, chanAmt, pushAmt, + ) if err != nil { t.Fatalf("unable to open channel: %v", err) } - // At this point, the channel's funding transaction will have - // been broadcast, but not confirmed. Alice and Bob's nodes - // should reflect this when queried via RPC. + // At this point, the channel's funding transaction will have been + // broadcast, but not confirmed. Alice and Bob's nodes should reflect + // this when queried via RPC. 
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 1) + assertNumOpenChannelsPending(ctxt, t, alice, bob, 1) - // Disconnect Alice-peer from Bob-peer and get error - // causes by one pending channel with detach node is existing. - if err := net.DisconnectNodes(ctxt, net.Alice, net.Bob); err == nil { + // Disconnect Alice-peer from Bob-peer and get error causes by one + // pending channel with detach node is existing. + if err := net.DisconnectNodes(ctxt, alice, bob); err != nil { t.Fatalf("Bob's peer was disconnected from Alice's"+ " while one pending channel is existing: err %v", err) } time.Sleep(time.Millisecond * 300) - // Check existing connection. - assertNumConnections(t, net.Alice, net.Bob, 1) + // Assert that the connection was torn down. + assertNumConnections(t, alice, bob, 0) fundingTxID, err := chainhash.NewHash(pendingUpdate.Txid) if err != nil { @@ -2224,15 +2605,21 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { block := mineBlocks(t, net, numConfs, 1)[0] assertTxInBlock(t, block, fundingTxID) - // At this point, the channel should be fully opened and there should - // be no pending channels remaining for either node. + // At this point, the channel should be fully opened and there should be + // no pending channels remaining for either node. time.Sleep(time.Millisecond * 300) ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0) + assertNumOpenChannelsPending(ctxt, t, alice, bob, 0) - // The channel should be listed in the peer information returned by - // both peers. + // Reconnect the nodes so that the channel can become active. 
+ ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, alice, bob); err != nil { + t.Fatalf("unable to connect Alice's peer to Bob's: err %v", err) + } + + // The channel should be listed in the peer information returned by both + // peers. outPoint := wire.OutPoint{ Hash: *fundingTxID, Index: pendingUpdate.OutputIndex, @@ -2240,17 +2627,33 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { // Check both nodes to ensure that the channel is ready for operation. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.AssertChannelExists(ctxt, net.Alice, &outPoint); err != nil { + if err := net.AssertChannelExists(ctxt, alice, &outPoint); err != nil { t.Fatalf("unable to assert channel existence: %v", err) } ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.AssertChannelExists(ctxt, net.Bob, &outPoint); err != nil { + if err := net.AssertChannelExists(ctxt, bob, &outPoint); err != nil { t.Fatalf("unable to assert channel existence: %v", err) } - // Finally, immediately close the channel. This function will also - // block until the channel is closed and will additionally assert the - // relevant channel closing post conditions. + // Disconnect Alice-peer from Bob-peer and get error causes by one + // active channel with detach node is existing. + if err := net.DisconnectNodes(ctxt, alice, bob); err != nil { + t.Fatalf("Bob's peer was disconnected from Alice's"+ + " while one active channel is existing: err %v", err) + } + + // Check existing connection. + assertNumConnections(t, alice, bob, 0) + + // Reconnect both nodes before force closing the channel. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, alice, bob); err != nil { + t.Fatalf("unable to connect Alice's peer to Bob's: err %v", err) + } + + // Finally, immediately close the channel. 
This function will also block + // until the channel is closed and will additionally assert the relevant + // channel closing post conditions. chanPoint := &lnrpc.ChannelPoint{ FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ FundingTxidBytes: pendingUpdate.Txid, @@ -2258,48 +2661,30 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { OutputIndex: pendingUpdate.OutputIndex, } - // Disconnect Alice-peer from Bob-peer and get error - // causes by one active channel with detach node is existing. - if err := net.DisconnectNodes(ctxt, net.Alice, net.Bob); err == nil { - t.Fatalf("Bob's peer was disconnected from Alice's"+ - " while one active channel is existing: err %v", err) - } + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert(ctxt, t, net, alice, chanPoint, true) - // Check existing connection. - assertNumConnections(t, net.Alice, net.Bob, 1) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, true) - - // Disconnect Alice-peer from Bob-peer without getting error - // about existing channels. - var predErr error - err = wait.Predicate(func() bool { - if err := net.DisconnectNodes(ctxt, net.Alice, net.Bob); err != nil { - predErr = err - return false - } - return true - }, time.Second*15) - if err != nil { + // Disconnect Alice-peer from Bob-peer without getting error about + // existing channels. + if err := net.DisconnectNodes(ctxt, alice, bob); err != nil { t.Fatalf("unable to disconnect Bob's peer from Alice's: err %v", - predErr) + err) } // Check zero peer connections. - assertNumConnections(t, net.Alice, net.Bob, 0) + assertNumConnections(t, alice, bob, 0) // Finally, re-connect both nodes. 
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, net.Bob); err != nil { + if err := net.ConnectNodes(ctxt, alice, bob); err != nil { t.Fatalf("unable to connect Alice's peer to Bob's: err %v", err) } // Check existing connection. - assertNumConnections(t, net.Alice, net.Bob, 1) + assertNumConnections(t, alice, net.Bob, 1) // Cleanup by mining the force close and sweep transaction. - cleanupForceClose(t, net, net.Alice, chanPoint) + cleanupForceClose(t, net, alice, chanPoint) } // testFundingPersistence is intended to ensure that the Funding Manager @@ -2491,9 +2876,14 @@ func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) { "timeout: %v", err) } + cType, err := channelCommitType(net.Alice, chanPoint) + if err != nil { + t.Fatalf("unable to get channel type: %v", err) + } + // As this is a single funder channel, Alice's balance should be // exactly 0.5 BTC since now state transitions have taken place yet. - checkChannelBalance(net.Alice, amount-calcStaticFee(0)) + checkChannelBalance(net.Alice, amount-cType.calcStaticFee(0)) // Ensure Bob currently has no available balance within the channel. checkChannelBalance(net.Bob, 0) @@ -2759,13 +3149,80 @@ func padCLTV(cltv uint32) uint32 { // total of 3 + n transactions will be broadcast, representing the commitment // transaction, a transaction sweeping the local CSV delayed output, a // transaction sweeping the CSV delayed 2nd-layer htlcs outputs, and n -// htlc success transactions, where n is the number of payments Alice attempted +// htlc timeout transactions, where n is the number of payments Alice attempted // to send to Carol. This test includes several restarts to ensure that the // transaction output states are persisted throughout the forced closure // process. // // TODO(roasbeef): also add an unsettled HTLC before force closing. 
func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { + // We'll test the scenario for some of the commitment types, to ensure + // outputs can be swept. + commitTypes := []commitType{ + commitTypeLegacy, + commitTypeAnchors, + } + + for _, channelType := range commitTypes { + testName := fmt.Sprintf("committype=%v", channelType) + + success := t.t.Run(testName, func(t *testing.T) { + ht := newHarnessTest(t, net) + + args := channelType.Args() + alice, err := net.NewNode("Alice", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, ht, alice) + + // Since we'd like to test failure scenarios with + // outstanding htlcs, we'll introduce another node into + // our test network: Carol. + carolArgs := []string{"--hodl.exit-settle"} + carolArgs = append(carolArgs, args...) + carol, err := net.NewNode("Carol", carolArgs) + if err != nil { + t.Fatalf("unable to create new nodes: %v", err) + } + defer shutdownAndAssert(net, ht, carol) + + // Each time, we'll send Alice new set of coins in + // order to fund the channel. + ctxt, _ := context.WithTimeout( + context.Background(), defaultTimeout, + ) + err = net.SendCoins( + ctxt, btcutil.SatoshiPerBitcoin, alice, + ) + if err != nil { + t.Fatalf("unable to send coins to Alice: %v", + err) + } + + // Also give Carol some coins to allow her to sweep her + // anchor. 
+ err = net.SendCoins( + ctxt, btcutil.SatoshiPerBitcoin, carol, + ) + if err != nil { + t.Fatalf("unable to send coins to Alice: %v", + err) + } + + channelForceClosureTest( + net, ht, alice, carol, channelType, + ) + }) + if !success { + return + } + } +} + +func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, + alice, carol *lntest.HarnessNode, channelType commitType) { + ctxb := context.Background() const ( @@ -2779,18 +3236,10 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // instead, or make delay a param defaultCLTV := uint32(lnd.DefaultBitcoinTimeLockDelta) - // Since we'd like to test failure scenarios with outstanding htlcs, - // we'll introduce another node into our test network: Carol. - carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - // We must let Alice have an open channel before she can send a node // announcement, so we open a channel with Carol, ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, carol); err != nil { + if err := net.ConnectNodes(ctxt, alice, carol); err != nil { t.Fatalf("unable to connect alice to carol: %v", err) } @@ -2808,7 +3257,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, + ctxt, t, net, alice, carol, lntest.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, @@ -2818,7 +3267,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Wait for Alice and Carol to receive the channel edge from the // funding manager. 
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) + err = alice.WaitForNetworkChannelOpen(ctxt, chanPoint) if err != nil { t.Fatalf("alice didn't see the alice->carol channel before "+ "timeout: %v", err) @@ -2835,7 +3284,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -2855,7 +3304,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Once the HTLC has cleared, all the nodes n our mini network should // show that the HTLC has been locked in. - nodes := []*lntest.HarnessNode{net.Alice, carol} + nodes := []*lntest.HarnessNode{alice, carol} var predErr error err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, numInvoices) @@ -2885,7 +3334,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { ) ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceChan, err := getChanInfo(ctxt, net.Alice) + aliceChan, err := getChanInfo(ctxt, alice) if err != nil { t.Fatalf("unable to get alice's channel info: %v", err) } @@ -2898,7 +3347,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // the commitment transaction was immediately broadcast in order to // fulfill the force closure request. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - _, closingTxID, err := net.CloseChannel(ctxt, net.Alice, chanPoint, true) + _, closingTxID, err := net.CloseChannel(ctxt, alice, chanPoint, true) if err != nil { t.Fatalf("unable to execute force channel closure: %v", err) } @@ -2907,7 +3356,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // PendingChannels RPC under the waiting close section. 
pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels(ctxt, pendingChansRequest) + pendingChanResp, err := alice.PendingChannels(ctxt, pendingChansRequest) if err != nil { t.Fatalf("unable to query for pending channels: %v", err) } @@ -2943,13 +3392,21 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // when the system comes back on line. This restart tests state // persistence at the beginning of the process, when the commitment // transaction has been broadcast but not yet confirmed in a block. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } // Mine a block which should confirm the commitment transaction - // broadcast as a result of the force closure. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + // broadcast as a result of the force closure. If there are anchors, we + // also expect the anchor sweep tx to be in the mempool. + expectedTxes := 1 + if channelType == commitTypeAnchors { + expectedTxes = 2 + } + + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) if err != nil { t.Fatalf("failed to find commitment in miner mempool: %v", err) } @@ -2960,52 +3417,52 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Now that the commitment has been confirmed, the channel should be // marked as force closed. 
- err = wait.Predicate(func() bool { + err = wait.NoError(func() error { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( + pendingChanResp, err := alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ + return fmt.Errorf("unable to query for pending "+ "channels: %v", err) - return false } - predErr = checkNumForceClosedChannels(pendingChanResp, 1) - if predErr != nil { - return false + err = checkNumForceClosedChannels(pendingChanResp, 1) + if err != nil { + return err } - forceClose, predErr := findForceClosedChannel( - pendingChanResp, &op, - ) - if predErr != nil { - return false + forceClose, err := findForceClosedChannel(pendingChanResp, &op) + if err != nil { + return err } // Now that the channel has been force closed, it should now // have the height and number of blocks to confirm populated. - predErr = checkCommitmentMaturity( + err = checkCommitmentMaturity( forceClose, commCsvMaturityHeight, int32(defaultCSV), ) - if predErr != nil { - return false + if err != nil { + return err } // None of our outputs have been swept, so they should all be in - // limbo. + // limbo. For anchors, we expect the anchor amount to be + // recovered. 
if forceClose.LimboBalance == 0 { - predErr = errors.New("all funds should still be in " + + return errors.New("all funds should still be in " + "limbo") - return false } - if forceClose.RecoveredBalance != 0 { - predErr = errors.New("no funds should yet be shown " + + expectedRecoveredBalance := int64(0) + if channelType == commitTypeAnchors { + expectedRecoveredBalance = anchorSize + } + if forceClose.RecoveredBalance != expectedRecoveredBalance { + return errors.New("no funds should yet be shown " + "as recovered") - return false } - return true + return nil }, 15*time.Second) if err != nil { t.Fatalf(predErr.Error()) @@ -3015,30 +3472,33 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // force close commitment transaction have been persisted once the // transaction has been confirmed, but before the outputs are spendable // (the "kindergarten" bucket.) - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } // Carol's sweep tx should be in the mempool already, as her output is - // not timelocked. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + // not timelocked. If there are anchors, we also expect Carol's anchor + // sweep now. + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, + ) if err != nil { t.Fatalf("failed to find Carol's sweep in miner mempool: %v", err) } // Currently within the codebase, the default CSV is 4 relative blocks. - // For the persistence test, we generate three blocks, then trigger + // For the persistence test, we generate two blocks, then trigger // a restart and then generate the final block that should trigger // the creation of the sweep transaction. 
- if _, err := net.Miner.Node.Generate(defaultCSV - 1); err != nil { + if _, err := net.Miner.Node.Generate(defaultCSV - 2); err != nil { t.Fatalf("unable to mine blocks: %v", err) } // The following restart checks to ensure that outputs in the // kindergarten bucket are persisted while waiting for the required // number of confirmations to be reported. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3046,7 +3506,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // channels with her funds still in limbo. err = wait.NoError(func() error { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( + pendingChanResp, err := alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -3067,12 +3527,12 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { } // At this point, the nursery should show that the commitment - // output has 1 block left before its CSV delay expires. In + // output has 2 block left before its CSV delay expires. In // total, we have mined exactly defaultCSV blocks, so the htlc // outputs should also reflect that this many blocks have // passed. 
err = checkCommitmentMaturity( - forceClose, commCsvMaturityHeight, 1, + forceClose, commCsvMaturityHeight, 2, ) if err != nil { return err @@ -3083,7 +3543,11 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { return errors.New("all funds should still be in " + "limbo") } - if forceClose.RecoveredBalance != 0 { + expectedRecoveredBalance := int64(0) + if channelType == commitTypeAnchors { + expectedRecoveredBalance = anchorSize + } + if forceClose.RecoveredBalance != expectedRecoveredBalance { return errors.New("no funds should yet be shown " + "as recovered") } @@ -3100,9 +3564,9 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to mine blocks: %v", err) } - // At this point, the sweeping transaction should now be broadcast. So - // we fetch the node's mempool to ensure it has been properly - // broadcast. + // At this point, the CSV will expire in the next block, meaning that + // the sweeping transaction should now be broadcast. So we fetch the + // node's mempool to ensure it has been properly broadcast. sweepingTXID, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) if err != nil { t.Fatalf("failed to get sweep tx from mempool: %v", err) @@ -3124,7 +3588,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Restart Alice to ensure that she resumes watching the finalized // commitment sweep txid. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3152,7 +3616,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Now that the commit output has been fully swept, check to see // that the channel remains open for the pending htlc outputs. 
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( + pendingChanResp, err := alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -3203,7 +3667,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // of blocks we have generated since adding it to the nursery, and take // an additional block off so that we end up one block shy of the expiry // height, and add the block padding. - cltvHeightDelta := padCLTV(defaultCLTV - defaultCSV - 2 - 1) + cltvHeightDelta := padCLTV(defaultCLTV - defaultCSV - 1 - 1) // Advance the blockchain until just before the CLTV expires, nothing // exciting should have happened during this time. @@ -3215,7 +3679,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // We now restart Alice, to ensure that she will broadcast the presigned // htlc timeout txns after the delay expires after experiencing a while // waiting for the htlc outputs to incubate. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3223,7 +3687,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // channels with one pending HTLC. err = wait.NoError(func() error { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( + pendingChanResp, err := alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -3316,7 +3780,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // With the htlc timeout txns still in the mempool, we restart Alice to // verify that she can resume watching the htlc txns she broadcasted // before crashing. 
- if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3330,7 +3794,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Alice is restarted here to ensure that she promptly moved the crib // outputs to the kindergarten bucket after the htlc timeout txns were // confirmed. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3342,7 +3806,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Restart Alice to ensure that she can recover from a failure before // having graduated the htlc outputs in the kindergarten bucket. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3351,7 +3815,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // as pending force closed. err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = net.Alice.PendingChannels( + pendingChanResp, err = alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -3443,7 +3907,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // The following restart checks to ensure that the nursery store is // storing the txid of the previously broadcast htlc sweep txn, and that // it begins watching that txid after restarting. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3452,7 +3916,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // as pending force closed. 
err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( + pendingChanResp, err := alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -3501,7 +3965,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // up within the pending channels RPC. err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( + pendingChanResp, err := alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -3546,6 +4010,83 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { } } +// assertAmountSent generates a closure which queries listchannels for sndr and +// rcvr, and asserts that sndr sent amt satoshis, and that rcvr received amt +// satoshis. +// +// NOTE: This method assumes that each node only has one channel, and it is the +// channel used to send the payment. +func assertAmountSent(amt btcutil.Amount, sndr, rcvr *lntest.HarnessNode) func() error { + return func() error { + // Both channels should also have properly accounted from the + // amount that has been sent/received over the channel. 
+ listReq := &lnrpc.ListChannelsRequest{} + ctxb := context.Background() + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + sndrListChannels, err := sndr.ListChannels(ctxt, listReq) + if err != nil { + return fmt.Errorf("unable to query for %s's channel "+ + "list: %v", sndr.Name(), err) + } + sndrSatoshisSent := sndrListChannels.Channels[0].TotalSatoshisSent + if sndrSatoshisSent != int64(amt) { + return fmt.Errorf("%s's satoshis sent is incorrect "+ + "got %v, expected %v", sndr.Name(), + sndrSatoshisSent, amt) + } + + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + rcvrListChannels, err := rcvr.ListChannels(ctxt, listReq) + if err != nil { + return fmt.Errorf("unable to query for %s's channel "+ + "list: %v", rcvr.Name(), err) + } + rcvrSatoshisReceived := rcvrListChannels.Channels[0].TotalSatoshisReceived + if rcvrSatoshisReceived != int64(amt) { + return fmt.Errorf("%s's satoshis received is "+ + "incorrect got %v, expected %v", rcvr.Name(), + rcvrSatoshisReceived, amt) + } + + return nil + } +} + +// assertLastHTLCError checks that the last sent HTLC of the last payment sent +// by the given node failed with the expected failure code. 
+func assertLastHTLCError(t *harnessTest, node *lntest.HarnessNode, + code lnrpc.Failure_FailureCode) { + + req := &lnrpc.ListPaymentsRequest{ + IncludeIncomplete: true, + } + ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout) + paymentsResp, err := node.ListPayments(ctxt, req) + if err != nil { + t.Fatalf("error when obtaining payments: %v", err) + } + + payments := paymentsResp.Payments + if len(payments) == 0 { + t.Fatalf("no payments found") + } + + payment := payments[len(payments)-1] + htlcs := payment.Htlcs + if len(htlcs) == 0 { + t.Fatalf("no htlcs") + } + + htlc := htlcs[len(htlcs)-1] + if htlc.Failure == nil { + t.Fatalf("expected failure") + } + + if htlc.Failure.Code != code { + t.Fatalf("expected failure %v, got %v", code, htlc.Failure.Code) + } +} + // testSphinxReplayPersistence verifies that replayed onion packets are rejected // by a remote peer after a restart. We use a combination of unsafe // configuration arguments to force Carol to replay the same sphinx packet after @@ -3570,9 +4111,8 @@ func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) { defer shutdownAndAssert(net, t, dave) // Next, we'll create Carol and establish a channel to from her to - // Dave. Carol is started in both unsafe-replay and unsafe-disconnect, - // which will cause her to replay any pending Adds held in memory upon - // reconnection. + // Dave. Carol is started in both unsafe-replay which will cause her to + // replay any pending Adds held in memory upon reconnection. carol, err := net.NewNode("Carol", []string{"--unsafe-replay"}) if err != nil { t.Fatalf("unable to create new nodes: %v", err) @@ -3596,33 +4136,6 @@ func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) { }, ) - assertAmountSent := func(amt btcutil.Amount) { - // Both channels should also have properly accounted from the - // amount that has been sent/received over the channel. 
- listReq := &lnrpc.ListChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolListChannels, err := carol.ListChannels(ctxt, listReq) - if err != nil { - t.Fatalf("unable to query for alice's channel list: %v", err) - } - carolSatoshisSent := carolListChannels.Channels[0].TotalSatoshisSent - if carolSatoshisSent != int64(amt) { - t.Fatalf("Carol's satoshis sent is incorrect got %v, expected %v", - carolSatoshisSent, amt) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - daveListChannels, err := dave.ListChannels(ctxt, listReq) - if err != nil { - t.Fatalf("unable to query for Dave's channel list: %v", err) - } - daveSatoshisReceived := daveListChannels.Channels[0].TotalSatoshisReceived - if daveSatoshisReceived != int64(amt) { - t.Fatalf("Dave's satoshis received is incorrect got %v, expected %v", - daveSatoshisReceived, amt) - } - } - // Now that the channel is open, create an invoice for Dave which // expects a payment of 1000 satoshis from Carol paid via a particular // preimage. @@ -3687,8 +4200,12 @@ func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) { // With the payment sent but hedl, all balance related stats should not // have changed. - time.Sleep(time.Millisecond * 200) - assertAmountSent(0) + err = wait.InvariantNoError( + assertAmountSent(0, carol, dave), 3*time.Second, + ) + if err != nil { + t.Fatalf(err.Error()) + } // With the first payment sent, restart dave to make sure he is // persisting the information required to detect replayed sphinx @@ -3709,15 +4226,19 @@ func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) { // Construct the response we expect after sending a duplicate packet // that fails due to sphinx replay detection. 
- replayErr := "InvalidOnionKey" - if !strings.Contains(resp.PaymentError, replayErr) { - t.Fatalf("received payment error: %v, expected %v", - resp.PaymentError, replayErr) + if resp.PaymentError == "" { + t.Fatalf("expected payment error") } + assertLastHTLCError(t, carol, lnrpc.Failure_INVALID_ONION_KEY) // Since the payment failed, the balance should still be left // unaltered. - assertAmountSent(0) + err = wait.InvariantNoError( + assertAmountSent(0, carol, dave), 3*time.Second, + ) + if err != nil { + t.Fatalf(err.Error()) + } ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) closeChannelAndAssert(ctxt, t, net, carol, chanPoint, true) @@ -3726,13 +4247,33 @@ func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) { cleanupForceClose(t, net, carol, chanPoint) } -func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) { +func testListPayments(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() - // Open a channel with 100k satoshis between Alice and Bob with Alice being - // the sole funder of the channel. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) + // First start by deleting all payments that Alice knows of. This will + // allow us to execute the test with a clean state for Alice. + delPaymentsReq := &lnrpc.DeleteAllPaymentsRequest{} + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + if _, err := net.Alice.DeleteAllPayments(ctxt, delPaymentsReq); err != nil { + t.Fatalf("unable to delete payments: %v", err) + } + + // Check that there are no payments before test. 
+ reqInit := &lnrpc.ListPaymentsRequest{} + ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) + paymentsRespInit, err := net.Alice.ListPayments(ctxt, reqInit) + if err != nil { + t.Fatalf("error when obtaining Alice payments: %v", err) + } + if len(paymentsRespInit.Payments) != 0 { + t.Fatalf("incorrect number of payments, got %v, want %v", + len(paymentsRespInit.Payments), 0) + } + + // Open a channel with 100k satoshis between Alice and Bob with Alice + // being the sole funder of the channel. chanAmt := btcutil.Amount(100000) + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) chanPoint := openChannelAndAssert( ctxt, t, net, net.Alice, net.Bob, lntest.OpenChannelParams{ @@ -3740,45 +4281,18 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) { }, ) - assertAmountSent := func(amt btcutil.Amount) { - // Both channels should also have properly accounted from the - // amount that has been sent/received over the channel. - listReq := &lnrpc.ListChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceListChannels, err := net.Alice.ListChannels(ctxt, listReq) - if err != nil { - t.Fatalf("unable to query for alice's channel list: %v", err) - } - aliceSatoshisSent := aliceListChannels.Channels[0].TotalSatoshisSent - if aliceSatoshisSent != int64(amt) { - t.Fatalf("Alice's satoshis sent is incorrect got %v, expected %v", - aliceSatoshisSent, amt) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - bobListChannels, err := net.Bob.ListChannels(ctxt, listReq) - if err != nil { - t.Fatalf("unable to query for bob's channel list: %v", err) - } - bobSatoshisReceived := bobListChannels.Channels[0].TotalSatoshisReceived - if bobSatoshisReceived != int64(amt) { - t.Fatalf("Bob's satoshis received is incorrect got %v, expected %v", - bobSatoshisReceived, amt) - } - } - // Now that the channel is open, create an invoice for Bob which // expects a payment of 1000 satoshis from Alice paid via a particular // preimage. 
const paymentAmt = 1000 - preimage := bytes.Repeat([]byte("A"), 32) + preimage := bytes.Repeat([]byte("B"), 32) invoice := &lnrpc.Invoice{ Memo: "testing", RPreimage: preimage, Value: paymentAmt, } - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - invoiceResp, err := net.Bob.AddInvoice(ctxb, invoice) + addInvoiceCtxt, _ := context.WithTimeout(ctxb, defaultTimeout) + invoiceResp, err := net.Bob.AddInvoice(addInvoiceCtxt, invoice) if err != nil { t.Fatalf("unable to add invoice: %v", err) } @@ -3786,13 +4300,11 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) { // Wait for Alice to recognize and advertise the new channel generated // above. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { + if err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil { t.Fatalf("alice didn't advertise channel before "+ "timeout: %v", err) } - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { + if err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil { t.Fatalf("bob didn't advertise channel before "+ "timeout: %v", err) } @@ -3807,181 +4319,42 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) { if err != nil { t.Fatalf("unable to send payment: %v", err) } - - // Ensure we obtain the proper preimage in the response. if resp.PaymentError != "" { t.Fatalf("error when attempting recv: %v", resp.PaymentError) - } else if !bytes.Equal(preimage, resp.PaymentPreimage) { - t.Fatalf("preimage mismatch: expected %v, got %v", preimage, - resp.GetPaymentPreimage()) } - // Bob's invoice should now be found and marked as settled. - payHash := &lnrpc.PaymentHash{ - RHash: invoiceResp.RHash, - } + // Grab Alice's list of payments, she should show the existence of + // exactly one payment. 
+ req := &lnrpc.ListPaymentsRequest{} ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - dbInvoice, err := net.Bob.LookupInvoice(ctxt, payHash) + paymentsResp, err := net.Alice.ListPayments(ctxt, req) if err != nil { - t.Fatalf("unable to lookup invoice: %v", err) + t.Fatalf("error when obtaining Alice payments: %v", err) } - if !dbInvoice.Settled { - t.Fatalf("bob's invoice should be marked as settled: %v", - spew.Sdump(dbInvoice)) + if len(paymentsResp.Payments) != 1 { + t.Fatalf("incorrect number of payments, got %v, want %v", + len(paymentsResp.Payments), 1) } + p := paymentsResp.Payments[0] + path := p.Htlcs[len(p.Htlcs)-1].Route.Hops - // With the payment completed all balance related stats should be - // properly updated. - time.Sleep(time.Millisecond * 200) - assertAmountSent(paymentAmt) - - // Create another invoice for Bob, this time leaving off the preimage - // to one will be randomly generated. We'll test the proper - // encoding/decoding of the zpay32 payment requests. - invoice = &lnrpc.Invoice{ - Memo: "test3", - Value: paymentAmt, + // Ensure that the stored path shows a direct payment to Bob with no + // other nodes in-between. + if len(path) != 1 || path[0].PubKey != net.Bob.PubKeyStr { + t.Fatalf("incorrect path") } - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - invoiceResp, err = net.Bob.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) + + // The payment amount should also match our previous payment directly. + if p.Value != paymentAmt { + t.Fatalf("incorrect amount, got %v, want %v", + p.Value, paymentAmt) } - // Next send another payment, but this time using a zpay32 encoded - // invoice rather than manually specifying the payment details. 
- sendReq = &lnrpc.SendRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err = net.Alice.SendPaymentSync(ctxt, sendReq) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - if resp.PaymentError != "" { - t.Fatalf("error when attempting recv: %v", resp.PaymentError) - } - - // The second payment should also have succeeded, with the balances - // being update accordingly. - time.Sleep(time.Millisecond * 200) - assertAmountSent(paymentAmt * 2) - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -func testListPayments(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First start by deleting all payments that Alice knows of. This will - // allow us to execute the test with a clean state for Alice. - delPaymentsReq := &lnrpc.DeleteAllPaymentsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if _, err := net.Alice.DeleteAllPayments(ctxt, delPaymentsReq); err != nil { - t.Fatalf("unable to delete payments: %v", err) - } - - // Check that there are no payments before test. - reqInit := &lnrpc.ListPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsRespInit, err := net.Alice.ListPayments(ctxt, reqInit) - if err != nil { - t.Fatalf("error when obtaining Alice payments: %v", err) - } - if len(paymentsRespInit.Payments) != 0 { - t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsRespInit.Payments), 0) - } - - // Open a channel with 100k satoshis between Alice and Bob with Alice - // being the sole funder of the channel. 
- chanAmt := btcutil.Amount(100000) - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - // Now that the channel is open, create an invoice for Bob which - // expects a payment of 1000 satoshis from Alice paid via a particular - // preimage. - const paymentAmt = 1000 - preimage := bytes.Repeat([]byte("B"), 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - addInvoiceCtxt, _ := context.WithTimeout(ctxb, defaultTimeout) - invoiceResp, err := net.Bob.AddInvoice(addInvoiceCtxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - // Wait for Alice to recognize and advertise the new channel generated - // above. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil { - t.Fatalf("alice didn't advertise channel before "+ - "timeout: %v", err) - } - if err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint); err != nil { - t.Fatalf("bob didn't advertise channel before "+ - "timeout: %v", err) - } - - // With the invoice for Bob added, send a payment towards Alice paying - // to the above generated invoice. - sendReq := &lnrpc.SendRequest{ - PaymentRequest: invoiceResp.PaymentRequest, - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err := net.Alice.SendPaymentSync(ctxt, sendReq) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - if resp.PaymentError != "" { - t.Fatalf("error when attempting recv: %v", resp.PaymentError) - } - - // Grab Alice's list of payments, she should show the existence of - // exactly one payment. 
- req := &lnrpc.ListPaymentsRequest{} - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsResp, err := net.Alice.ListPayments(ctxt, req) - if err != nil { - t.Fatalf("error when obtaining Alice payments: %v", err) - } - if len(paymentsResp.Payments) != 1 { - t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsResp.Payments), 1) - } - p := paymentsResp.Payments[0] - - // Ensure that the stored path shows a direct payment to Bob with no - // other nodes in-between. - expectedPath := []string{ - net.Bob.PubKeyStr, - } - if !reflect.DeepEqual(p.Path, expectedPath) { - t.Fatalf("incorrect path, got %v, want %v", - p.Path, expectedPath) - } - - // The payment amount should also match our previous payment directly. - if p.Value != paymentAmt { - t.Fatalf("incorrect amount, got %v, want %v", - p.Value, paymentAmt) - } - - // The payment hash (or r-hash) should have been stored correctly. - correctRHash := hex.EncodeToString(invoiceResp.RHash) - if !reflect.DeepEqual(p.PaymentHash, correctRHash) { - t.Fatalf("incorrect RHash, got %v, want %v", - p.PaymentHash, correctRHash) + // The payment hash (or r-hash) should have been stored correctly. + correctRHash := hex.EncodeToString(invoiceResp.RHash) + if !reflect.DeepEqual(p.PaymentHash, correctRHash) { + t.Fatalf("incorrect RHash, got %v, want %v", + p.PaymentHash, correctRHash) } // As we made a single-hop direct payment, there should have been no fee @@ -4005,7 +4378,7 @@ func testListPayments(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("Can't delete payments at the end: %v", err) } - // Check that there are no payments before test. + // Check that there are no payments after test. 
listReq := &lnrpc.ListPaymentsRequest{} ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) paymentsResp, err = net.Alice.ListPayments(ctxt, listReq) @@ -4165,7 +4538,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) { // // First, we'll create Dave and establish a channel to Alice. Dave will // be running an older node that requires the legacy onion payload. - daveArgs := []string{"--legacyprotocol.onion"} + daveArgs := []string{"--protocol.legacyonion"} dave, err := net.NewNode("Dave", daveArgs) if err != nil { t.Fatalf("unable to create new nodes: %v", err) @@ -4406,43 +4779,135 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) { closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) } -// testSingleHopSendToRoute tests that payments are properly processed -// through a provided route with a single hop. We'll create the -// following network topology: -// Alice --100k--> Bob -// We'll query the daemon for routes from Alice to Bob and then -// send payments through the route. +type singleHopSendToRouteCase struct { + name string + + // streaming tests streaming SendToRoute if true, otherwise tests + // synchronous SenToRoute. + streaming bool + + // routerrpc submits the request to the routerrpc subserver if true, + // otherwise submits to the main rpc server. + routerrpc bool + + // mpp sets the MPP fields on the request if true, otherwise submits a + // regular payment. 
+ mpp bool +} + +var singleHopSendToRouteCases = []singleHopSendToRouteCase{ + { + name: "regular main sync", + }, + { + name: "regular main stream", + streaming: true, + }, + { + name: "regular routerrpc sync", + routerrpc: true, + }, + { + name: "mpp main sync", + mpp: true, + }, + { + name: "mpp main stream", + streaming: true, + mpp: true, + }, + { + name: "mpp routerrpc sync", + routerrpc: true, + mpp: true, + }, +} + +// testSingleHopSendToRoute tests that payments are properly processed through a +// provided route with a single hop. We'll create the following network +// topology: +// Carol --100k--> Dave +// We'll query the daemon for routes from Carol to Dave and then send payments +// by feeding the route back into the various SendToRoute RPC methods. Here we +// test all three SendToRoute endpoints, forcing each to perform both a regular +// payment and an MPP payment. func testSingleHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() + for _, test := range singleHopSendToRouteCases { + test := test + + t.t.Run(test.name, func(t1 *testing.T) { + ht := newHarnessTest(t1, t.lndHarness) + ht.RunTestCase(&testCase{ + name: test.name, + test: func(_ *lntest.NetworkHarness, tt *harnessTest) { + testSingleHopSendToRouteCase(net, tt, test) + }, + }) + }) + } +} + +func testSingleHopSendToRouteCase(net *lntest.NetworkHarness, t *harnessTest, + test singleHopSendToRouteCase) { const chanAmt = btcutil.Amount(100000) + const paymentAmtSat = 1000 + const numPayments = 5 + const amountPaid = int64(numPayments * paymentAmtSat) + + ctxb := context.Background() var networkChans []*lnrpc.ChannelPoint - // Open a channel with 100k satoshis between Alice and Bob with Alice + // Create Carol and Dave, then establish a channel between them. Carol + // is the sole funder of the channel with 100k satoshis. 
The network + // topology should look like: + // Carol -> 100k -> Dave + carol, err := net.NewNode("Carol", nil) + if err != nil { + t.Fatalf("unable to create new nodes: %v", err) + } + defer shutdownAndAssert(net, t, carol) + + dave, err := net.NewNode("Dave", nil) + if err != nil { + t.Fatalf("unable to create new nodes: %v", err) + } + defer shutdownAndAssert(net, t, dave) + + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, carol, dave); err != nil { + t.Fatalf("unable to connect carol to dave: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, carol) + if err != nil { + t.Fatalf("unable to send coins to carol: %v", err) + } + + // Open a channel with 100k satoshis between Carol and Dave with Carol // being the sole funder of the channel. - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPointAlice := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + chanPointCarol := openChannelAndAssert( + ctxt, t, net, carol, dave, lntest.OpenChannelParams{ Amt: chanAmt, }, ) - networkChans = append(networkChans, chanPointAlice) + networkChans = append(networkChans, chanPointCarol) - aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice) + carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol) if err != nil { t.Fatalf("unable to get txid: %v", err) } - aliceFundPoint := wire.OutPoint{ - Hash: *aliceChanTXID, - Index: chanPointAlice.OutputIndex, + carolFundPoint := wire.OutPoint{ + Hash: *carolChanTXID, + Index: chanPointCarol.OutputIndex, } // Wait for all nodes to have seen all channels. 
- nodes := []*lntest.HarnessNode{net.Alice, net.Bob} - nodeNames := []string{"Alice", "Bob"} + nodes := []*lntest.HarnessNode{carol, dave} for _, chanPoint := range networkChans { - for i, node := range nodes { + for _, node := range nodes { txid, err := lnd.GetChanPointFundingTxid(chanPoint) if err != nil { t.Fatalf("unable to get txid: %v", err) @@ -4456,111 +4921,299 @@ func testSingleHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) { err = node.WaitForNetworkChannelOpen(ctxt, chanPoint) if err != nil { t.Fatalf("%s(%d): timeout waiting for "+ - "channel(%s) open: %v", nodeNames[i], + "channel(%s) open: %v", node.Name(), node.NodeID, point, err) } } } - // Query for routes to pay from Alice to Bob. + // Create invoices for Dave, which expect a payment from Carol. + payReqs, rHashes, _, err := createPayReqs( + dave, paymentAmtSat, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) + } + + // Reconstruct payment addresses. + var payAddrs [][]byte + for _, payReq := range payReqs { + ctx, _ := context.WithTimeout( + context.Background(), defaultTimeout, + ) + resp, err := dave.DecodePayReq( + ctx, + &lnrpc.PayReqString{PayReq: payReq}, + ) + if err != nil { + t.Fatalf("decode pay req: %v", err) + } + payAddrs = append(payAddrs, resp.PaymentAddr) + } + + // Query for routes to pay from Carol to Dave. // We set FinalCltvDelta to 40 since by default QueryRoutes returns // the last hop with a final cltv delta of 9 where as the default in // htlcswitch is 40. 
- const paymentAmt = 1000 routesReq := &lnrpc.QueryRoutesRequest{ - PubKey: net.Bob.PubKeyStr, - Amt: paymentAmt, + PubKey: dave.PubKeyStr, + Amt: paymentAmtSat, FinalCltvDelta: lnd.DefaultBitcoinTimeLockDelta, } ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - routes, err := net.Alice.QueryRoutes(ctxt, routesReq) + routes, err := carol.QueryRoutes(ctxt, routesReq) if err != nil { - t.Fatalf("unable to get route: %v", err) + t.Fatalf("unable to get route from %s: %v", + carol.Name(), err) } - // Create 5 invoices for Bob, which expect a payment from Alice for 1k - // satoshis with a different preimage each time. - const numPayments = 5 - _, rHashes, _, err := createPayReqs( - net.Bob, paymentAmt, numPayments, - ) - if err != nil { - t.Fatalf("unable to create pay reqs: %v", err) - } + // There should only be one route to try, so take the first item. + r := routes.Routes[0] - // We'll wait for all parties to recognize the new channels within the - // network. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPointAlice) - if err != nil { - t.Fatalf("alice didn't advertise her channel in time: %v", err) + // Construct a closure that will set MPP fields on the route, which + // allows us to test MPP payments. + setMPPFields := func(i int) { + hop := r.Hops[len(r.Hops)-1] + hop.TlvPayload = true + hop.MppRecord = &lnrpc.MPPRecord{ + PaymentAddr: payAddrs[i], + TotalAmtMsat: paymentAmtSat * 1000, + } } - time.Sleep(time.Millisecond * 50) - - // Using Alice as the source, pay to the 5 invoices from Carol created - // above. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - alicePayStream, err := net.Alice.SendToRoute(ctxt) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } + // Construct closures for each of the payment types covered: + // - main rpc server sync + // - main rpc server streaming + // - routerrpc server sync + sendToRouteSync := func() { + for i, rHash := range rHashes { + // Populate the MPP fields for the final hop if we are + // testing MPP payments. + if test.mpp { + setMPPFields(i) + } - for _, rHash := range rHashes { - sendReq := &lnrpc.SendToRouteRequest{ - PaymentHash: rHash, - Route: routes.Routes[0], + sendReq := &lnrpc.SendToRouteRequest{ + PaymentHash: rHash, + Route: r, + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + resp, err := carol.SendToRouteSync( + ctxt, sendReq, + ) + if err != nil { + t.Fatalf("unable to send to route for "+ + "%s: %v", carol.Name(), err) + } + if resp.PaymentError != "" { + t.Fatalf("received payment error from %s: %v", + carol.Name(), resp.PaymentError) + } } - err := alicePayStream.Send(sendReq) - + } + sendToRouteStream := func() { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + alicePayStream, err := carol.SendToRoute(ctxt) if err != nil { - t.Fatalf("unable to send payment: %v", err) + t.Fatalf("unable to create payment stream for "+ + "carol: %v", err) } - } - for range rHashes { - resp, err := alicePayStream.Recv() - if err != nil { - t.Fatalf("unable to send payment: %v", err) + for i, rHash := range rHashes { + // Populate the MPP fields for the final hop if we are + // testing MPP payments. 
+ if test.mpp { + setMPPFields(i) + } + + sendReq := &lnrpc.SendToRouteRequest{ + PaymentHash: rHash, + Route: routes.Routes[0], + } + err := alicePayStream.Send(sendReq) + + if err != nil { + t.Fatalf("unable to send payment: %v", err) + } + + resp, err := alicePayStream.Recv() + if err != nil { + t.Fatalf("unable to send payment: %v", err) + } + if resp.PaymentError != "" { + t.Fatalf("received payment error: %v", + resp.PaymentError) + } } - if resp.PaymentError != "" { - t.Fatalf("received payment error: %v", resp.PaymentError) + } + sendToRouteRouterRPC := func() { + for i, rHash := range rHashes { + // Populate the MPP fields for the final hop if we are + // testing MPP payments. + if test.mpp { + setMPPFields(i) + } + + sendReq := &routerrpc.SendToRouteRequest{ + PaymentHash: rHash, + Route: r, + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + resp, err := carol.RouterClient.SendToRoute( + ctxt, sendReq, + ) + if err != nil { + t.Fatalf("unable to send to route for "+ + "%s: %v", carol.Name(), err) + } + if resp.Failure != nil { + t.Fatalf("received payment error from %s: %v", + carol.Name(), resp.Failure) + } } } - req := &lnrpc.ListPaymentsRequest{} + // Using Carol as the node as the source, send the payments + // synchronously via the the routerrpc's SendToRoute, or via the main RPC + // server's SendToRoute streaming or sync calls. + switch { + case !test.routerrpc && test.streaming: + sendToRouteStream() + case !test.routerrpc && !test.streaming: + sendToRouteSync() + case test.routerrpc && !test.streaming: + sendToRouteRouterRPC() + default: + t.Fatalf("routerrpc does not support streaming send_to_route") + } + + // Verify that the payment's from Carol's PoV have the correct payment + // hash and amount. 
ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) - paymentsResp, err := net.Alice.ListPayments(ctxt, req) + paymentsResp, err := carol.ListPayments( + ctxt, &lnrpc.ListPaymentsRequest{}, + ) if err != nil { - t.Fatalf("error when obtaining Alice payments: %v", err) + t.Fatalf("error when obtaining %s payments: %v", + carol.Name(), err) } - if len(paymentsResp.Payments) != 5 { + if len(paymentsResp.Payments) != numPayments { t.Fatalf("incorrect number of payments, got %v, want %v", - len(paymentsResp.Payments), 5) + len(paymentsResp.Payments), numPayments) } - // Verify that the ListPayments displays the payment without an invoice - // since the payment was completed with SendToRoute. - for _, p := range paymentsResp.Payments { + for i, p := range paymentsResp.Payments { + // Assert that the payment hashes for each payment match up. + rHashHex := hex.EncodeToString(rHashes[i]) + if p.PaymentHash != rHashHex { + t.Fatalf("incorrect payment hash for payment %d, "+ + "want: %s got: %s", + i, rHashHex, p.PaymentHash) + } + + // Assert that each payment has no invoice since the payment was + // completed using SendToRoute. if p.PaymentRequest != "" { - t.Fatalf("incorrect payreq, want: \"\", got: %v", - p.PaymentRequest) + t.Fatalf("incorrect payment request for payment: %d, "+ + "want: \"\", got: %s", + i, p.PaymentRequest) + } + + // Assert the payment ammount is correct. + if p.ValueSat != paymentAmtSat { + t.Fatalf("incorrect payment amt for payment %d, "+ + "want: %d, got: %d", + i, paymentAmtSat, p.ValueSat) + } + + // Assert exactly one htlc was made. + if len(p.Htlcs) != 1 { + t.Fatalf("expected 1 htlc for payment %d, got: %d", + i, len(p.Htlcs)) + } + + // Assert the htlc's route is populated. + htlc := p.Htlcs[0] + if htlc.Route == nil { + t.Fatalf("expected route for payment %d", i) + } + + // Assert the hop has exactly one hop. 
+ if len(htlc.Route.Hops) != 1 { + t.Fatalf("expected 1 hop for payment %d, got: %d", + i, len(htlc.Route.Hops)) + } + + // If this is an MPP test, assert the MPP record's fields are + // properly populated. Otherwise the hop should not have an MPP + // record. + hop := htlc.Route.Hops[0] + if test.mpp { + if hop.MppRecord == nil { + t.Fatalf("expected mpp record for mpp payment") + } + + if hop.MppRecord.TotalAmtMsat != paymentAmtSat*1000 { + t.Fatalf("incorrect mpp total msat for payment %d "+ + "want: %d, got: %d", + i, paymentAmtSat*1000, + hop.MppRecord.TotalAmtMsat) + } + + expAddr := payAddrs[i] + if !bytes.Equal(hop.MppRecord.PaymentAddr, expAddr) { + t.Fatalf("incorrect mpp payment addr for payment %d "+ + "want: %x, got: %x", + i, expAddr, hop.MppRecord.PaymentAddr) + } + } else if hop.MppRecord != nil { + t.Fatalf("unexpected mpp record for non-mpp payment") + } + } + + // Verify that the invoices's from Dave's PoV have the correct payment + // hash and amount. + ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) + invoicesResp, err := dave.ListInvoices( + ctxt, &lnrpc.ListInvoiceRequest{}, + ) + if err != nil { + t.Fatalf("error when obtaining %s payments: %v", + dave.Name(), err) + } + if len(invoicesResp.Invoices) != numPayments { + t.Fatalf("incorrect number of invoices, got %v, want %v", + len(invoicesResp.Invoices), numPayments) + } + + for i, inv := range invoicesResp.Invoices { + // Assert that the payment hashes match up. + if !bytes.Equal(inv.RHash, rHashes[i]) { + t.Fatalf("incorrect payment hash for invoice %d, "+ + "want: %x got: %x", + i, rHashes[i], inv.RHash) + } + + // Assert that the amount paid to the invoice is correct. 
+ if inv.AmtPaidSat != paymentAmtSat { + t.Fatalf("incorrect payment amt for invoice %d, "+ + "want: %d, got %d", + i, paymentAmtSat, inv.AmtPaidSat) } } // At this point all the channels within our proto network should be - // shifted by 5k satoshis in the direction of Bob, the sink within the + // shifted by 5k satoshis in the direction of Dave, the sink within the // payment flow generated above. The order of asserts corresponds to // increasing of time is needed to embed the HTLC in commitment - // transaction, in channel Alice->Bob, order is Bob and then Alice. - const amountPaid = int64(5000) - assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Bob, - aliceFundPoint, int64(0), amountPaid) - assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Alice, - aliceFundPoint, amountPaid, int64(0)) + // transaction, in channel Carol->Dave, order is Dave and then Carol. + assertAmountPaid(t, "Carol(local) => Dave(remote)", dave, + carolFundPoint, int64(0), amountPaid) + assertAmountPaid(t, "Carol(local) => Dave(remote)", carol, + carolFundPoint, amountPaid, int64(0)) ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAlice, false) + closeChannelAndAssert(ctxt, t, net, carol, chanPointCarol, false) } // testMultiHopSendToRoute tests that payments are properly processed @@ -4887,15 +5540,12 @@ func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) { // Open a channel between Alice and Bob, ensuring the // channel has been opened properly. ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanOpenUpdate, err := net.OpenChannel( - ctxt, net.Alice, net.Bob, + chanOpenUpdate := openChannelStream( + ctxt, t, net, net.Alice, net.Bob, lntest.OpenChannelParams{ Amt: amount, }, ) - if err != nil { - t.Fatalf("unable to open channel: %v", err) - } // Mine 2 blocks, and check that the channel is opened but not yet // announced to the network. 
@@ -5125,8 +5775,8 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to connect dave to alice: %v", err) } ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanOpenUpdate, err := net.OpenChannel( - ctxt, carol, net.Alice, + chanOpenUpdate := openChannelStream( + ctxt, t, net, carol, net.Alice, lntest.OpenChannelParams{ Amt: chanAmt, Private: true, @@ -6062,7 +6712,8 @@ func subscribeChannelNotifications(ctxb context.Context, t *harnessTest, // verifyCloseUpdate is used to verify that a closed channel update is of the // expected type. func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate, - force bool, forceType lnrpc.ChannelCloseSummary_ClosureType) error { + closeType lnrpc.ChannelCloseSummary_ClosureType, + closeInitiator lnrpc.Initiator) error { // We should receive one inactive and one closed notification // for each channel. @@ -6081,23 +6732,19 @@ func verifyCloseUpdate(chanUpdate *lnrpc.ChannelEventUpdate, chanUpdate.Type) } - switch force { - case true: - if update.ClosedChannel.CloseType != forceType { - return fmt.Errorf("channel closure type mismatch: "+ - "expected %v, got %v", - forceType, - update.ClosedChannel.CloseType) - } - case false: - if update.ClosedChannel.CloseType != - lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE { - return fmt.Errorf("channel closure type "+ - "mismatch: expected %v, got %v", - lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE, - update.ClosedChannel.CloseType) - } + if update.ClosedChannel.CloseType != closeType { + return fmt.Errorf("channel closure type "+ + "mismatch: expected %v, got %v", + closeType, + update.ClosedChannel.CloseType) } + + if update.ClosedChannel.CloseInitiator != closeInitiator { + return fmt.Errorf("expected close intiator: %v, got: %v", + closeInitiator, + update.ClosedChannel.CloseInitiator) + } + default: return fmt.Errorf("channel update channel of wrong type, "+ "expected closed channel, got %T", @@ -6140,25 +6787,34 @@ func 
testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTe // Since each of the channels just became open, Bob and Alice should // each receive an open and an active notification for each channel. var numChannelUpds int - const totalNtfns = 2 * numChannels + const totalNtfns = 3 * numChannels verifyOpenUpdatesReceived := func(sub channelSubscription) error { numChannelUpds = 0 for numChannelUpds < totalNtfns { select { case update := <-sub.updateChan: switch update.Type { - case lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL: - if numChannelUpds%2 != 1 { - return fmt.Errorf("expected open" + - "channel ntfn, got active " + + case lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL: + if numChannelUpds%3 != 0 { + return fmt.Errorf("expected " + + "open or active" + + "channel ntfn, got pending open " + "channel ntfn instead") } case lnrpc.ChannelEventUpdate_OPEN_CHANNEL: - if numChannelUpds%2 != 0 { - return fmt.Errorf("expected active" + + if numChannelUpds%3 != 1 { + return fmt.Errorf("expected " + + "pending open or active" + "channel ntfn, got open" + "channel ntfn instead") } + case lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL: + if numChannelUpds%3 != 2 { + return fmt.Errorf("expected " + + "pending open or open" + + "channel ntfn, got active " + + "channel ntfn instead") + } default: return fmt.Errorf("update type mismatch: "+ "expected open or active channel "+ @@ -6200,18 +6856,29 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTe // verifyCloseUpdatesReceived is used to verify that Alice and Bob // receive the correct channel updates in order. verifyCloseUpdatesReceived := func(sub channelSubscription, - forceType lnrpc.ChannelCloseSummary_ClosureType) error { + forceType lnrpc.ChannelCloseSummary_ClosureType, + closeInitiator lnrpc.Initiator) error { // Ensure one inactive and one closed notification is received for each // closed channel. 
numChannelUpds := 0 for numChannelUpds < 2*numChannels { - // Every other channel should be force closed. + expectedCloseType := lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE + + // Every other channel should be force closed. If this + // channel was force closed, set the expected close type + // the the type passed in. force := (numChannelUpds/2)%2 == 0 + if force { + expectedCloseType = forceType + } select { case chanUpdate := <-sub.updateChan: - err := verifyCloseUpdate(chanUpdate, force, forceType) + err := verifyCloseUpdate( + chanUpdate, expectedCloseType, + closeInitiator, + ) if err != nil { return err } @@ -6220,9 +6887,10 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTe case err := <-sub.errChan: return err case <-time.After(time.Second * 10): - return fmt.Errorf("timeout waiting for channel "+ - "notifications, only received %d/%d "+ - "chanupds", numChannelUpds, 2*numChannels) + return fmt.Errorf("timeout waiting "+ + "for channel notifications, only "+ + "received %d/%d chanupds", + numChannelUpds, 2*numChannels) } } @@ -6231,15 +6899,21 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTe // Verify Bob receives all closed channel notifications. He should // receive a remote force close notification for force closed channels. + // All channels (cooperatively and force closed) should have a remote + // close initiator because Alice closed the channels. if err := verifyCloseUpdatesReceived(bobChanSub, - lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE); err != nil { + lnrpc.ChannelCloseSummary_REMOTE_FORCE_CLOSE, + lnrpc.Initiator_INITIATOR_REMOTE); err != nil { t.Fatalf("errored verifying close updates: %v", err) } // Verify Alice receives all closed channel notifications. She should // receive a remote force close notification for force closed channels. + // All channels (cooperatively and force closed) should have a local + // close initiator because Alice closed the channels. 
if err := verifyCloseUpdatesReceived(aliceChanSub, - lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE); err != nil { + lnrpc.ChannelCloseSummary_LOCAL_FORCE_CLOSE, + lnrpc.Initiator_INITIATOR_LOCAL); err != nil { t.Fatalf("errored verifying close updates: %v", err) } } @@ -6281,15 +6955,12 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) { openStreams := make([]lnrpc.Lightning_OpenChannelClient, maxPendingChannels) for i := 0; i < maxPendingChannels; i++ { ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - stream, err := net.OpenChannel( - ctxt, net.Alice, carol, + stream := openChannelStream( + ctxt, t, net, net.Alice, carol, lntest.OpenChannelParams{ Amt: amount, }, ) - if err != nil { - t.Fatalf("unable to open channel: %v", err) - } openStreams[i] = stream } @@ -6410,6 +7081,27 @@ func waitForNTxsInMempool(miner *rpcclient.Client, n int, } } +// getNTxsFromMempool polls until finding the desired number of transactions in +// the provided miner's mempool and returns the full transactions to the caller. +func getNTxsFromMempool(miner *rpcclient.Client, n int, + timeout time.Duration) ([]*wire.MsgTx, error) { + + txids, err := waitForNTxsInMempool(miner, n, timeout) + if err != nil { + return nil, err + } + + var txes []*wire.MsgTx + for _, txid := range txids { + tx, err := miner.GetRawTransaction(txid) + if err != nil { + return nil, err + } + txes = append(txes, tx.MsgTx()) + } + return txes, nil +} + // testFailingChannel tests that we will fail the channel by force closing ii // in the case where a counterparty tries to settle an HTLC with the wrong // preimage. @@ -6883,10 +7575,12 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { // Carol will be the breached party. We set --nolisten to ensure Bob // won't be able to connect to her and trigger the channel data - // protection logic automatically. + // protection logic automatically. 
We also can't have Carol + // automatically re-connect too early, otherwise DLP would be initiated + // instead of the breach we want to provoke. carol, err := net.NewNode( "Carol", - []string{"--hodl.exit-settle", "--nolisten"}, + []string{"--hodl.exit-settle", "--nolisten", "--minbackoff=1h"}, ) if err != nil { t.Fatalf("unable to create new carol node: %v", err) @@ -7146,10 +7840,12 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness // Dave will be the breached party. We set --nolisten to ensure Carol // won't be able to connect to him and trigger the channel data - // protection logic automatically. + // protection logic automatically. We also can't have Dave automatically + // re-connect too early, otherwise DLP would be initiated instead of the + // breach we want to provoke. dave, err := net.NewNode( "Dave", - []string{"--hodl.exit-settle", "--nolisten"}, + []string{"--hodl.exit-settle", "--nolisten", "--minbackoff=1h"}, ) if err != nil { t.Fatalf("unable to create new node: %v", err) @@ -7569,6 +8265,13 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, checkCarolBalance(pushAmt - 3*paymentAmt) checkCarolNumUpdatesAtLeast(carolStateNumPreCopy + 1) + // Suspend Dave, such that Carol won't reconnect at startup, triggering + // the data loss protection. + restartDave, err := net.SuspendNode(dave) + if err != nil { + t.Fatalf("unable to suspend Dave: %v", err) + } + // Now we shutdown Carol, copying over the her temporary database state // which has the *prior* channel state over her current most up to date // state. With this, we essentially force Carol to travel back in time @@ -7620,17 +8323,13 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, t.Fatalf("expected closeTx(%v) in mempool, instead found %v", closeTxId, txid) } - time.Sleep(200 * time.Millisecond) // Generate a single block to mine the breach transaction. 
block := mineBlocks(t, net, 1, 1)[0] - // Wait so Dave receives a confirmation of Carol's breach transaction. - time.Sleep(200 * time.Millisecond) - - // We restart Dave to ensure that he is persisting his retribution - // state and continues exacting justice after her node restarts. - if err := net.RestartNode(dave, nil); err != nil { + // We resurrect Dave to ensure he will be exacting justice after his + // node restarts. + if err := restartDave(); err != nil { t.Fatalf("unable to stop Dave's node: %v", err) } @@ -8207,13 +8906,27 @@ func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode, // on chain as he has no funds in the channel. func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, carol *lntest.HarnessNode, carolStartingBalance int64, - dave *lntest.HarnessNode, daveStartingBalance int64) { + dave *lntest.HarnessNode, daveStartingBalance int64, + anchors bool) { + + // We disabled auto-reconnect for some tests to avoid timing issues. + // To make sure the nodes are initiating DLP now, we have to manually + // re-connect them. + ctxb := context.Background() + err := net.ConnectNodes(ctxb, carol, dave) + if err != nil && !strings.Contains(err.Error(), "already connected") { + t.Fatalf("unable to connect Carol to Dave to initiate DLP: %v", + err) + } // Upon reconnection, the nodes should detect that Dave is out of sync. // Carol should force close the channel using her latest commitment. - ctxb := context.Background() - forceClose, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, + expectedTxes := 1 + if anchors { + expectedTxes = 2 + } + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, ) if err != nil { t.Fatalf("unable to find Carol's force close tx in mempool: %v", @@ -8236,12 +8949,13 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, } // Generate a single block, which should confirm the closing tx. 
- block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, forceClose) + block := mineBlocks(t, net, 1, expectedTxes)[0] // Dave should sweep his funds immediately, as they are not timelocked. - daveSweep, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, + // We also expect Dave to sweep his anchor, if present. + + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, ) if err != nil { t.Fatalf("unable to find Dave's sweep tx in mempool: %v", err) @@ -8256,8 +8970,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, assertNumPendingChannels(t, carol, 0, 1) // Mine the sweep tx. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, daveSweep) + block = mineBlocks(t, net, 1, expectedTxes)[0] // Now Dave should consider the channel fully closed. assertNumPendingChannels(t, dave, 0, 0) @@ -8323,8 +9036,12 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) { // Carol will be the up-to-date party. We set --nolisten to ensure Dave // won't be able to connect to her and trigger the channel data - // protection logic automatically. - carol, err := net.NewNode("Carol", []string{"--nolisten"}) + // protection logic automatically. We also can't have Carol + // automatically re-connect too early, otherwise DLP would be initiated + // at the wrong moment. + carol, err := net.NewNode( + "Carol", []string{"--nolisten", "--minbackoff=1h"}, + ) if err != nil { t.Fatalf("unable to create new carol node: %v", err) } @@ -8499,6 +9216,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) { if err != nil { t.Fatalf("unable to suspend node: %v", err) } + return restart, chanPoint, balResp.ConfirmedBalance, nil } @@ -8527,6 +9245,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) { // on chain, and both of them properly carry out the DLP protocol. 
assertDLPExecuted( net, t, carol, carolStartingBalance, dave, daveStartingBalance, + false, ) // As a second part of this test, we will test the scenario where a @@ -8684,7 +9403,12 @@ func testHtlcErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("channel not seen by alice before timeout: %v", err) } - commitFee := calcStaticFee(0) + cType, err := channelCommitType(net.Alice, chanPointAlice) + if err != nil { + t.Fatalf("unable to get channel type: %v", err) + } + + commitFee := cType.calcStaticFee(0) assertBaseBalance := func() { balReq := &lnrpc.ChannelBalanceRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) @@ -8810,12 +9534,11 @@ out: t.Fatalf("payment should have been rejected due to invalid " + "payment hash") } - expectedErrorCode := lnwire.CodeIncorrectOrUnknownPaymentDetails.String() - if !strings.Contains(resp.PaymentError, expectedErrorCode) { - // TODO(roasbeef): make into proper gRPC error code - t.Fatalf("payment should have failed due to unknown payment hash, "+ - "instead failed due to: %v", resp.PaymentError) - } + + assertLastHTLCError( + t, net.Alice, + lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS, + ) // The balances of all parties should be the same as initially since // the HTLC was canceled. @@ -8842,18 +9565,11 @@ out: t.Fatalf("payment should have been rejected due to wrong " + "HTLC amount") } - expectedErrorCode = lnwire.CodeIncorrectOrUnknownPaymentDetails.String() - if !strings.Contains(resp.PaymentError, expectedErrorCode) { - t.Fatalf("payment should have failed due to wrong amount, "+ - "instead failed due to: %v", resp.PaymentError) - } - // We'll also ensure that the encoded error includes the invlaid HTLC - // amount. 
- if !strings.Contains(resp.PaymentError, htlcAmt.String()) { - t.Fatalf("error didn't include expected payment amt of %v: "+ - "%v", htlcAmt, resp.PaymentError) - } + assertLastHTLCError( + t, net.Alice, + lnrpc.Failure_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS, + ) // The balances of all parties should be the same as initially since // the HTLC was canceled. @@ -8932,12 +9648,12 @@ out: if resp.PaymentError == "" { t.Fatalf("payment should fail due to insufficient "+ "capacity: %v", err) - } else if !strings.Contains(resp.PaymentError, - lnwire.CodeTemporaryChannelFailure.String()) { - t.Fatalf("payment should fail due to insufficient capacity, "+ - "instead: %v", resp.PaymentError) } + assertLastHTLCError( + t, net.Alice, lnrpc.Failure_TEMPORARY_CHANNEL_FAILURE, + ) + // Generate new invoice to not pay same invoice twice. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) carolInvoice, err = carol.AddInvoice(ctxt, invoiceReq) @@ -8974,11 +9690,8 @@ out: if resp.PaymentError == "" { t.Fatalf("payment should have failed") } - expectedErrorCode = lnwire.CodeUnknownNextPeer.String() - if !strings.Contains(resp.PaymentError, expectedErrorCode) { - t.Fatalf("payment should fail due to unknown hop, instead: %v", - resp.PaymentError) - } + + assertLastHTLCError(t, net.Alice, lnrpc.Failure_UNKNOWN_NEXT_PEER) // Finally, immediately close the channel. This function will also // block until the channel is closed and will additionally assert the @@ -9145,9 +9858,8 @@ func testRejectHTLC(net *lntest.NetworkHarness, t *harnessTest) { "should have been rejected, carol will not accept forwarded htlcs", ) } - if !strings.Contains(err.Error(), lnwire.CodeChannelDisabled.String()) { - t.Fatalf("error returned should have been Channel Disabled") - } + + assertLastHTLCError(t, net.Alice, lnrpc.Failure_CHANNEL_DISABLED) // Close all channels. 
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) @@ -9630,9 +10342,7 @@ func testNodeSignVerify(net *lntest.NetworkHarness, t *harnessTest) { closeChannelAndAssert(ctxt, t, net, net.Alice, aliceBobCh, false) } -// testAsyncPayments tests the performance of the async payments, and also -// checks that balances of both sides can't be become negative under stress -// payment strikes. +// testAsyncPayments tests the performance of the async payments. func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() @@ -9658,18 +10368,16 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to get alice channel info: %v", err) } - // Calculate the number of invoices. We will deplete the channel - // all the way down to the channel reserve. - chanReserve := channelCapacity / 100 - availableBalance := btcutil.Amount(info.LocalBalance) - chanReserve - numInvoices := int(availableBalance / paymentAmt) + // We'll create a number of invoices equal the max number of HTLCs that + // can be carried in one direction. The number on the commitment will + // likely be lower, but we can't guarantee that any more HTLCs will + // succeed due to the limited path diversity and inability of the router + // to retry via another path. + numInvoices := int(input.MaxHTLCNumber / 2) bobAmt := int64(numInvoices * paymentAmt) aliceAmt := info.LocalBalance - bobAmt - // Send one more payment in order to cause insufficient capacity error. - numInvoices++ - // With the channel open, we'll create invoices for Bob that Alice // will pay to in order to advance the state of the channel. bobPayReqs, _, _, err := createPayReqs( @@ -9710,28 +10418,13 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) { } } - // We should receive one insufficient capacity error, because we sent - // one more payment than we can actually handle with the current - // channel capacity. 
- errorReceived := false + // Wait until all the payments have settled. for i := 0; i < numInvoices; i++ { - if resp, err := alicePayStream.Recv(); err != nil { + if _, err := alicePayStream.Recv(); err != nil { t.Fatalf("payment stream have been closed: %v", err) - } else if resp.PaymentError != "" { - if errorReceived { - t.Fatalf("redundant payment error: %v", - resp.PaymentError) - } - - errorReceived = true - continue } } - if !errorReceived { - t.Fatalf("insufficient capacity error haven't been received") - } - // All payments have been sent, mark the finish time. timeTaken := time.Since(now) @@ -9821,8 +10514,12 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) t.Fatalf("unable to get alice channel info: %v", err) } - // Calculate the number of invoices. - numInvoices := int(info.LocalBalance / paymentAmt) + // We'll create a number of invoices equal the max number of HTLCs that + // can be carried in one direction. The number on the commitment will + // likely be lower, but we can't guarantee that any more HTLCs will + // succeed due to the limited path diversity and inability of the router + // to retry via another path. + numInvoices := int(input.MaxHTLCNumber / 2) // Nodes should exchange the same amount of money and because of this // at the end balances should remain the same. @@ -9977,954 +10674,174 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) t.Fatalf("alice's pending htlcs is incorrect, got %v, "+ "expected %v", len(aliceInfo.PendingHtlcs), 0) } - - // Next query for Bob's and Alice's channel states, in order to confirm - // that all payment have been successful transmitted. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - bobInfo, err := getChanInfo(ctxt, net.Bob) - if err != nil { - t.Fatalf("unable to get bob's channel info: %v", err) - } - - if bobInfo.LocalBalance != bobAmt { - t.Fatalf("bob's local balance is incorrect, got %v, expected"+ - " %v", bobInfo.LocalBalance, bobAmt) - } - if bobInfo.RemoteBalance != aliceAmt { - t.Fatalf("bob's remote balance is incorrect, got %v, "+ - "expected %v", bobInfo.RemoteBalance, aliceAmt) - } - if len(bobInfo.PendingHtlcs) != 0 { - t.Fatalf("bob's pending htlcs is incorrect, got %v, "+ - "expected %v", len(bobInfo.PendingHtlcs), 0) - } - - // Finally, immediately close the channel. This function will also - // block until the channel is closed and will additionally assert the - // relevant channel closing post conditions. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) -} - -// assertActiveHtlcs makes sure all the passed nodes have the _exact_ HTLCs -// matching payHashes on _all_ their channels. -func assertActiveHtlcs(nodes []*lntest.HarnessNode, payHashes ...[]byte) error { - ctxb := context.Background() - - req := &lnrpc.ListChannelsRequest{} - for _, node := range nodes { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - nodeChans, err := node.ListChannels(ctxt, req) - if err != nil { - return fmt.Errorf("unable to get node chans: %v", err) - } - - for _, channel := range nodeChans.Channels { - // Record all payment hashes active for this channel. - htlcHashes := make(map[string]struct{}) - for _, htlc := range channel.PendingHtlcs { - _, ok := htlcHashes[string(htlc.HashLock)] - if ok { - return fmt.Errorf("duplicate HashLock") - } - htlcHashes[string(htlc.HashLock)] = struct{}{} - } - - // Channel should have exactly the payHashes active. 
- if len(payHashes) != len(htlcHashes) { - return fmt.Errorf("node %x had %v htlcs active, "+ - "expected %v", node.PubKey[:], - len(htlcHashes), len(payHashes)) - } - - // Make sure all the payHashes are active. - for _, payHash := range payHashes { - if _, ok := htlcHashes[string(payHash)]; ok { - continue - } - return fmt.Errorf("node %x didn't have the "+ - "payHash %v active", node.PubKey[:], - payHash) - } - } - } - - return nil -} - -func assertNumActiveHtlcsChanPoint(node *lntest.HarnessNode, - chanPoint wire.OutPoint, numHtlcs int) error { - ctxb := context.Background() - - req := &lnrpc.ListChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - nodeChans, err := node.ListChannels(ctxt, req) - if err != nil { - return err - } - - for _, channel := range nodeChans.Channels { - if channel.ChannelPoint != chanPoint.String() { - continue - } - - if len(channel.PendingHtlcs) != numHtlcs { - return fmt.Errorf("expected %v active HTLCs, got %v", - numHtlcs, len(channel.PendingHtlcs)) - } - return nil - } - - return fmt.Errorf("channel point %v not found", chanPoint) -} - -func assertNumActiveHtlcs(nodes []*lntest.HarnessNode, numHtlcs int) error { - ctxb := context.Background() - - req := &lnrpc.ListChannelsRequest{} - for _, node := range nodes { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - nodeChans, err := node.ListChannels(ctxt, req) - if err != nil { - return err - } - - for _, channel := range nodeChans.Channels { - if len(channel.PendingHtlcs) != numHtlcs { - return fmt.Errorf("expected %v HTLCs, got %v", - numHtlcs, len(channel.PendingHtlcs)) - } - } - } - - return nil -} - -func assertSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client, - timeout time.Duration, chanPoint wire.OutPoint) { - - breakTimeout := time.After(timeout) - ticker := time.NewTicker(50 * time.Millisecond) - defer ticker.Stop() - - for { - select { - case <-breakTimeout: - t.Fatalf("didn't find tx in mempool") - case <-ticker.C: - mempool, err 
:= miner.GetRawMempool() - if err != nil { - t.Fatalf("unable to get mempool: %v", err) - } - - if len(mempool) == 0 { - continue - } - - for _, txid := range mempool { - tx, err := miner.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to fetch tx: %v", err) - } - - for _, txIn := range tx.MsgTx().TxIn { - if txIn.PreviousOutPoint == chanPoint { - return - } - } - } - } - } -} - -func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, - carolHodl bool) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, - *lntest.HarnessNode) { - - ctxb := context.Background() - - // We'll start the test by creating a channel between Alice and Bob, - // which will act as the first leg for out multi-hop HTLC. - const chanAmt = 1000000 - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - aliceChanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - - // Next, we'll create a new node "carol" and have Bob connect to her. If - // the carolHodl flag is set, we'll make carol always hold onto the - // HTLC, this way it'll force Bob to go to chain to resolve the HTLC. - carolFlags := []string{} - if carolHodl { - carolFlags = append(carolFlags, "--hodl.exit-settle") - } - carol, err := net.NewNode("Carol", carolFlags) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Bob, carol); err != nil { - t.Fatalf("unable to connect bob to carol: %v", err) - } - - // We'll then create a channel from Bob to Carol. 
After this channel is - // open, our topology looks like: A -> B -> C. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - bobChanPoint := openChannelAndAssert( - ctxt, t, net, net.Bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - - return aliceChanPoint, bobChanPoint, carol -} - -// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the -// outgoing HTLC is about to time out, then we'll go to chain in order to claim -// it. Any dust HTLC's should be immediately canceled backwards. Once the -// timeout has been reached, then we should sweep it on-chain, and cancel the -// HTLC backwards. -func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := - createThreeHopNetwork(t, net, true) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - time.Sleep(time.Second * 1) - - // Now that our channels are set up, we'll send two HTLC's from Alice - // to Carol. The first HTLC will be universally considered "dust", - // while the second will be a proper fully valued HTLC. 
- const ( - dustHtlcAmt = btcutil.Amount(100) - htlcAmt = btcutil.Amount(30000) - finalCltvDelta = 40 - ) - - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - alicePayStream, err := net.Alice.SendPayment(ctx) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } - - // We'll create two random payment hashes unknown to carol, then send - // each of them by manually specifying the HTLC details. - carolPubKey := carol.PubKey[:] - dustPayHash := makeFakePayHash(t) - payHash := makeFakePayHash(t) - err = alicePayStream.Send(&lnrpc.SendRequest{ - Dest: carolPubKey, - Amt: int64(dustHtlcAmt), - PaymentHash: dustPayHash, - FinalCltvDelta: finalCltvDelta, - }) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } - err = alicePayStream.Send(&lnrpc.SendRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - }) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } - - // Verify that all nodes in the path now have two HTLC's with the - // proper parameters. - var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, dustPayHash, payHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // We'll now mine enough blocks to trigger Bob's broadcast of his - // commitment transaction due to the fact that the HTLC is about to - // timeout. With the default outgoing broadcast delta of zero, this will - // be the same height as the htlc expiry height. - numBlocks := padCLTV( - uint32(finalCltvDelta - lnd.DefaultOutgoingBroadcastDelta), - ) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Bob's force close transaction should now be found in the mempool. 
- bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - closeTxid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find closing txid: %v", err) - } - assertSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ - Hash: *bobFundingTxid, - Index: bobChanPoint.OutputIndex, - }, - ) - - // Mine a block to confirm the closing transaction. - mineBlocks(t, net, 1, 1) - - // At this point, Bob should have canceled backwards the dust HTLC - // that we sent earlier. This means Alice should now only have a single - // HTLC on her channel. - nodes = []*lntest.HarnessNode{net.Alice} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // With the closing transaction confirmed, we should expect Bob's HTLC - // timeout transaction to be broadcast due to the expiry being reached. - htlcTimeout, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's htlc timeout tx: %v", err) - } - - // We'll mine the remaining blocks in order to generate the sweep - // transaction of Bob's commitment output. - mineBlocks(t, net, defaultCSV, 1) - assertSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ - Hash: *closeTxid, - Index: 1, - }, - ) - - // Bob's pending channel report should show that he has a commitment - // output awaiting sweeping, and also that there's an outgoing HTLC - // output pending. 
- pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - t.Fatalf("bob should have pending for close chan but doesn't") - } - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.LimboBalance == 0 { - t.Fatalf("bob should have nonzero limbo balance instead "+ - "has: %v", forceCloseChan.LimboBalance) - } - if len(forceCloseChan.PendingHtlcs) == 0 { - t.Fatalf("bob should have pending htlc but doesn't") - } - - // Now we'll mine an additional block, which should confirm Bob's commit - // sweep. This block should also prompt Bob to broadcast their second - // layer sweep due to the CSV on the HTLC timeout output. - mineBlocks(t, net, 1, 1) - assertSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ - Hash: *htlcTimeout, - Index: 0, - }, - ) - - // The block should have confirmed Bob's HTLC timeout transaction. - // Therefore, at this point, there should be no active HTLC's on the - // commitment transaction from Alice -> Bob. - nodes = []*lntest.HarnessNode{net.Alice} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("alice's channel still has active htlc's: %v", predErr) - } - - // At this point, Bob should show that the pending HTLC has advanced to - // the second stage and is to be swept. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = net.Bob.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } - forceCloseChan = pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.PendingHtlcs[0].Stage != 2 { - t.Fatalf("bob's htlc should have advanced to the second stage: %v", err) - } - - // Next, we'll mine a final block that should confirm the second-layer - // sweeping transaction. - if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Once this transaction has been confirmed, Bob should detect that he - // no longer has any pending channels. - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = net.Bob.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending "+ - "channels but shouldn't: %v", - spew.Sdump(pendingChanResp)) - return false - } - - return true - - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) -} - -// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC -// scenario, if the node that extended the HTLC to the final node closes their -// commitment on-chain early, then it eventually recognizes this HTLC as one -// that's timed out. At this point, the node should timeout the HTLC, then -// cancel it backwards as normal. 
-func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest) { - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := - createThreeHopNetwork(t, net, true) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With our channels set up, we'll then send a single HTLC from Alice - // to Carol. As Carol is in hodl mode, she won't settle this HTLC which - // opens up the base for out tests. - const ( - finalCltvDelta = 40 - htlcAmt = btcutil.Amount(30000) - ) - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - alicePayStream, err := net.Alice.SendPayment(ctx) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } - - // We'll now send a single HTLC across our multi-hop network. - carolPubKey := carol.PubKey[:] - payHash := makeFakePayHash(t) - err = alicePayStream.Send(&lnrpc.SendRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - }) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } - - // Once the HTLC has cleared, all channels in our mini network should - // have the it locked in. - var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", err) - } - - // Now that all parties have the HTLC locked in, we'll immediately - // force close the Bob -> Carol channel. This should trigger contract - // resolution mode for both of them. 
- ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, bobChanPoint, true) - - // At this point, Bob should have a pending force close channel as he - // just went to chain. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending for " + - "close chan but doesn't") - return false - } - - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.LimboBalance == 0 { - predErr = fmt.Errorf("bob should have nonzero limbo "+ - "balance instead has: %v", - forceCloseChan.LimboBalance) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - // We'll mine defaultCSV blocks in order to generate the sweep transaction - // of Bob's funding output. - if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's funding output sweep tx: %v", err) - } - - // We'll now mine enough blocks for the HTLC to expire. After this, Bob - // should hand off the now expired HTLC output to the utxo nursery. - numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1)) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Bob's pending channel report should show that he has a single HTLC - // that's now in stage one. 
- err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending force " + - "close chan but doesn't") - return false - } - - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if len(forceCloseChan.PendingHtlcs) != 1 { - predErr = fmt.Errorf("bob should have pending htlc " + - "but doesn't") - return false - } - if forceCloseChan.PendingHtlcs[0].Stage != 1 { - predErr = fmt.Errorf("bob's htlc should have "+ - "advanced to the first stage: %v", err) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) - } - - // We should also now find a transaction in the mempool, as Bob should - // have broadcast his second layer timeout transaction. - timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's htlc timeout tx: %v", err) - } - - // Next, we'll mine an additional block. This should serve to confirm - // the second layer timeout transaction. - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, timeoutTx) - - // With the second layer timeout transaction confirmed, Bob should have - // canceled backwards the HTLC that carol sent. - nodes = []*lntest.HarnessNode{net.Alice} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("alice's channel still has active htlc's: %v", predErr) - } - - // Additionally, Bob should now show that HTLC as being advanced to the - // second stage. 
- err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending for " + - "close chan but doesn't") - return false - } - - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if len(forceCloseChan.PendingHtlcs) != 1 { - predErr = fmt.Errorf("bob should have pending htlc " + - "but doesn't") - return false - } - if forceCloseChan.PendingHtlcs[0].Stage != 2 { - predErr = fmt.Errorf("bob's htlc should have "+ - "advanced to the second stage: %v", err) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) - } - - // We'll now mine 4 additional blocks. This should be enough for Bob's - // CSV timelock to expire and the sweeping transaction of the HTLC to be - // broadcast. - if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } - - sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's htlc sweep tx: %v", err) - } - - // We'll then mine a final block which should confirm this second layer - // sweep transaction. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, sweepTx) - - // At this point, Bob should no longer show any channels as pending - // close. 
- err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - - return true - }, time.Second*15) + + // Next query for Bob's and Alice's channel states, in order to confirm + // that all payment have been successful transmitted. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + bobInfo, err := getChanInfo(ctxt, net.Bob) if err != nil { - t.Fatalf(predErr.Error()) + t.Fatalf("unable to get bob's channel info: %v", err) + } + + if bobInfo.LocalBalance != bobAmt { + t.Fatalf("bob's local balance is incorrect, got %v, expected"+ + " %v", bobInfo.LocalBalance, bobAmt) + } + if bobInfo.RemoteBalance != aliceAmt { + t.Fatalf("bob's remote balance is incorrect, got %v, "+ + "expected %v", bobInfo.RemoteBalance, aliceAmt) + } + if len(bobInfo.PendingHtlcs) != 0 { + t.Fatalf("bob's pending htlcs is incorrect, got %v, "+ + "expected %v", len(bobInfo.PendingHtlcs), 0) } + // Finally, immediately close the channel. This function will also + // block until the channel is closed and will additionally assert the + // relevant channel closing post conditions. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) + closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) } -// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a -// multi-hop HTLC, and the final destination of the HTLC force closes the -// channel, then we properly timeout the HTLC on *their* commitment transaction -// once the timeout has expired. 
Once we sweep the transaction, we should also -// cancel back the initial HTLC. -func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest) { +// assertActiveHtlcs makes sure all the passed nodes have the _exact_ HTLCs +// matching payHashes on _all_ their channels. +func assertActiveHtlcs(nodes []*lntest.HarnessNode, payHashes ...[]byte) error { ctxb := context.Background() - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := - createThreeHopNetwork(t, net, true) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) + req := &lnrpc.ListChannelsRequest{} + for _, node := range nodes { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + nodeChans, err := node.ListChannels(ctxt, req) + if err != nil { + return fmt.Errorf("unable to get node chans: %v", err) + } - // With our channels set up, we'll then send a single HTLC from Alice - // to Carol. As Carol is in hodl mode, she won't settle this HTLC which - // opens up the base for out tests. - const ( - finalCltvDelta = 40 - htlcAmt = btcutil.Amount(30000) - ) + for _, channel := range nodeChans.Channels { + // Record all payment hashes active for this channel. + htlcHashes := make(map[string]struct{}) + for _, htlc := range channel.PendingHtlcs { + _, ok := htlcHashes[string(htlc.HashLock)] + if ok { + return fmt.Errorf("duplicate HashLock") + } + htlcHashes[string(htlc.HashLock)] = struct{}{} + } - ctx, cancel := context.WithCancel(ctxb) - defer cancel() + // Channel should have exactly the payHashes active. 
+ if len(payHashes) != len(htlcHashes) { + return fmt.Errorf("node %x had %v htlcs active, "+ + "expected %v", node.PubKey[:], + len(htlcHashes), len(payHashes)) + } - alicePayStream, err := net.Alice.SendPayment(ctx) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) + // Make sure all the payHashes are active. + for _, payHash := range payHashes { + if _, ok := htlcHashes[string(payHash)]; ok { + continue + } + return fmt.Errorf("node %x didn't have the "+ + "payHash %v active", node.PubKey[:], + payHash) + } + } } - // We'll now send a single HTLC across our multi-hop network. - carolPubKey := carol.PubKey[:] - payHash := makeFakePayHash(t) - err = alicePayStream.Send(&lnrpc.SendRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - }) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } + return nil +} - // Once the HTLC has cleared, all the nodes in our mini network should - // show that the HTLC has been locked in. - var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash) - if predErr != nil { - return false - } +func assertNumActiveHtlcsChanPoint(node *lntest.HarnessNode, + chanPoint wire.OutPoint, numHtlcs int) error { + ctxb := context.Background() - return true - }, time.Second*15) + req := &lnrpc.ListChannelsRequest{} + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + nodeChans, err := node.ListChannels(ctxt, req) if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) + return err } - // At this point, we'll now instruct Carol to force close the - // transaction. This will let us exercise that Bob is able to sweep the - // expired HTLC on Carol's version of the commitment transaction. 
- ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, bobChanPoint, true) - - // At this point, Bob should have a pending force close channel as - // Carol has gone directly to chain. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for "+ - "pending channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending " + - "force close channels but doesn't") - return false + for _, channel := range nodeChans.Channels { + if channel.ChannelPoint != chanPoint.String() { + continue } - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) + if len(channel.PendingHtlcs) != numHtlcs { + return fmt.Errorf("expected %v active HTLCs, got %v", + numHtlcs, len(channel.PendingHtlcs)) + } + return nil } - // Bob can sweep his output immediately. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's funding output sweep tx: %v", - err) - } + return fmt.Errorf("channel point %v not found", chanPoint) +} - // Next, we'll mine enough blocks for the HTLC to expire. At this - // point, Bob should hand off the output to his internal utxo nursery, - // which will broadcast a sweep transaction. 
- numBlocks := padCLTV(finalCltvDelta - 1) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } +func assertNumActiveHtlcs(nodes []*lntest.HarnessNode, numHtlcs int) error { + ctxb := context.Background() - // If we check Bob's pending channel report, it should show that he has - // a single HTLC that's now in the second stage, as skip the initial - // first stage since this is a direct HTLC. - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) + req := &lnrpc.ListChannelsRequest{} + for _, node := range nodes { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + nodeChans, err := node.ListChannels(ctxt, req) if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false + return err } - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending for " + - "close chan but doesn't") - return false + for _, channel := range nodeChans.Channels { + if len(channel.PendingHtlcs) != numHtlcs { + return fmt.Errorf("expected %v HTLCs, got %v", + numHtlcs, len(channel.PendingHtlcs)) + } } + } - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if len(forceCloseChan.PendingHtlcs) != 1 { - predErr = fmt.Errorf("bob should have pending htlc " + - "but doesn't") - return false - } - if forceCloseChan.PendingHtlcs[0].Stage != 2 { - predErr = fmt.Errorf("bob's htlc should have "+ - "advanced to the second stage: %v", err) - return false - } + return nil +} - return true - }, time.Second*15) - if err != nil { - t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) - } +func assertSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client, + timeout time.Duration, chanPoint wire.OutPoint) chainhash.Hash { - // Bob's sweeping transaction should now be found in the mempool at - // 
this point. - sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - // If Bob's transaction isn't yet in the mempool, then due to - // internal message passing and the low period between blocks - // being mined, it may have been detected as a late - // registration. As a result, we'll mine another block and - // repeat the check. If it doesn't go through this time, then - // we'll fail. - // TODO(halseth): can we use waitForChannelPendingForceClose to - // avoid this hack? - if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } - sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's sweeping transaction: "+ - "%v", err) - } - } + tx := getSpendingTxInMempool(t, miner, timeout, chanPoint) + return tx.TxHash() +} - // If we mine an additional block, then this should confirm Bob's - // transaction which sweeps the direct HTLC output. - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, sweepTx) +// getSpendingTxInMempool waits for a transaction spending the given outpoint to +// appear in the mempool and returns that tx in full. +func getSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client, + timeout time.Duration, chanPoint wire.OutPoint) *wire.MsgTx { - // Now that the sweeping transaction has been confirmed, Bob should - // cancel back that HTLC. As a result, Alice should not know of any - // active HTLC's. - nodes = []*lntest.HarnessNode{net.Alice} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("alice's channel still has active htlc's: %v", predErr) - } + breakTimeout := time.After(timeout) + ticker := time.NewTicker(50 * time.Millisecond) + defer ticker.Stop() - // Now we'll check Bob's pending channel report. 
Since this was Carol's - // commitment, he doesn't have to wait for any CSV delays. As a result, - // he should show no additional pending transactions. - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } + for { + select { + case <-breakTimeout: + t.Fatalf("didn't find tx in mempool") + case <-ticker.C: + mempool, err := miner.GetRawMempool() + if err != nil { + t.Fatalf("unable to get mempool: %v", err) + } - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } + if len(mempool) == 0 { + continue + } - // We'll close out the test by closing the channel from Alice to Bob, - // and then shutting down the new node we created as its no longer - // needed. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) + for _, txid := range mempool { + tx, err := miner.GetRawTransaction(txid) + if err != nil { + t.Fatalf("unable to fetch tx: %v", err) + } + + msgTx := tx.MsgTx() + for _, txIn := range msgTx.TxIn { + if txIn.PreviousOutPoint == chanPoint { + return msgTx + } + } + } + } + } } // testSwitchCircuitPersistence creates a multihop network to ensure the sender @@ -11294,7 +11211,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) { // Carol -> Dave -> Alice -> Bob // // First, we'll create Dave and establish a channel to Alice. 
- dave, err := net.NewNode("Dave", []string{"--unsafe-disconnect"}) + dave, err := net.NewNode("Dave", nil) if err != nil { t.Fatalf("unable to create new nodes: %v", err) } @@ -11624,7 +11541,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness // Carol -> Dave -> Alice -> Bob // // First, we'll create Dave and establish a channel to Alice. - dave, err := net.NewNode("Dave", []string{"--unsafe-disconnect"}) + dave, err := net.NewNode("Dave", nil) if err != nil { t.Fatalf("unable to create new nodes: %v", err) } @@ -11961,7 +11878,7 @@ func testSwitchOfflineDeliveryOutgoingOffline( // Carol -> Dave -> Alice -> Bob // // First, we'll create Dave and establish a channel to Alice. - dave, err := net.NewNode("Dave", []string{"--unsafe-disconnect"}) + dave, err := net.NewNode("Dave", nil) if err != nil { t.Fatalf("unable to create new nodes: %v", err) } @@ -12701,7 +12618,6 @@ func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) { carol, err := net.NewNode("Carol", []string{ "--minbackoff=10s", - "--unsafe-disconnect", "--chan-enable-timeout=1.5s", "--chan-disable-timeout=3s", "--chan-status-sample-interval=.5s", @@ -12934,11 +12850,17 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, channelParam) + ctxt, t, net, net.Alice, net.Bob, channelParam, + ) + txid, err := lnd.GetChanPointFundingTxid(chanPoint) + if err != nil { + t.Fatalf("unable to get txid: %v", err) + } + chanPointStr := fmt.Sprintf("%v:%v", txid, chanPoint.OutputIndex) // Wait for channel to be confirmed open. 
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) + err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) if err != nil { t.Fatalf("alice didn't report channel: %v", err) } @@ -12947,6 +12869,34 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("bob didn't report channel: %v", err) } + // Now that the channel is open, we'll obtain its channel ID real quick + // so we can use it to query the graph below. + listReq := &lnrpc.ListChannelsRequest{} + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + aliceChannelList, err := net.Alice.ListChannels(ctxt, listReq) + if err != nil { + t.Fatalf("unable to fetch alice's channels: %v", err) + } + var chanID uint64 + for _, channel := range aliceChannelList.Channels { + if channel.ChannelPoint == chanPointStr { + chanID = channel.ChanId + } + } + + if chanID == 0 { + t.Fatalf("unable to find channel") + } + + // To make sure the channel is removed from the backup file as well when + // being abandoned, grab a backup snapshot so we can compare it with the + // later state. + bkupBefore, err := ioutil.ReadFile(net.Alice.ChanBackupPath()) + if err != nil { + t.Fatalf("could not get channel backup before abandoning "+ + "channel: %v", err) + } + // Send request to abandon channel. abandonChannelRequest := &lnrpc.AbandonChannelRequest{ ChannelPoint: chanPoint, @@ -12959,9 +12909,8 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { } // Assert that channel in no longer open. 
- listReq := &lnrpc.ListChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceChannelList, err := net.Alice.ListChannels(ctxt, listReq) + aliceChannelList, err = net.Alice.ListChannels(ctxt, listReq) if err != nil { t.Fatalf("unable to list channels: %v", err) } @@ -13009,9 +12958,40 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { len(aliceClosedList.Channels)) } - // Now that we're done with the test, the channel can be closed. This is - // necessary to avoid unexpected outcomes of other tests that use Bob's - // lnd instance. + // Ensure that the channel can no longer be found in the channel graph. + _, err = net.Alice.GetChanInfo(ctxb, &lnrpc.ChanInfoRequest{ + ChanId: chanID, + }) + if !strings.Contains(err.Error(), "marked as zombie") { + t.Fatalf("channel shouldn't be found in the channel " + + "graph!") + } + + // Make sure the channel is no longer in the channel backup list. + err = wait.Predicate(func() bool { + bkupAfter, err := ioutil.ReadFile(net.Alice.ChanBackupPath()) + if err != nil { + t.Fatalf("could not get channel backup before "+ + "abandoning channel: %v", err) + } + + return len(bkupAfter) < len(bkupBefore) + }, defaultTimeout) + if err != nil { + t.Fatalf("channel wasn't removed from channel backup file") + } + + // Calling AbandonChannel again, should result in no new errors, as the + // channel has already been removed. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + _, err = net.Alice.AbandonChannel(ctxt, abandonChannelRequest) + if err != nil { + t.Fatalf("unable to abandon channel a second time: %v", err) + } + + // Now that we're done with the test, the channel can be closed. This + // is necessary to avoid unexpected outcomes of other tests that use + // Bob's lnd instance. 
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) closeChannelAndAssert(ctxt, t, net, net.Bob, chanPoint, true) @@ -13303,9 +13283,9 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { } } - // As these two channels were just open, we should've got two - // notifications for channel backups. - assertBackupNtfns(2) + // As these two channels were just opened, we should've got two times + // the pending and open notifications for channel backups. + assertBackupNtfns(2 * 2) // The on disk file should also exactly match the latest backup that we // have. @@ -13534,6 +13514,14 @@ type chanRestoreTestCase struct { // private or not. private bool + // unconfirmed signals if the channel from Dave to Carol should be + // confirmed or not. + unconfirmed bool + + // anchorCommit is true, then the new anchor commitment type will be + // used for the channels created in the test. + anchorCommit bool + // restoreMethod takes an old node, then returns a function // closure that'll return the same node, but with its state // restored via a custom method. We use this to abstract away @@ -13558,11 +13546,16 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, ctxb := context.Background() + var nodeArgs []string + if testCase.anchorCommit { + nodeArgs = commitTypeAnchors.Args() + } + // First, we'll create a brand new node we'll use within the test. If // we have a custom backup file specified, then we'll also create that // for use. 
dave, mnemonic, err := net.NewNodeWithSeed( - "dave", nil, password, + "dave", nodeArgs, password, ) if err != nil { t.Fatalf("unable to create new node: %v", err) @@ -13572,15 +13565,20 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, defer func() { shutdownAndAssert(net, t, dave) }() - carol, err := net.NewNode("carol", nil) + carol, err := net.NewNode("carol", nodeArgs) if err != nil { t.Fatalf("unable to make new node: %v", err) } defer shutdownAndAssert(net, t, carol) - // Now that our new node is created, we'll give him some coins it can - // use to open channels with Carol. + // Now that our new nodes are created, we'll give them some coins for + // channel opening and anchor sweeping. ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, carol) + if err != nil { + t.Fatalf("unable to send coins to dave: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, dave) if err != nil { t.Fatalf("unable to send coins to dave: %v", err) @@ -13598,25 +13596,55 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, if err := net.ConnectNodes(ctxt, dave, carol); err != nil { t.Fatalf("unable to connect dave to carol: %v", err) } - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, from, to, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - Private: testCase.private, - }, - ) - // Wait for both sides to see the opened channel. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("dave didn't report channel: %v", err) - } - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("carol didn't report channel: %v", err) + // We will either open a confirmed or unconfirmed channel, depending on + // the requirements of the test case. + switch { + case testCase.unconfirmed: + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + _, err := net.OpenPendingChannel( + ctxt, from, to, chanAmt, pushAmt, + ) + if err != nil { + t.Fatalf("couldn't open pending channel: %v", err) + } + + // Give the pubsub some time to update the channel backup. + err = wait.NoError(func() error { + fi, err := os.Stat(dave.ChanBackupPath()) + if err != nil { + return err + } + if fi.Size() <= chanbackup.NilMultiSizePacked { + return fmt.Errorf("backup file empty") + } + return nil + }, defaultTimeout) + if err != nil { + t.Fatalf("channel backup not updated in time: %v", err) + } + + default: + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + chanPoint := openChannelAndAssert( + ctxt, t, net, from, to, + lntest.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, + Private: testCase.private, + }, + ) + + // Wait for both sides to see the opened channel. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) + if err != nil { + t.Fatalf("dave didn't report channel: %v", err) + } + err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) + if err != nil { + t.Fatalf("carol didn't report channel: %v", err) + } } // If both parties should start with existing channel updates, then @@ -13716,6 +13744,7 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, // end of the protocol. 
assertDLPExecuted( net, t, carol, carolStartingBalance, dave, daveStartingBalance, + testCase.anchorCommit, ) } @@ -13765,22 +13794,271 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() - var testCases = []chanRestoreTestCase{ - // Restore from backups obtained via the RPC interface. Dave - // was the initiator, of the non-advertised channel. + var testCases = []chanRestoreTestCase{ + // Restore from backups obtained via the RPC interface. Dave + // was the initiator, of the non-advertised channel. + { + name: "restore from RPC backup", + channelsUpdated: false, + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // For this restoration method, we'll grab the + // current multi-channel backup from the old + // node, and use it to restore a new node + // within the closure. + req := &lnrpc.ChanBackupExportRequest{} + chanBackup, err := oldNode.ExportAllChannelBackups( + ctxb, req, + ) + if err != nil { + return nil, fmt.Errorf("unable to obtain "+ + "channel backup: %v", err) + } + + multi := chanBackup.MultiChanBackup.MultiChanBackup + + // In our nodeRestorer function, we'll restore + // the node from seed, then manually recover + // the channel backup. + return chanRestoreViaRPC( + net, password, mnemonic, multi, + ) + }, + }, + + // Restore the backup from the on-disk file, using the RPC + // interface. + { + name: "restore from backup file", + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // Read the entire Multi backup stored within + // this node's channels.backup file. 
+ multi, err := ioutil.ReadFile(backupFilePath) + if err != nil { + return nil, err + } + + // Now that we have Dave's backup file, we'll + // create a new nodeRestorer that will restore + // using the on-disk channels.backup. + return chanRestoreViaRPC( + net, password, mnemonic, multi, + ) + }, + }, + + // Restore the backup as part of node initialization with the + // prior mnemonic and new backup seed. + { + name: "restore during creation", + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // First, fetch the current backup state as is, + // to obtain our latest Multi. + chanBackup, err := oldNode.ExportAllChannelBackups( + ctxb, &lnrpc.ChanBackupExportRequest{}, + ) + if err != nil { + return nil, fmt.Errorf("unable to obtain "+ + "channel backup: %v", err) + } + backupSnapshot := &lnrpc.ChanBackupSnapshot{ + MultiChanBackup: chanBackup.MultiChanBackup, + } + + // Create a new nodeRestorer that will restore + // the node using the Multi backup we just + // obtained above. + return func() (*lntest.HarnessNode, error) { + return net.RestoreNodeWithSeed( + "dave", nil, password, + mnemonic, 1000, backupSnapshot, + ) + }, nil + }, + }, + + // Restore the backup once the node has already been + // re-created, using the Unlock call. + { + name: "restore during unlock", + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // First, fetch the current backup state as is, + // to obtain our latest Multi. 
+ chanBackup, err := oldNode.ExportAllChannelBackups( + ctxb, &lnrpc.ChanBackupExportRequest{}, + ) + if err != nil { + return nil, fmt.Errorf("unable to obtain "+ + "channel backup: %v", err) + } + backupSnapshot := &lnrpc.ChanBackupSnapshot{ + MultiChanBackup: chanBackup.MultiChanBackup, + } + + // Create a new nodeRestorer that will restore + // the node with its seed, but no channel + // backup, shutdown this initialized node, then + // restart it again using Unlock. + return func() (*lntest.HarnessNode, error) { + newNode, err := net.RestoreNodeWithSeed( + "dave", nil, password, + mnemonic, 1000, nil, + ) + if err != nil { + return nil, err + } + + err = net.RestartNode( + newNode, nil, backupSnapshot, + ) + if err != nil { + return nil, err + } + + return newNode, nil + }, nil + }, + }, + + // Restore the backup from the on-disk file a second time to + // make sure imports can be canceled and later resumed. + { + name: "restore from backup file twice", + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // Read the entire Multi backup stored within + // this node's channels.backup file. + multi, err := ioutil.ReadFile(backupFilePath) + if err != nil { + return nil, err + } + + // Now that we have Dave's backup file, we'll + // create a new nodeRestorer that will restore + // using the on-disk channels.backup. 
+ backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ + MultiChanBackup: multi, + } + + ctxb := context.Background() + + return func() (*lntest.HarnessNode, error) { + newNode, err := net.RestoreNodeWithSeed( + "dave", nil, password, mnemonic, + 1000, nil, + ) + if err != nil { + return nil, fmt.Errorf("unable to "+ + "restore node: %v", err) + } + + _, err = newNode.RestoreChannelBackups( + ctxb, + &lnrpc.RestoreChanBackupRequest{ + Backup: backup, + }, + ) + if err != nil { + return nil, fmt.Errorf("unable "+ + "to restore backups: %v", + err) + } + + _, err = newNode.RestoreChannelBackups( + ctxb, + &lnrpc.RestoreChanBackupRequest{ + Backup: backup, + }, + ) + if err != nil { + return nil, fmt.Errorf("unable "+ + "to restore backups the"+ + "second time: %v", + err) + } + + return newNode, nil + }, nil + }, + }, + + // Use the channel backup file that contains an unconfirmed + // channel and make sure recovery works as well. + { + name: "restore unconfirmed channel file", + channelsUpdated: false, + initiator: true, + private: false, + unconfirmed: true, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // Read the entire Multi backup stored within + // this node's channels.backup file. + multi, err := ioutil.ReadFile(backupFilePath) + if err != nil { + return nil, err + } + + // Let's assume time passes, the channel + // confirms in the meantime but for some reason + // the backup we made while it was still + // unconfirmed is the only backup we have. We + // should still be able to restore it. To + // simulate time passing, we mine some blocks + // to get the channel confirmed _after_ we saved + // the backup. + mineBlocks(t, net, 6, 1) + + // In our nodeRestorer function, we'll restore + // the node from seed, then manually recover + // the channel backup. 
+ return chanRestoreViaRPC( + net, password, mnemonic, multi, + ) + }, + }, + + // Create a backup using RPC that contains an unconfirmed + // channel and make sure recovery works as well. { - name: "restore from RPC backup", + name: "restore unconfirmed channel RPC", channelsUpdated: false, initiator: true, private: false, + unconfirmed: true, restoreMethod: func(oldNode *lntest.HarnessNode, backupFilePath string, mnemonic []string) (nodeRestorer, error) { // For this restoration method, we'll grab the // current multi-channel backup from the old - // node, and use it to restore a new node - // within the closure. + // node. The channel should be included, even if + // it is not confirmed yet. req := &lnrpc.ChanBackupExportRequest{} chanBackup, err := oldNode.ExportAllChannelBackups( ctxb, req, @@ -13789,12 +14067,26 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { return nil, fmt.Errorf("unable to obtain "+ "channel backup: %v", err) } + chanPoints := chanBackup.MultiChanBackup.ChanPoints + if len(chanPoints) == 0 { + return nil, fmt.Errorf("unconfirmed " + + "channel not included in backup") + } - multi := chanBackup.MultiChanBackup.MultiChanBackup + // Let's assume time passes, the channel + // confirms in the meantime but for some reason + // the backup we made while it was still + // unconfirmed is the only backup we have. We + // should still be able to restore it. To + // simulate time passing, we mine some blocks + // to get the channel confirmed _after_ we saved + // the backup. + mineBlocks(t, net, 6, 1) // In our nodeRestorer function, we'll restore // the node from seed, then manually recover // the channel backup. + multi := chanBackup.MultiChanBackup.MultiChanBackup return chanRestoreViaRPC( net, password, mnemonic, multi, ) @@ -13802,17 +14094,18 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { }, // Restore the backup from the on-disk file, using the RPC - // interface. 
+ // interface, for anchor commitment channels. { - name: "restore from backup file", - initiator: true, - private: false, + name: "restore from backup file anchors", + initiator: true, + private: false, + anchorCommit: true, restoreMethod: func(oldNode *lntest.HarnessNode, backupFilePath string, mnemonic []string) (nodeRestorer, error) { // Read the entire Multi backup stored within - // this node's chaannels.backup file. + // this node's channels.backup file. multi, err := ioutil.ReadFile(backupFilePath) if err != nil { return nil, err @@ -13826,89 +14119,6 @@ func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { ) }, }, - - // Restore the backup as part of node initialization with the - // prior mnemonic and new backup seed. - { - name: "restore during creation", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // First, fetch the current backup state as is, - // to obtain our latest Multi. - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, &lnrpc.ChanBackupExportRequest{}, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - backupSnapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: chanBackup.MultiChanBackup, - } - - // Create a new nodeRestorer that will restore - // the node using the Multi backup we just - // obtained above. - return func() (*lntest.HarnessNode, error) { - return net.RestoreNodeWithSeed( - "dave", nil, password, - mnemonic, 1000, backupSnapshot, - ) - }, nil - }, - }, - - // Restore the backup once the node has already been - // re-created, using the Unlock call. - { - name: "restore during unlock", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // First, fetch the current backup state as is, - // to obtain our latest Multi. 
- chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, &lnrpc.ChanBackupExportRequest{}, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - backupSnapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: chanBackup.MultiChanBackup, - } - - // Create a new nodeRestorer that will restore - // the node with its seed, but no channel - // backup, shutdown this initialized node, then - // restart it again using Unlock. - return func() (*lntest.HarnessNode, error) { - newNode, err := net.RestoreNodeWithSeed( - "dave", nil, password, - mnemonic, 1000, nil, - ) - if err != nil { - return nil, err - } - - err = net.RestartNode( - newNode, nil, backupSnapshot, - ) - if err != nil { - return nil, err - } - - return newNode, nil - }, nil - }, - }, } // TODO(roasbeef): online vs offline close? @@ -14038,12 +14248,12 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { } // Let Alice initiate payments for all the created invoices. - var paymentStreams []routerrpc.Router_SendPaymentClient + var paymentStreams []routerrpc.Router_SendPaymentV2Client for _, payReq := range payReqs { ctx, cancel := context.WithCancel(ctxb) defer cancel() - payStream, err := net.Alice.RouterClient.SendPayment( + payStream, err := net.Alice.RouterClient.SendPaymentV2( ctx, &routerrpc.SendPaymentRequest{ PaymentRequest: payReq, TimeoutSeconds: 60, @@ -14059,13 +14269,13 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { // Wait for inlight status update. 
for _, payStream := range paymentStreams { - status, err := payStream.Recv() + payment, err := payStream.Recv() if err != nil { t.Fatalf("Failed receiving status update: %v", err) } - if status.State != routerrpc.PaymentState_IN_FLIGHT { - t.Fatalf("state not in flight: %v", status.State) + if payment.Status != lnrpc.Payment_IN_FLIGHT { + t.Fatalf("state not in flight: %v", payment.Status) } } @@ -14104,8 +14314,8 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { // We wait for the payment attempt to have been // properly recorded in the DB. - if len(payment.Path) == 0 { - return fmt.Errorf("path is empty") + if len(payment.Htlcs) == 0 { + return fmt.Errorf("no attempt recorded") } delete(payHashes, payment.PaymentHash) @@ -14143,7 +14353,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { // Now after a restart, we must re-track the payments. We set up a // goroutine for each to track thir status updates. var ( - statusUpdates []chan *routerrpc.PaymentStatus + statusUpdates []chan *lnrpc.Payment wg sync.WaitGroup quit = make(chan struct{}) ) @@ -14155,7 +14365,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { ctx, cancel := context.WithCancel(ctxb) defer cancel() - payStream, err := net.Alice.RouterClient.TrackPayment( + payStream, err := net.Alice.RouterClient.TrackPaymentV2( ctx, &routerrpc.TrackPaymentRequest{ PaymentHash: hash[:], }, @@ -14165,20 +14375,20 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { } // We set up a channel where we'll forward any status update. 
- upd := make(chan *routerrpc.PaymentStatus) + upd := make(chan *lnrpc.Payment) wg.Add(1) go func() { defer wg.Done() for { - status, err := payStream.Recv() + payment, err := payStream.Recv() if err != nil { close(upd) return } select { - case upd <- status: + case upd <- payment: case <-quit: return } @@ -14188,17 +14398,17 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { statusUpdates = append(statusUpdates, upd) } - // Wait for the infligt status update. + // Wait for the in-flight status update. for _, upd := range statusUpdates { select { - case status, ok := <-upd: + case payment, ok := <-upd: if !ok { - t.Fatalf("failed getting status update") + t.Fatalf("failed getting payment update") } - if status.State != routerrpc.PaymentState_IN_FLIGHT { + if payment.Status != lnrpc.Payment_IN_FLIGHT { t.Fatalf("state not in in flight: %v", - status.State) + payment.Status) } case <-time.After(5 * time.Second): t.Fatalf("in flight status not recevied") @@ -14227,25 +14437,38 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { // Make sure we get the expected status update. for i, upd := range statusUpdates { - select { - case status, ok := <-upd: - if !ok { - t.Fatalf("failed getting status update") - } - - if i%2 == 0 { - if status.State != routerrpc.PaymentState_SUCCEEDED { - t.Fatalf("state not suceeded : %v", - status.State) + // Read until the payment is in a terminal state. + var payment *lnrpc.Payment + for payment == nil { + select { + case p, ok := <-upd: + if !ok { + t.Fatalf("failed getting payment update") } - } else { - if status.State != routerrpc.PaymentState_FAILED_INCORRECT_PAYMENT_DETAILS { - t.Fatalf("state not failed: %v", - status.State) + + if p.Status == lnrpc.Payment_IN_FLIGHT { + continue } + + payment = p + case <-time.After(5 * time.Second): + t.Fatalf("in flight status not recevied") + } + } + + // Assert terminal payment state. 
+		if i%2 == 0 {
+			if payment.Status != lnrpc.Payment_SUCCEEDED {
+				t.Fatalf("state not succeeded : %v",
+					payment.Status)
+			}
+		} else {
+			if payment.FailureReason !=
+				lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS {
+
+				t.Fatalf("state not failed: %v",
+					payment.FailureReason)
 			}
-		case <-time.After(5 * time.Second):
-			t.Fatalf("in flight status not recevied")
 		}
 	}
@@ -14285,6 +14508,255 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) {
 	}
 }
 
+// testExternalFundingChanPoint tests that we're able to carry out a normal
+// channel funding workflow given a channel point that was constructed outside
+// the main daemon.
+func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) {
+	ctxb := context.Background()
+
+	// First, we'll create two new nodes that we'll use to open channel
+	// between for this test.
+	carol, err := net.NewNode("carol", nil)
+	if err != nil {
+		t.Fatalf("unable to start new node: %v", err)
+	}
+	defer shutdownAndAssert(net, t, carol)
+
+	dave, err := net.NewNode("dave", nil)
+	if err != nil {
+		t.Fatalf("unable to start new node: %v", err)
+	}
+	defer shutdownAndAssert(net, t, dave)
+
+	// Carol will be funding the channel, so we'll send some coins over to
+	// her and ensure they have enough confirmations before we proceed.
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
+	err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, carol)
+	if err != nil {
+		t.Fatalf("unable to send coins to carol: %v", err)
+	}
+
+	// Before we start the test, we'll ensure both sides are connected so
+	// the funding flow can properly be executed.
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = net.EnsureConnected(ctxt, carol, dave)
+	if err != nil {
+		t.Fatalf("unable to connect peers: %v", err)
+	}
+
+	// At this point, we're ready to simulate our external channel funding
+	// flow. To start with, we'll get two new keys from both sides which
+	// will be used to create the multi-sig output for the external funding
+	// transaction.
+	keyLoc := &signrpc.KeyLocator{
+		KeyFamily: 9999,
+		KeyIndex:  1,
+	}
+	carolFundingKey, err := carol.WalletKitClient.DeriveKey(ctxb, keyLoc)
+	if err != nil {
+		t.Fatalf("unable to get carol funding key: %v", err)
+	}
+	daveFundingKey, err := dave.WalletKitClient.DeriveKey(ctxb, keyLoc)
+	if err != nil {
+		t.Fatalf("unable to get dave funding key: %v", err)
+	}
+
+	// Now that we have the multi-sig keys for each party, we can manually
+	// construct the funding transaction. We'll instruct the backend to
+	// immediately create and broadcast a transaction paying out an exact
+	// amount. Normally this would reside in the mempool, but we just
+	// confirm it now for simplicity.
+	const chanSize = lnd.MaxBtcFundingAmount
+	_, fundingOutput, err := input.GenFundingPkScript(
+		carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes,
+		int64(chanSize),
+	)
+	if err != nil {
+		t.Fatalf("unable to create funding script: %v", err)
+	}
+	txid, err := net.Miner.SendOutputsWithoutChange(
+		[]*wire.TxOut{fundingOutput}, 5,
+	)
+	if err != nil {
+		t.Fatalf("unable to create funding output: %v", err)
+	}
+
+	// At this point, we can begin our external channel funding workflow.
+	// We'll start by generating a pending channel ID externally that will
+	// be used to track this new funding type.
+	var pendingChanID [32]byte
+	if _, err := rand.Read(pendingChanID[:]); err != nil {
+		t.Fatalf("unable to gen pending chan ID: %v", err)
+	}
+
+	_, currentHeight, err := net.Miner.Node.GetBestBlock()
+	if err != nil {
+		t.Fatalf("unable to get current blockheight %v", err)
+	}
+
+	// Now that we have the pending channel ID, Dave (our responder) will
+	// register the intent to receive a new channel funding workflow using
+	// the pending channel ID.
+ chanPoint := &lnrpc.ChannelPoint{ + FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ + FundingTxidBytes: txid[:], + }, + } + thawHeight := uint32(currentHeight + 10) + chanPointShim := &lnrpc.ChanPointShim{ + Amt: int64(chanSize), + ChanPoint: chanPoint, + LocalKey: &lnrpc.KeyDescriptor{ + RawKeyBytes: daveFundingKey.RawKeyBytes, + KeyLoc: &lnrpc.KeyLocator{ + KeyFamily: daveFundingKey.KeyLoc.KeyFamily, + KeyIndex: daveFundingKey.KeyLoc.KeyIndex, + }, + }, + RemoteKey: carolFundingKey.RawKeyBytes, + PendingChanId: pendingChanID[:], + ThawHeight: thawHeight, + } + fundingShim := &lnrpc.FundingShim{ + Shim: &lnrpc.FundingShim_ChanPointShim{ + ChanPointShim: chanPointShim, + }, + } + _, err = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ + Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{ + ShimRegister: fundingShim, + }, + }) + if err != nil { + t.Fatalf("unable to walk funding state forward: %v", err) + } + + // If we attempt to register the same shim (has the same pending chan + // ID), then we should get an error. + _, err = dave.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ + Trigger: &lnrpc.FundingTransitionMsg_ShimRegister{ + ShimRegister: fundingShim, + }, + }) + if err == nil { + t.Fatalf("duplicate pending channel ID funding shim " + + "registration should trigger an error") + } + + // We'll take the chan point shim we just registered for Dave (the + // responder), and swap the local/remote keys before we feed it in as + // Carol's funding shim as the initiator. + fundingShim.GetChanPointShim().LocalKey = &lnrpc.KeyDescriptor{ + RawKeyBytes: carolFundingKey.RawKeyBytes, + KeyLoc: &lnrpc.KeyLocator{ + KeyFamily: carolFundingKey.KeyLoc.KeyFamily, + KeyIndex: carolFundingKey.KeyLoc.KeyIndex, + }, + } + fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes + + // At this point, we'll now carry out the normal basic channel funding + // test as everything should now proceed as normal (a regular channel + // funding flow). 
+	carolChan, daveChan, _, err := basicChannelFundingTest(
+		t, net, carol, dave, fundingShim,
+	)
+	if err != nil {
+		t.Fatalf("unable to open channels: %v", err)
+	}
+
+	// Both channels should be marked as frozen with the proper thaw
+	// height.
+	if carolChan.ThawHeight != thawHeight {
+		t.Fatalf("expected thaw height of %v, got %v",
+			thawHeight, carolChan.ThawHeight)
+	}
+	if daveChan.ThawHeight != thawHeight {
+		t.Fatalf("expected thaw height of %v, got %v",
+			thawHeight, daveChan.ThawHeight)
+	}
+
+	// Next, to make sure the channel functions as normal, we'll make some
+	// payments within the channel.
+	payAmt := btcutil.Amount(100000)
+	invoice := &lnrpc.Invoice{
+		Memo:  "new chans",
+		Value: int64(payAmt),
+	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	resp, err := dave.AddInvoice(ctxt, invoice)
+	if err != nil {
+		t.Fatalf("unable to add invoice: %v", err)
+	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = completePaymentRequests(
+		ctxt, carol, []string{resp.PaymentRequest}, true,
+	)
+	if err != nil {
+		t.Fatalf("unable to make payments between Carol and Dave")
+	}
+
+	// Now that the channels are open, and we've confirmed that they're
+	// operational, we'll now ensure that the channels are frozen as
+	// intended (if requested).
+	//
+	// First, we'll try to close the channel as Carol, the initiator. This
+	// should fail as a frozen channel only allows the responder to
+	// initiate a channel close.
+	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
+	_, _, err = net.CloseChannel(ctxt, carol, chanPoint, false)
+	if err == nil {
+		t.Fatalf("carol wasn't denied a co-op close attempt for a " +
+			"frozen channel")
+	}
+
+	// Next we'll try but this time with Dave (the responder) as the
+	// initiator. This time the channel should be closed as normal.
+ ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert(ctxt, t, net, dave, chanPoint, false) +} + +// sendAndAssertSuccess sends the given payment requests and asserts that the +// payment completes successfully. +func sendAndAssertSuccess(t *harnessTest, node *lntest.HarnessNode, + req *routerrpc.SendPaymentRequest) *lnrpc.Payment { + + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) + defer cancel() + + stream, err := node.RouterClient.SendPaymentV2(ctx, req) + if err != nil { + t.Fatalf("unable to send payment: %v", err) + } + + result, err := getPaymentResult(stream) + if err != nil { + t.Fatalf("unable to get payment result: %v", err) + } + + if result.Status != lnrpc.Payment_SUCCEEDED { + t.Fatalf("payment failed: %v", result.Status) + } + + return result +} + +// getPaymentResult reads a final result from the stream and returns it. +func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) ( + *lnrpc.Payment, error) { + + for { + payment, err := stream.Recv() + if err != nil { + return nil, err + } + + if payment.Status != lnrpc.Payment_IN_FLIGHT { + return payment, nil + } + } +} + type testCase struct { name string test func(net *lntest.NetworkHarness, t *harnessTest) @@ -14421,42 +14893,8 @@ var testsCases = []*testCase{ test: testBidirectionalAsyncPayments, }, { - // bob: outgoing our commit timeout - // carol: incoming their commit watch and see timeout - name: "test multi-hop htlc local force close immediate expiry", - test: testMultiHopHtlcLocalTimeout, - }, - { - // bob: outgoing watch and see, they sweep on chain - // carol: incoming our commit, know preimage - name: "test multi-hop htlc receiver chain claim", - test: testMultiHopReceiverChainClaim, - }, - { - // bob: outgoing our commit watch and see timeout - // carol: incoming their commit watch and see timeout - name: "test multi-hop local force close on-chain htlc timeout", - test: testMultiHopLocalForceCloseOnChainHtlcTimeout, - }, 
- { - // bob: outgoing their commit watch and see timeout - // carol: incoming our commit watch and see timeout - name: "test multi-hop remote force close on-chain htlc timeout", - test: testMultiHopRemoteForceCloseOnChainHtlcTimeout, - }, - { - // bob: outgoing our commit watch and see, they sweep on chain - // bob: incoming our commit watch and learn preimage - // carol: incoming their commit know preimage - name: "test multi-hop htlc local chain claim", - test: testMultiHopHtlcLocalChainClaim, - }, - { - // bob: outgoing their commit watch and see, they sweep on chain - // bob: incoming their commit watch and learn preimage - // carol: incoming our commit know preimage - name: "test multi-hop htlc remote chain claim", - test: testMultiHopHtlcRemoteChainClaim, + name: "test multi-hop htlc", + test: testMultiHopHtlcClaims, }, { name: "switch circuit persistence", @@ -14540,6 +14978,30 @@ var testsCases = []*testCase{ name: "cpfp", test: testCPFP, }, + { + name: "macaroon authentication", + test: testMacaroonAuthentication, + }, + { + name: "immediate payment after channel opened", + test: testPaymentFollowingChannelOpen, + }, + { + name: "external channel funding", + test: testExternalFundingChanPoint, + }, + { + name: "psbt channel funding", + test: testPsbtChanFunding, + }, + { + name: "sendtoroute multi path payment", + test: testSendToRouteMultiPath, + }, + { + name: "send multi path payment", + test: testSendMultiPathPayment, + }, } // TestLightningNetworkDaemon performs a series of integration tests amongst a @@ -14614,7 +15076,9 @@ func TestLightningNetworkDaemon(t *testing.T) { // Now we can set up our test harness (LND instance), with the chain // backend we just created. 
- lndHarness, err = lntest.NewNetworkHarness(miner, chainBackend) + lndHarness, err = lntest.NewNetworkHarness( + miner, chainBackend, itestLndBinary, + ) if err != nil { ht.Fatalf("unable to create lightning network harness: %v", err) } @@ -14678,6 +15142,10 @@ func TestLightningNetworkDaemon(t *testing.T) { // Stop at the first failure. Mimic behavior of original test // framework. if !success { + // Log failure time to help relate the lnd logs to the + // failure. + t.Logf("Failure time: %v", + time.Now().Format("2006-01-02 15:04:05.000")) break } } diff --git a/lntest/itest/macaroons.go b/lntest/itest/macaroons.go new file mode 100644 index 0000000000..bfade118be --- /dev/null +++ b/lntest/itest/macaroons.go @@ -0,0 +1,168 @@ +// +build rpctest + +package itest + +import ( + "context" + "strings" + + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/macaroons" + "gopkg.in/macaroon.v2" +) + +// errContains is a helper function that returns true if a string is contained +// in the message of an error. +func errContains(err error, str string) bool { + return strings.Contains(err.Error(), str) +} + +// testMacaroonAuthentication makes sure that if macaroon authentication is +// enabled on the gRPC interface, no requests with missing or invalid +// macaroons are allowed. Further, the specific access rights (read/write, +// entity based) and first-party caveats are tested as well. +func testMacaroonAuthentication(net *lntest.NetworkHarness, t *harnessTest) { + var ( + ctxb = context.Background() + infoReq = &lnrpc.GetInfoRequest{} + newAddrReq = &lnrpc.NewAddressRequest{ + Type: AddrTypeWitnessPubkeyHash, + } + testNode = net.Alice + ) + + // First test: Make sure we get an error if we use no macaroons but try + // to connect to a node that has macaroon authentication enabled. 
+ conn, err := testNode.ConnectRPC(false) + if err != nil { + t.Fatalf("unable to connect to alice: %v", err) + } + defer conn.Close() + ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + noMacConnection := lnrpc.NewLightningClient(conn) + _, err = noMacConnection.GetInfo(ctxt, infoReq) + if err == nil || !errContains(err, "expected 1 macaroon") { + t.Fatalf("expected to get an error when connecting without " + + "macaroons") + } + + // Second test: Ensure that an invalid macaroon also triggers an error. + invalidMac, _ := macaroon.New( + []byte("dummy_root_key"), []byte("0"), "itest", + macaroon.LatestVersion, + ) + conn, err = testNode.ConnectRPCWithMacaroon(invalidMac) + if err != nil { + t.Fatalf("unable to connect to alice: %v", err) + } + defer conn.Close() + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + invalidMacConnection := lnrpc.NewLightningClient(conn) + _, err = invalidMacConnection.GetInfo(ctxt, infoReq) + if err == nil || !errContains(err, "cannot get macaroon") { + t.Fatalf("expected to get an error when connecting with an " + + "invalid macaroon") + } + + // Third test: Try to access a write method with read-only macaroon. + readonlyMac, err := testNode.ReadMacaroon( + testNode.ReadMacPath(), defaultTimeout, + ) + if err != nil { + t.Fatalf("unable to read readonly.macaroon from node: %v", err) + } + conn, err = testNode.ConnectRPCWithMacaroon(readonlyMac) + if err != nil { + t.Fatalf("unable to connect to alice: %v", err) + } + defer conn.Close() + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + readonlyMacConnection := lnrpc.NewLightningClient(conn) + _, err = readonlyMacConnection.NewAddress(ctxt, newAddrReq) + if err == nil || !errContains(err, "permission denied") { + t.Fatalf("expected to get an error when connecting to " + + "write method with read-only macaroon") + } + + // Fourth test: Check first-party caveat with timeout that expired + // 30 seconds ago. 
+ timeoutMac, err := macaroons.AddConstraints( + readonlyMac, macaroons.TimeoutConstraint(-30), + ) + if err != nil { + t.Fatalf("unable to add constraint to readonly macaroon: %v", + err) + } + conn, err = testNode.ConnectRPCWithMacaroon(timeoutMac) + if err != nil { + t.Fatalf("unable to connect to alice: %v", err) + } + defer conn.Close() + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + timeoutMacConnection := lnrpc.NewLightningClient(conn) + _, err = timeoutMacConnection.GetInfo(ctxt, infoReq) + if err == nil || !errContains(err, "macaroon has expired") { + t.Fatalf("expected to get an error when connecting with an " + + "invalid macaroon") + } + + // Fifth test: Check first-party caveat with invalid IP address. + invalidIpAddrMac, err := macaroons.AddConstraints( + readonlyMac, macaroons.IPLockConstraint("1.1.1.1"), + ) + if err != nil { + t.Fatalf("unable to add constraint to readonly macaroon: %v", + err) + } + conn, err = testNode.ConnectRPCWithMacaroon(invalidIpAddrMac) + if err != nil { + t.Fatalf("unable to connect to alice: %v", err) + } + defer conn.Close() + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + invalidIpAddrMacConnection := lnrpc.NewLightningClient(conn) + _, err = invalidIpAddrMacConnection.GetInfo(ctxt, infoReq) + if err == nil || !errContains(err, "different IP address") { + t.Fatalf("expected to get an error when connecting with an " + + "invalid macaroon") + } + + // Sixth test: Make sure that if we do everything correct and send + // the admin macaroon with first-party caveats that we can satisfy, + // we get a correct answer. 
+ adminMac, err := testNode.ReadMacaroon( + testNode.AdminMacPath(), defaultTimeout, + ) + if err != nil { + t.Fatalf("unable to read admin.macaroon from node: %v", err) + } + adminMac, err = macaroons.AddConstraints( + adminMac, macaroons.TimeoutConstraint(30), + macaroons.IPLockConstraint("127.0.0.1"), + ) + if err != nil { + t.Fatalf("unable to add constraints to admin macaroon: %v", err) + } + conn, err = testNode.ConnectRPCWithMacaroon(adminMac) + if err != nil { + t.Fatalf("unable to connect to alice: %v", err) + } + defer conn.Close() + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + adminMacConnection := lnrpc.NewLightningClient(conn) + res, err := adminMacConnection.NewAddress(ctxt, newAddrReq) + if err != nil { + t.Fatalf("unable to get new address with valid macaroon: %v", + err) + } + if !strings.HasPrefix(res.Address, "bcrt1") { + t.Fatalf("returned address was not a regtest address") + } +} diff --git a/lntest/itest/onchain.go b/lntest/itest/onchain.go index ccc73feb54..22b50e3c50 100644 --- a/lntest/itest/onchain.go +++ b/lntest/itest/onchain.go @@ -94,7 +94,7 @@ func testCPFP(net *lntest.NetworkHarness, t *harnessTest) { } bumpFeeReq := &walletrpc.BumpFeeRequest{ Outpoint: op, - SatPerByte: uint32(sweep.DefaultMaxFeeRate.FeePerKVByte() / 1000), + SatPerByte: uint32(sweep.DefaultMaxFeeRate.FeePerKVByte() / 2000), } ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) _, err = net.Bob.WalletKitClient.BumpFee(ctxt, bumpFeeReq) diff --git a/lntest/itest/psbt.go b/lntest/itest/psbt.go new file mode 100644 index 0000000000..86f2462439 --- /dev/null +++ b/lntest/itest/psbt.go @@ -0,0 +1,350 @@ +// +build rpctest + +package itest + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/psbt" + "github.com/lightningnetwork/lnd" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" +) + +// 
testPsbtChanFunding makes sure a channel can be opened between carol and dave +// by using a Partially Signed Bitcoin Transaction that funds the channel +// multisig funding output. +func testPsbtChanFunding(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + const chanSize = lnd.MaxBtcFundingAmount + + // First, we'll create two new nodes that we'll use to open channel + // between for this test. + carol, err := net.NewNode("carol", nil) + if err != nil { + t.Fatalf("unable to start new node: %v", err) + } + defer shutdownAndAssert(net, t, carol) + + dave, err := net.NewNode("dave", nil) + if err != nil { + t.Fatalf("unable to start new node: %v", err) + } + defer shutdownAndAssert(net, t, dave) + + // Before we start the test, we'll ensure both sides are connected so + // the funding flow can be properly executed. + ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + err = net.EnsureConnected(ctxt, carol, dave) + if err != nil { + t.Fatalf("unable to connect peers: %v", err) + } + + // At this point, we can begin our PSBT channel funding workflow. We'll + // start by generating a pending channel ID externally that will be used + // to track this new funding type. + var pendingChanID [32]byte + if _, err := rand.Read(pendingChanID[:]); err != nil { + t.Fatalf("unable to gen pending chan ID: %v", err) + } + + // Now that we have the pending channel ID, Carol will open the channel + // by specifying a PSBT shim. 
+ ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + chanUpdates, psbtBytes, err := openChannelPsbt( + ctxt, carol, dave, lntest.OpenChannelParams{ + Amt: chanSize, + FundingShim: &lnrpc.FundingShim{ + Shim: &lnrpc.FundingShim_PsbtShim{ + PsbtShim: &lnrpc.PsbtShim{ + PendingChanId: pendingChanID[:], + }, + }, + }, + }, + ) + if err != nil { + t.Fatalf("unable to open channel: %v", err) + } + packet, err := psbt.NewFromRawBytes(bytes.NewReader(psbtBytes), false) + if err != nil { + t.Fatalf("unable to parse returned PSBT: %v", err) + } + + // We'll now create a fully signed transaction that sends to the outputs + // encoded in the PSBT. We'll let the miner do it and convert the final + // TX into a PSBT, that's way easier than assembling a PSBT manually. + tx, err := net.Miner.CreateTransaction(packet.UnsignedTx.TxOut, 5, true) + if err != nil { + t.Fatalf("unable to create funding transaction: %v", err) + } + + // The helper function splits the final TX into the non-witness data + // encoded in a PSBT and the witness data returned separately. + unsignedPsbt, scripts, witnesses, err := createPsbtFromSignedTx(tx) + if err != nil { + t.Fatalf("unable to convert funding transaction into PSBT: %v", + err) + } + + // The PSBT will also be checked if there are large enough inputs + // present. We need to add some fake UTXO information to the PSBT to + // tell it what size of inputs we have. + for idx, txIn := range unsignedPsbt.UnsignedTx.TxIn { + utxPrevOut := txIn.PreviousOutPoint.Index + fakeUtxo := &wire.MsgTx{ + Version: 2, + TxIn: []*wire.TxIn{{}}, + TxOut: make([]*wire.TxOut, utxPrevOut+1), + } + for idx := range fakeUtxo.TxOut { + fakeUtxo.TxOut[idx] = &wire.TxOut{} + } + fakeUtxo.TxOut[utxPrevOut].Value = 10000000000 + unsignedPsbt.Inputs[idx].NonWitnessUtxo = fakeUtxo + } + + // Serialize the PSBT with the faked UTXO information. 
+ var buf bytes.Buffer + err = unsignedPsbt.Serialize(&buf) + if err != nil { + t.Fatalf("error serializing PSBT: %v", err) + } + + // We have a PSBT that has no witness data yet, which is exactly what we + // need for the next step: Verify the PSBT with the funding intent. + _, err = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ + Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{ + PsbtVerify: &lnrpc.FundingPsbtVerify{ + PendingChanId: pendingChanID[:], + FundedPsbt: buf.Bytes(), + }, + }, + }) + if err != nil { + t.Fatalf("error verifying PSBT with funding intent: %v", err) + } + + // Now we'll add the witness data back into the PSBT to make it a + // complete and signed transaction that can be finalized. We'll trick + // a bit by putting the script sig back directly, because we know we + // will only get non-witness outputs from the miner wallet. + for idx := range tx.TxIn { + if len(witnesses[idx]) > 0 { + t.Fatalf("unexpected witness inputs in wallet TX") + } + unsignedPsbt.Inputs[idx].FinalScriptSig = scripts[idx] + } + + // We've signed our PSBT now, let's pass it to the intent again. + buf.Reset() + err = unsignedPsbt.Serialize(&buf) + if err != nil { + t.Fatalf("error serializing PSBT: %v", err) + } + _, err = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ + Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{ + PsbtFinalize: &lnrpc.FundingPsbtFinalize{ + PendingChanId: pendingChanID[:], + SignedPsbt: buf.Bytes(), + }, + }, + }) + if err != nil { + t.Fatalf("error finalizing PSBT with funding intent: %v", err) + } + + // Consume the "channel pending" update. This waits until the funding + // transaction has been published. 
+ ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + updateResp, err := receiveChanUpdate(ctxt, chanUpdates) + if err != nil { + t.Fatalf("unable to consume channel update message: %v", err) + } + upd, ok := updateResp.Update.(*lnrpc.OpenStatusUpdate_ChanPending) + if !ok { + t.Fatalf("expected PSBT funding update, instead got %v", + updateResp) + } + chanPoint := &lnrpc.ChannelPoint{ + FundingTxid: &lnrpc.ChannelPoint_FundingTxidBytes{ + FundingTxidBytes: upd.ChanPending.Txid, + }, + OutputIndex: upd.ChanPending.OutputIndex, + } + + // Great, now we can mine a block to get the transaction confirmed, then + // wait for the new channel to be propagated through the network. + txHash := tx.TxHash() + block := mineBlocks(t, net, 6, 1)[0] + assertTxInBlock(t, block, &txHash) + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) + if err != nil { + t.Fatalf("carol didn't report channel: %v", err) + } + + // With the channel open, ensure that it is counted towards Carol's + // total channel balance. + balReq := &lnrpc.ChannelBalanceRequest{} + ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + balRes, err := carol.ChannelBalance(ctxt, balReq) + if err != nil { + t.Fatalf("unable to get carol's balance: %v", err) + } + if balRes.Balance == 0 { + t.Fatalf("carol has an empty channel balance") + } + + // Next, to make sure the channel functions as normal, we'll make some + // payments within the channel. 
+ payAmt := btcutil.Amount(100000) + invoice := &lnrpc.Invoice{ + Memo: "new chans", + Value: int64(payAmt), + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + resp, err := dave.AddInvoice(ctxt, invoice) + if err != nil { + t.Fatalf("unable to add invoice: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = completePaymentRequests( + ctxt, carol, []string{resp.PaymentRequest}, true, + ) + if err != nil { + t.Fatalf("unable to make payments between Carol and Dave") + } + + // To conclude, we'll close the newly created channel between Carol and + // Dave. This function will also block until the channel is closed and + // will additionally assert the relevant channel closing post + // conditions. + ctxt, cancel = context.WithTimeout(ctxb, channelCloseTimeout) + defer cancel() + closeChannelAndAssert(ctxt, t, net, carol, chanPoint, false) +} + +// openChannelPsbt attempts to open a channel between srcNode and destNode with +// the passed channel funding parameters. If the passed context has a timeout, +// then if the timeout is reached before the channel pending notification is +// received, an error is returned. An error is returned if the expected step +// of funding the PSBT is not received from the source node. +func openChannelPsbt(ctx context.Context, srcNode, destNode *lntest.HarnessNode, + p lntest.OpenChannelParams) (lnrpc.Lightning_OpenChannelClient, []byte, + error) { + + // Wait until srcNode and destNode have the latest chain synced. + // Otherwise, we may run into a check within the funding manager that + // prevents any funding workflows from being kicked off if the chain + // isn't yet synced. 
+ if err := srcNode.WaitForBlockchainSync(ctx); err != nil { + return nil, nil, fmt.Errorf("unable to sync srcNode chain: %v", + err) + } + if err := destNode.WaitForBlockchainSync(ctx); err != nil { + return nil, nil, fmt.Errorf("unable to sync destNode chain: %v", + err) + } + + // Send the request to open a channel to the source node now. This will + // open a long-lived stream where we'll receive status updates about the + // progress of the channel. + respStream, err := srcNode.OpenChannel(ctx, &lnrpc.OpenChannelRequest{ + NodePubkey: destNode.PubKey[:], + LocalFundingAmount: int64(p.Amt), + PushSat: int64(p.PushAmt), + Private: p.Private, + SpendUnconfirmed: p.SpendUnconfirmed, + MinHtlcMsat: int64(p.MinHtlc), + FundingShim: p.FundingShim, + }) + if err != nil { + return nil, nil, fmt.Errorf("unable to open channel between "+ + "source and dest: %v", err) + } + + // Consume the "PSBT funding ready" update. This waits until the node + // notifies us that the PSBT can now be funded. + resp, err := receiveChanUpdate(ctx, respStream) + if err != nil { + return nil, nil, fmt.Errorf("unable to consume channel update "+ + "message: %v", err) + } + upd, ok := resp.Update.(*lnrpc.OpenStatusUpdate_PsbtFund) + if !ok { + return nil, nil, fmt.Errorf("expected PSBT funding update, "+ + "instead got %v", resp) + } + return respStream, upd.PsbtFund.Psbt, nil +} + +// receiveChanUpdate waits until a message is received on the stream or the +// context is canceled. The context must have a timeout or must be canceled +// in case no message is received, otherwise this function will block forever. +func receiveChanUpdate(ctx context.Context, + stream lnrpc.Lightning_OpenChannelClient) (*lnrpc.OpenStatusUpdate, + error) { + + chanMsg := make(chan *lnrpc.OpenStatusUpdate) + errChan := make(chan error) + go func() { + // Consume one message. This will block until the message is + // received. 
+ resp, err := stream.Recv() + if err != nil { + errChan <- err + return + } + chanMsg <- resp + }() + + select { + case <-ctx.Done(): + return nil, fmt.Errorf("timeout reached before chan pending " + + "update sent") + + case err := <-errChan: + return nil, err + + case updateMsg := <-chanMsg: + return updateMsg, nil + } +} + +// createPsbtFromSignedTx is a utility function to create a PSBT from an +// already-signed transaction, so we can test reconstructing, signing and +// extracting it. Returned are: an unsigned transaction serialization, a list +// of scriptSigs, one per input, and a list of witnesses, one per input. +func createPsbtFromSignedTx(tx *wire.MsgTx) (*psbt.Packet, [][]byte, + []wire.TxWitness, error) { + + scriptSigs := make([][]byte, 0, len(tx.TxIn)) + witnesses := make([]wire.TxWitness, 0, len(tx.TxIn)) + tx2 := tx.Copy() + + // Blank out signature info in inputs + for i, tin := range tx2.TxIn { + tin.SignatureScript = nil + scriptSigs = append(scriptSigs, tx.TxIn[i].SignatureScript) + tin.Witness = nil + witnesses = append(witnesses, tx.TxIn[i].Witness) + } + + // Outputs always contain: (value, scriptPubkey) so don't need + // amending. Now tx2 is tx with all signing data stripped out + unsignedPsbt, err := psbt.NewFromUnsignedTx(tx2) + if err != nil { + return nil, nil, nil, err + } + return unsignedPsbt, scriptSigs, witnesses, nil +} diff --git a/lntest/node.go b/lntest/node.go index 9b4256c3cd..b08e993517 100644 --- a/lntest/node.go +++ b/lntest/node.go @@ -14,6 +14,7 @@ import ( "path/filepath" "strconv" "sync" + "sync/atomic" "time" "github.com/btcsuite/btcd/chaincfg" @@ -36,38 +37,15 @@ import ( ) const ( - // defaultNodePort is the initial p2p port which will be used by the - // first created lightning node to listen on for incoming p2p - // connections. Subsequent allocated ports for future Lightning nodes - // instances will be monotonically increasing numbers calculated as - // such: defaultP2pPort + (4 * harness.nodeNum). 
+ // defaultNodePort is the start of the range for listening ports of + // harness nodes. Ports are monotonically increasing starting from this + // number and are determined by the results of nextAvailablePort(). defaultNodePort = 19555 - // defaultClientPort is the initial rpc port which will be used by the - // first created lightning node to listen on for incoming rpc - // connections. Subsequent allocated ports for future rpc harness - // instances will be monotonically increasing numbers calculated - // as such: defaultP2pPort + (4 * harness.nodeNum). - defaultClientPort = defaultNodePort + 1 - - // defaultRestPort is the initial rest port which will be used by the - // first created lightning node to listen on for incoming rest - // connections. Subsequent allocated ports for future rpc harness - // instances will be monotonically increasing numbers calculated - // as such: defaultP2pPort + (4 * harness.nodeNum). - defaultRestPort = defaultNodePort + 2 - - // defaultProfilePort is the initial port which will be used for - // profiling by the first created lightning node. Subsequent allocated - // ports for future rpc harness instances will be monotonically - // increasing numbers calculated as such: - // defaultProfilePort + (4 * harness.nodeNum). - defaultProfilePort = defaultNodePort + 3 - - // logPubKeyBytes is the number of bytes of the node's PubKey that - // will be appended to the log file name. The whole PubKey is too - // long and not really necessary to quickly identify what node - // produced which log file. + // logPubKeyBytes is the number of bytes of the node's PubKey that will + // be appended to the log file name. The whole PubKey is too long and + // not really necessary to quickly identify what node produced which + // log file. logPubKeyBytes = 4 // trickleDelay is the amount of time in milliseconds between each @@ -77,7 +55,12 @@ const ( var ( // numActiveNodes is the number of active nodes within the test network. 
- numActiveNodes = 0 + numActiveNodes = 0 + numActiveNodesMtx sync.Mutex + + // lastPort is the last port determined to be free for use by a new + // node. It should be used atomically. + lastPort uint32 = defaultNodePort // logOutput is a flag that can be set to append the output from the // seed nodes to log files. @@ -90,16 +73,42 @@ var ( "write goroutine dump from node n to file pprof-n.log") ) -// generateListeningPorts returns three ints representing ports to listen on -// designated for the current lightning network test. If there haven't been any -// test instances created, the default ports are used. Otherwise, in order to -// support multiple test nodes running at once, the p2p, rpc, rest and -// profiling ports are incremented after each initialization. +// nextAvailablePort returns the first port that is available for listening by +// a new node. It panics if no port is found and the maximum available TCP port +// is reached. +func nextAvailablePort() int { + port := atomic.AddUint32(&lastPort, 1) + for port < 65535 { + // If there are no errors while attempting to listen on this + // port, close the socket and return it as available. While it + // could be the case that some other process picks up this port + // between the time the socket is closed and it's reopened in + // the harness node, in practice in CI servers this seems much + // less likely than simply some other process already being + // bound at the start of the tests. + addr := fmt.Sprintf("127.0.0.1:%d", port) + l, err := net.Listen("tcp4", addr) + if err == nil { + err := l.Close() + if err == nil { + return int(port) + } + } + port = atomic.AddUint32(&lastPort, 1) + } + + // No ports available? Must be a mistake. + panic("no ports available for listening") +} + +// generateListeningPorts returns four ints representing ports to listen on +// designated for the current lightning network test. This returns the next +// available ports for the p2p, rpc, rest and profiling services. 
func generateListeningPorts() (int, int, int, int) { - p2p := defaultNodePort + (4 * numActiveNodes) - rpc := defaultClientPort + (4 * numActiveNodes) - rest := defaultRestPort + (4 * numActiveNodes) - profile := defaultProfilePort + (4 * numActiveNodes) + p2p := nextAvailablePort() + rpc := nextAvailablePort() + rest := nextAvailablePort() + profile := nextAvailablePort() return p2p, rpc, rest, profile } @@ -114,14 +123,14 @@ type BackendConfig interface { // ConnectMiner is called to establish a connection to the test miner. ConnectMiner() error - // DisconnectMiner is called to bitconneeeect the miner. + // DisconnectMiner is called to disconnect the miner. DisconnectMiner() error // Name returns the name of the backend type. Name() string } -type nodeConfig struct { +type NodeConfig struct { Name string BackendCfg BackendConfig NetParams *chaincfg.Params @@ -143,26 +152,28 @@ type nodeConfig struct { RPCPort int RESTPort int ProfilePort int + + AcceptKeySend bool } -func (cfg nodeConfig) P2PAddr() string { +func (cfg NodeConfig) P2PAddr() string { return net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.P2PPort)) } -func (cfg nodeConfig) RPCAddr() string { +func (cfg NodeConfig) RPCAddr() string { return net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.RPCPort)) } -func (cfg nodeConfig) RESTAddr() string { +func (cfg NodeConfig) RESTAddr() string { return net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.RESTPort)) } -func (cfg nodeConfig) DBPath() string { +func (cfg NodeConfig) DBPath() string { return filepath.Join(cfg.DataDir, "graph", fmt.Sprintf("%v/channel.db", cfg.NetParams.Name)) } -func (cfg nodeConfig) ChanBackupPath() string { +func (cfg NodeConfig) ChanBackupPath() string { return filepath.Join( cfg.DataDir, "chain", "bitcoin", fmt.Sprintf( @@ -174,7 +185,7 @@ func (cfg nodeConfig) ChanBackupPath() string { // genArgs generates a slice of command line arguments from the lightning node // config struct. 
-func (cfg nodeConfig) genArgs() []string { +func (cfg NodeConfig) genArgs() []string { var args []string switch cfg.NetParams { @@ -216,6 +227,10 @@ func (cfg nodeConfig) genArgs() []string { args = append(args, cfg.ExtraArgs...) } + if cfg.AcceptKeySend { + args = append(args, "--accept-keysend") + } + return args } @@ -223,7 +238,7 @@ func (cfg nodeConfig) genArgs() []string { // harness. Each HarnessNode instance also fully embeds an RPC client in // order to pragmatically drive the node. type HarnessNode struct { - cfg *nodeConfig + Cfg *NodeConfig // NodeID is a unique identifier for the node within a NetworkHarness. NodeID int @@ -263,6 +278,9 @@ type HarnessNode struct { invoicesrpc.InvoicesClient + // conn is the underlying connection to the grpc endpoint of the node. + conn *grpc.ClientConn + // RouterClient, WalletKitClient, WatchtowerClient cannot be embedded, // because a name collision would occur with LightningClient. RouterClient routerrpc.RouterClient @@ -277,7 +295,7 @@ var _ lnrpc.WalletUnlockerClient = (*HarnessNode)(nil) var _ invoicesrpc.InvoicesClient = (*HarnessNode)(nil) // newNode creates a new test lightning node instance from the passed config. 
-func newNode(cfg nodeConfig) (*HarnessNode, error) { +func newNode(cfg NodeConfig) (*HarnessNode, error) { if cfg.BaseDir == "" { var err error cfg.BaseDir, err = ioutil.TempDir("", "lndtest-node") @@ -289,17 +307,28 @@ func newNode(cfg nodeConfig) (*HarnessNode, error) { cfg.LogDir = filepath.Join(cfg.BaseDir, "log") cfg.TLSCertPath = filepath.Join(cfg.DataDir, "tls.cert") cfg.TLSKeyPath = filepath.Join(cfg.DataDir, "tls.key") - cfg.AdminMacPath = filepath.Join(cfg.DataDir, "admin.macaroon") - cfg.ReadMacPath = filepath.Join(cfg.DataDir, "readonly.macaroon") - cfg.InvoiceMacPath = filepath.Join(cfg.DataDir, "invoice.macaroon") + + networkDir := filepath.Join( + cfg.DataDir, "chain", "bitcoin", cfg.NetParams.Name, + ) + cfg.AdminMacPath = filepath.Join(networkDir, "admin.macaroon") + cfg.ReadMacPath = filepath.Join(networkDir, "readonly.macaroon") + cfg.InvoiceMacPath = filepath.Join(networkDir, "invoice.macaroon") cfg.P2PPort, cfg.RPCPort, cfg.RESTPort, cfg.ProfilePort = generateListeningPorts() + // Run all tests with accept keysend. The keysend code is very isolated + // and it is highly unlikely that it would affect regular itests when + // enabled. + cfg.AcceptKeySend = true + + numActiveNodesMtx.Lock() nodeNum := numActiveNodes numActiveNodes++ + numActiveNodesMtx.Unlock() return &HarnessNode{ - cfg: &cfg, + Cfg: &cfg, NodeID: nodeNum, chanWatchRequests: make(chan *chanWatchRequest), openChans: make(map[wire.OutPoint]int), @@ -312,28 +341,44 @@ func newNode(cfg nodeConfig) (*HarnessNode, error) { // DBPath returns the filepath to the channeldb database file for this node. func (hn *HarnessNode) DBPath() string { - return hn.cfg.DBPath() + return hn.Cfg.DBPath() } // Name returns the name of this node set during initialization. func (hn *HarnessNode) Name() string { - return hn.cfg.Name + return hn.Cfg.Name } // TLSCertStr returns the path where the TLS certificate is stored. 
func (hn *HarnessNode) TLSCertStr() string { - return hn.cfg.TLSCertPath + return hn.Cfg.TLSCertPath } // TLSKeyStr returns the path where the TLS key is stored. func (hn *HarnessNode) TLSKeyStr() string { - return hn.cfg.TLSKeyPath + return hn.Cfg.TLSKeyPath } // ChanBackupPath returns the fielpath to the on-disk channels.backup file for // this node. func (hn *HarnessNode) ChanBackupPath() string { - return hn.cfg.ChanBackupPath() + return hn.Cfg.ChanBackupPath() +} + +// AdminMacPath returns the filepath to the admin.macaroon file for this node. +func (hn *HarnessNode) AdminMacPath() string { + return hn.Cfg.AdminMacPath +} + +// ReadMacPath returns the filepath to the readonly.macaroon file for this node. +func (hn *HarnessNode) ReadMacPath() string { + return hn.Cfg.ReadMacPath +} + +// InvoiceMacPath returns the filepath to the invoice.macaroon file for this +// node. +func (hn *HarnessNode) InvoiceMacPath() string { + return hn.Cfg.InvoiceMacPath } // Start launches a new process running lnd. Additionally, the PID of the @@ -342,11 +387,11 @@ func (hn *HarnessNode) ChanBackupPath() string { // // This may not clean up properly if an error is returned, so the caller should // call shutdown() regardless of the return value. -func (hn *HarnessNode) start(lndError chan<- error) error { +func (hn *HarnessNode) start(lndBinary string, lndError chan<- error) error { hn.quit = make(chan struct{}) - args := hn.cfg.genArgs() - hn.cmd = exec.Command("../../lnd-itest", args...) + args := hn.Cfg.genArgs() + hn.cmd = exec.Command(lndBinary, args...) // Redirect stderr output to buffer var errb bytes.Buffer @@ -364,14 +409,14 @@ func (hn *HarnessNode) start(lndError chan<- error) error { // log files. 
if *logOutput { fileName := fmt.Sprintf("output-%d-%s-%s.log", hn.NodeID, - hn.cfg.Name, hex.EncodeToString(hn.PubKey[:logPubKeyBytes])) + hn.Cfg.Name, hex.EncodeToString(hn.PubKey[:logPubKeyBytes])) // If the node's PubKey is not yet initialized, create a temporary // file name. Later, after the PubKey has been initialized, the // file can be moved to its final name with the PubKey included. if bytes.Equal(hn.PubKey[:4], []byte{0, 0, 0, 0}) { fileName = fmt.Sprintf("output-%d-%s-tmp__.log", hn.NodeID, - hn.cfg.Name) + hn.Cfg.Name) // Once the node has done its work, the log file can be renamed. finalizeLogfile = func() { @@ -379,7 +424,7 @@ func (hn *HarnessNode) start(lndError chan<- error) error { hn.logFile.Close() newFileName := fmt.Sprintf("output-%d-%s-%s.log", - hn.NodeID, hn.cfg.Name, + hn.NodeID, hn.Cfg.Name, hex.EncodeToString(hn.PubKey[:logPubKeyBytes])) err := os.Rename(fileName, newFileName) if err != nil { @@ -442,7 +487,7 @@ func (hn *HarnessNode) start(lndError chan<- error) error { // Since Stop uses the LightningClient to stop the node, if we fail to get a // connected client, we have to kill the process. - useMacaroons := !hn.cfg.HasSeed + useMacaroons := !hn.Cfg.HasSeed conn, err := hn.ConnectRPC(useMacaroons) if err != nil { hn.cmd.Process.Kill() @@ -453,7 +498,7 @@ func (hn *HarnessNode) start(lndError chan<- error) error { // additional step to unlock the wallet. The connection returned will // only use the TLS certs, and can only perform operations necessary to // unlock the daemon. - if hn.cfg.HasSeed { + if hn.Cfg.HasSeed { hn.WalletUnlockerClient = lnrpc.NewWalletUnlockerClient(conn) return nil } @@ -525,6 +570,7 @@ func (hn *HarnessNode) Unlock(ctx context.Context, func (hn *HarnessNode) initLightningClient(conn *grpc.ClientConn) error { // Construct the LightningClient that will allow us to use the // HarnessNode directly for normal rpc operations. 
+ hn.conn = conn hn.LightningClient = lnrpc.NewLightningClient(conn) hn.InvoicesClient = invoicesrpc.NewInvoicesClient(conn) hn.RouterClient = routerrpc.NewRouterClient(conn) @@ -618,7 +664,7 @@ func (hn *HarnessNode) AddToLog(line string) error { // writePidFile writes the process ID of the running lnd process to a .pid file. func (hn *HarnessNode) writePidFile() error { - filePath := filepath.Join(hn.cfg.BaseDir, fmt.Sprintf("%v.pid", hn.NodeID)) + filePath := filepath.Join(hn.Cfg.BaseDir, fmt.Sprintf("%v.pid", hn.NodeID)) pid, err := os.Create(filePath) if err != nil { @@ -635,71 +681,99 @@ func (hn *HarnessNode) writePidFile() error { return nil } -// ConnectRPC uses the TLS certificate and admin macaroon files written by the -// lnd node to create a gRPC client connection. -func (hn *HarnessNode) ConnectRPC(useMacs bool) (*grpc.ClientConn, error) { - // Wait until TLS certificate and admin macaroon are created before - // using them, up to 20 sec. - tlsTimeout := time.After(30 * time.Second) - for !fileExists(hn.cfg.TLSCertPath) { +// ReadMacaroon waits a given duration for the macaroon file to be created. If +// the file is readable within the timeout, its content is de-serialized as a +// macaroon and returned. +func (hn *HarnessNode) ReadMacaroon(macPath string, timeout time.Duration) ( + *macaroon.Macaroon, error) { + + // Wait until macaroon file is created before using it. 
+ macTimeout := time.After(timeout) + for !fileExists(macPath) { select { - case <-tlsTimeout: - return nil, fmt.Errorf("timeout waiting for TLS cert " + - "file to be created after 30 seconds") + case <-macTimeout: + return nil, fmt.Errorf("timeout waiting for macaroon "+ + "file %s to be created after %d seconds", + macPath, timeout/time.Second) case <-time.After(100 * time.Millisecond): } } - opts := []grpc.DialOption{ - grpc.WithBlock(), - grpc.WithTimeout(time.Second * 20), - } - - tlsCreds, err := credentials.NewClientTLSFromFile(hn.cfg.TLSCertPath, "") + // Now that we know the file exists, read it and return the macaroon. + macBytes, err := ioutil.ReadFile(macPath) if err != nil { return nil, err } - - opts = append(opts, grpc.WithTransportCredentials(tlsCreds)) - - if !useMacs { - return grpc.Dial(hn.cfg.RPCAddr(), opts...) + mac := &macaroon.Macaroon{} + if err = mac.UnmarshalBinary(macBytes); err != nil { + return nil, err } + return mac, nil +} + +// ConnectRPCWithMacaroon uses the TLS certificate and given macaroon to +// create a gRPC client connection. +func (hn *HarnessNode) ConnectRPCWithMacaroon(mac *macaroon.Macaroon) ( + *grpc.ClientConn, error) { - macTimeout := time.After(30 * time.Second) - for !fileExists(hn.cfg.AdminMacPath) { + // Wait until TLS certificate is created before using it, up to 30 sec. 
+ tlsTimeout := time.After(DefaultTimeout) + for !fileExists(hn.Cfg.TLSCertPath) { select { - case <-macTimeout: - return nil, fmt.Errorf("timeout waiting for admin " + - "macaroon file to be created after 30 seconds") + case <-tlsTimeout: + return nil, fmt.Errorf("timeout waiting for TLS cert " + + "file to be created") case <-time.After(100 * time.Millisecond): } } - macBytes, err := ioutil.ReadFile(hn.cfg.AdminMacPath) + opts := []grpc.DialOption{grpc.WithBlock()} + tlsCreds, err := credentials.NewClientTLSFromFile( + hn.Cfg.TLSCertPath, "", + ) if err != nil { return nil, err } - mac := &macaroon.Macaroon{} - if err = mac.UnmarshalBinary(macBytes); err != nil { - return nil, err - } + opts = append(opts, grpc.WithTransportCredentials(tlsCreds)) + if mac == nil { + return grpc.Dial(hn.Cfg.RPCAddr(), opts...) + } macCred := macaroons.NewMacaroonCredential(mac) opts = append(opts, grpc.WithPerRPCCredentials(macCred)) - return grpc.Dial(hn.cfg.RPCAddr(), opts...) + ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) + defer cancel() + return grpc.DialContext(ctx, hn.Cfg.RPCAddr(), opts...) +} + +// ConnectRPC uses the TLS certificate and admin macaroon files written by the +// lnd node to create a gRPC client connection. +func (hn *HarnessNode) ConnectRPC(useMacs bool) (*grpc.ClientConn, error) { + // If we don't want to use macaroons, just pass nil, the next method + // will handle it correctly. + if !useMacs { + return hn.ConnectRPCWithMacaroon(nil) + } + + // If we should use a macaroon, always take the admin macaroon as a + // default. + mac, err := hn.ReadMacaroon(hn.Cfg.AdminMacPath, DefaultTimeout) + if err != nil { + return nil, err + } + return hn.ConnectRPCWithMacaroon(mac) } // SetExtraArgs assigns the ExtraArgs field for the node's configuration. The // changes will take effect on restart. 
func (hn *HarnessNode) SetExtraArgs(extraArgs []string) { - hn.cfg.ExtraArgs = extraArgs + hn.Cfg.ExtraArgs = extraArgs } // cleanup cleans up all the temporary files created by the node's process. func (hn *HarnessNode) cleanup() error { - return os.RemoveAll(hn.cfg.BaseDir) + return os.RemoveAll(hn.Cfg.BaseDir) } // Stop attempts to stop the active lnd process. @@ -735,6 +809,15 @@ func (hn *HarnessNode) stop() error { hn.WalletUnlockerClient = nil hn.Watchtower = nil hn.WatchtowerClient = nil + + // Close any attempts at further grpc connections. + if hn.conn != nil { + err := hn.conn.Close() + if err != nil { + return fmt.Errorf("error attempting to stop grpc client: %v", err) + } + } + return nil } diff --git a/lntest/timeouts.go b/lntest/timeouts.go index 0521384605..d099d06b1d 100644 --- a/lntest/timeouts.go +++ b/lntest/timeouts.go @@ -23,5 +23,5 @@ const ( // AsyncBenchmarkTimeout is the timeout used when running the async // payments benchmark. - AsyncBenchmarkTimeout = time.Minute + AsyncBenchmarkTimeout = 2 * time.Minute ) diff --git a/lntest/wait/wait.go b/lntest/wait/wait.go index 1ff16914f2..88cdb27d5f 100644 --- a/lntest/wait/wait.go +++ b/lntest/wait/wait.go @@ -76,3 +76,23 @@ func Invariant(statement func() bool, timeout time.Duration) error { } } } + +// InvariantNoError is a wrapper around Invariant that waits out the duration +// specified by timeout. It fails if the predicate ever returns an error during +// that time. 
+func InvariantNoError(f func() error, timeout time.Duration) error { + var predErr error + pred := func() bool { + if err := f(); err != nil { + predErr = err + return false + } + return true + } + + if err := Invariant(pred, timeout); err != nil { + return predErr + } + + return nil +} diff --git a/lnwallet/btcwallet/btcwallet.go b/lnwallet/btcwallet/btcwallet.go index 84945c86aa..3e51605a50 100644 --- a/lnwallet/btcwallet/btcwallet.go +++ b/lnwallet/btcwallet/btcwallet.go @@ -21,6 +21,7 @@ import ( "github.com/btcsuite/btcwallet/walletdb" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) const ( @@ -289,7 +290,7 @@ func (b *BtcWallet) IsOurAddress(a btcutil.Address) bool { // // This is a part of the WalletController interface. func (b *BtcWallet) SendOutputs(outputs []*wire.TxOut, - feeRate lnwallet.SatPerKWeight) (*wire.MsgTx, error) { + feeRate chainfee.SatPerKWeight) (*wire.MsgTx, error) { // Convert our fee rate from sat/kw to sat/kb since it's required by // SendOutputs. @@ -314,7 +315,7 @@ func (b *BtcWallet) SendOutputs(outputs []*wire.TxOut, // // This is a part of the WalletController interface. func (b *BtcWallet) CreateSimpleTx(outputs []*wire.TxOut, - feeRate lnwallet.SatPerKWeight, dryRun bool) (*txauthor.AuthoredTx, error) { + feeRate chainfee.SatPerKWeight, dryRun bool) (*txauthor.AuthoredTx, error) { // The fee rate is passed in using units of sat/kw, so we'll convert // this to sat/KB as the CreateSimpleTx method requires this unit. 
diff --git a/lnwallet/btcwallet/btcwallet_rpctest.go b/lnwallet/btcwallet/btcwallet_rpctest.go new file mode 100644 index 0000000000..7139e4cc17 --- /dev/null +++ b/lnwallet/btcwallet/btcwallet_rpctest.go @@ -0,0 +1,23 @@ +// +build rpctest + +package btcwallet + +import ( + "github.com/btcsuite/btcwallet/snacl" + "github.com/btcsuite/btcwallet/waddrmgr" +) + +func init() { + // Instruct waddrmgr to use the cranked down scrypt parameters when + // creating new wallet encryption keys. This will speed up the itests + // considerably. + fastScrypt := waddrmgr.FastScryptOptions + keyGen := func(passphrase *[]byte, config *waddrmgr.ScryptOptions) ( + *snacl.SecretKey, error) { + + return snacl.NewSecretKey( + passphrase, fastScrypt.N, fastScrypt.R, fastScrypt.P, + ) + } + waddrmgr.SetSecretKeyGen(keyGen) +} diff --git a/lnwallet/btcwallet/signer.go b/lnwallet/btcwallet/signer.go index 35e158c028..a79a5fd927 100644 --- a/lnwallet/btcwallet/signer.go +++ b/lnwallet/btcwallet/signer.go @@ -225,7 +225,7 @@ func maybeTweakPrivKey(signDesc *input.SignDescriptor, // // This is a part of the WalletController interface. func (b *BtcWallet) SignOutputRaw(tx *wire.MsgTx, - signDesc *input.SignDescriptor) ([]byte, error) { + signDesc *input.SignDescriptor) (input.Signature, error) { witnessScript := signDesc.WitnessScript @@ -256,7 +256,7 @@ func (b *BtcWallet) SignOutputRaw(tx *wire.MsgTx, } // Chop off the sighash flag at the end of the signature. - return sig[:len(sig)-1], nil + return btcec.ParseDERSignature(sig[:len(sig)-1], btcec.S256()) } // ComputeInputScript generates a complete InputScript for the passed @@ -358,7 +358,7 @@ var _ input.Signer = (*BtcWallet)(nil) // // NOTE: This is a part of the MessageSigner interface. func (b *BtcWallet) SignMessage(pubKey *btcec.PublicKey, - msg []byte) (*btcec.Signature, error) { + msg []byte) (input.Signature, error) { // First attempt to fetch the private key which corresponds to the // specified public key. 
diff --git a/lnwallet/fee_estimator.go b/lnwallet/chainfee/estimator.go similarity index 67% rename from lnwallet/fee_estimator.go rename to lnwallet/chainfee/estimator.go index bda93d63b1..98c74f6130 100644 --- a/lnwallet/fee_estimator.go +++ b/lnwallet/chainfee/estimator.go @@ -1,4 +1,4 @@ -package lnwallet +package chainfee import ( "encoding/json" @@ -10,85 +10,41 @@ import ( "sync" "time" - "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/rpcclient" "github.com/btcsuite/btcutil" ) const ( - // FeePerKwFloor is the lowest fee rate in sat/kw that we should use for - // determining transaction fees. - FeePerKwFloor SatPerKWeight = 253 - // maxBlockTarget is the highest number of blocks confirmations that - // a WebAPIFeeEstimator will cache fees for. This number is chosen + // a WebAPIEstimator will cache fees for. This number is chosen // because it's the highest number of confs bitcoind will return a fee // estimate for. maxBlockTarget uint32 = 1009 // minBlockTarget is the lowest number of blocks confirmations that - // a WebAPIFeeEstimator will cache fees for. Requesting an estimate for + // a WebAPIEstimator will cache fees for. Requesting an estimate for // less than this will result in an error. minBlockTarget uint32 = 2 // minFeeUpdateTimeout represents the minimum interval in which a - // WebAPIFeeEstimator will request fresh fees from its API. + // WebAPIEstimator will request fresh fees from its API. minFeeUpdateTimeout = 5 * time.Minute // maxFeeUpdateTimeout represents the maximum interval in which a - // WebAPIFeeEstimator will request fresh fees from its API. + // WebAPIEstimator will request fresh fees from its API. maxFeeUpdateTimeout = 20 * time.Minute ) -// SatPerKVByte represents a fee rate in sat/kb. -type SatPerKVByte btcutil.Amount - -// FeeForVSize calculates the fee resulting from this fee rate and the given -// vsize in vbytes. 
-func (s SatPerKVByte) FeeForVSize(vbytes int64) btcutil.Amount { - return btcutil.Amount(s) * btcutil.Amount(vbytes) / 1000 -} - -// FeePerKWeight converts the current fee rate from sat/kb to sat/kw. -func (s SatPerKVByte) FeePerKWeight() SatPerKWeight { - return SatPerKWeight(s / blockchain.WitnessScaleFactor) -} - -// String returns a human-readable string of the fee rate. -func (s SatPerKVByte) String() string { - return fmt.Sprintf("%v sat/kb", int64(s)) -} - -// SatPerKWeight represents a fee rate in sat/kw. -type SatPerKWeight btcutil.Amount - -// FeeForWeight calculates the fee resulting from this fee rate and the given -// weight in weight units (wu). -func (s SatPerKWeight) FeeForWeight(wu int64) btcutil.Amount { - // The resulting fee is rounded down, as specified in BOLT#03. - return btcutil.Amount(s) * btcutil.Amount(wu) / 1000 -} - -// FeePerKVByte converts the current fee rate from sat/kw to sat/kb. -func (s SatPerKWeight) FeePerKVByte() SatPerKVByte { - return SatPerKVByte(s * blockchain.WitnessScaleFactor) -} - -// String returns a human-readable string of the fee rate. -func (s SatPerKWeight) String() string { - return fmt.Sprintf("%v sat/kw", int64(s)) -} - -// FeeEstimator provides the ability to estimate on-chain transaction fees for +// Estimator provides the ability to estimate on-chain transaction fees for // various combinations of transaction sizes and desired confirmation time // (measured by number of blocks). -type FeeEstimator interface { +type Estimator interface { // EstimateFeePerKW takes in a target for the number of blocks until an // initial confirmation and returns the estimated fee expressed in // sat/kw. EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) - // Start signals the FeeEstimator to start any processes or goroutines + // Start signals the Estimator to start any processes or goroutines // it needs to perform its duty. 
Start() error @@ -102,11 +58,11 @@ type FeeEstimator interface { RelayFeePerKW() SatPerKWeight } -// StaticFeeEstimator will return a static value for all fee calculation -// requests. It is designed to be replaced by a proper fee calculation -// implementation. The fees are not accessible directly, because changing them -// would not be thread safe. -type StaticFeeEstimator struct { +// StaticEstimator will return a static value for all fee calculation requests. +// It is designed to be replaced by a proper fee calculation implementation. +// The fees are not accessible directly, because changing them would not be +// thread safe. +type StaticEstimator struct { // feePerKW is the static fee rate in satoshis-per-vbyte that will be // returned by this fee estimator. feePerKW SatPerKWeight @@ -116,11 +72,10 @@ type StaticFeeEstimator struct { relayFee SatPerKWeight } -// NewStaticFeeEstimator returns a new static fee estimator instance. -func NewStaticFeeEstimator(feePerKW, - relayFee SatPerKWeight) *StaticFeeEstimator { +// NewStaticEstimator returns a new static fee estimator instance. +func NewStaticEstimator(feePerKW, relayFee SatPerKWeight) *StaticEstimator { - return &StaticFeeEstimator{ + return &StaticEstimator{ feePerKW: feePerKW, relayFee: relayFee, } @@ -128,43 +83,43 @@ func NewStaticFeeEstimator(feePerKW, // EstimateFeePerKW will return a static value for fee calculations. // -// NOTE: This method is part of the FeeEstimator interface. -func (e StaticFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) { +// NOTE: This method is part of the Estimator interface. +func (e StaticEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) { return e.feePerKW, nil } // RelayFeePerKW returns the minimum fee rate required for transactions to be // relayed. // -// NOTE: This method is part of the FeeEstimator interface. 
-func (e StaticFeeEstimator) RelayFeePerKW() SatPerKWeight { +// NOTE: This method is part of the Estimator interface. +func (e StaticEstimator) RelayFeePerKW() SatPerKWeight { return e.relayFee } -// Start signals the FeeEstimator to start any processes or goroutines +// Start signals the Estimator to start any processes or goroutines // it needs to perform its duty. // -// NOTE: This method is part of the FeeEstimator interface. -func (e StaticFeeEstimator) Start() error { +// NOTE: This method is part of the Estimator interface. +func (e StaticEstimator) Start() error { return nil } // Stop stops any spawned goroutines and cleans up the resources used // by the fee estimator. // -// NOTE: This method is part of the FeeEstimator interface. -func (e StaticFeeEstimator) Stop() error { +// NOTE: This method is part of the Estimator interface. +func (e StaticEstimator) Stop() error { return nil } // A compile-time assertion to ensure that StaticFeeEstimator implements the -// FeeEstimator interface. -var _ FeeEstimator = (*StaticFeeEstimator)(nil) +// Estimator interface. +var _ Estimator = (*StaticEstimator)(nil) -// BtcdFeeEstimator is an implementation of the FeeEstimator interface backed +// BtcdEstimator is an implementation of the Estimator interface backed // by the RPC interface of an active btcd node. This implementation will proxy // any fee estimation requests to btcd's RPC interface. -type BtcdFeeEstimator struct { +type BtcdEstimator struct { // fallbackFeePerKW is the fall back fee rate in sat/kw that is returned // if the fee estimator does not yet have enough data to actually // produce fee estimates. @@ -179,13 +134,13 @@ type BtcdFeeEstimator struct { btcdConn *rpcclient.Client } -// NewBtcdFeeEstimator creates a new BtcdFeeEstimator given a fully populated +// NewBtcdEstimator creates a new BtcdEstimator given a fully populated // rpc config that is able to successfully connect and authenticate with the // btcd node, and also a fall back fee rate. 
The fallback fee rate is used in // the occasion that the estimator has insufficient data, or returns zero for a // fee estimate. -func NewBtcdFeeEstimator(rpcConfig rpcclient.ConnConfig, - fallBackFeeRate SatPerKWeight) (*BtcdFeeEstimator, error) { +func NewBtcdEstimator(rpcConfig rpcclient.ConnConfig, + fallBackFeeRate SatPerKWeight) (*BtcdEstimator, error) { rpcConfig.DisableConnectOnNew = true rpcConfig.DisableAutoReconnect = false @@ -194,17 +149,17 @@ func NewBtcdFeeEstimator(rpcConfig rpcclient.ConnConfig, return nil, err } - return &BtcdFeeEstimator{ + return &BtcdEstimator{ fallbackFeePerKW: fallBackFeeRate, btcdConn: chainConn, }, nil } -// Start signals the FeeEstimator to start any processes or goroutines +// Start signals the Estimator to start any processes or goroutines // it needs to perform its duty. // -// NOTE: This method is part of the FeeEstimator interface. -func (b *BtcdFeeEstimator) Start() error { +// NOTE: This method is part of the Estimator interface. +func (b *BtcdEstimator) Start() error { if err := b.btcdConn.Connect(20); err != nil { return err } @@ -233,7 +188,7 @@ func (b *BtcdFeeEstimator) Start() error { b.minFeePerKW = FeePerKwFloor } - walletLog.Debugf("Using minimum fee rate of %v sat/kw", + log.Debugf("Using minimum fee rate of %v sat/kw", int64(b.minFeePerKW)) return nil @@ -242,8 +197,8 @@ func (b *BtcdFeeEstimator) Start() error { // Stop stops any spawned goroutines and cleans up the resources used // by the fee estimator. // -// NOTE: This method is part of the FeeEstimator interface. -func (b *BtcdFeeEstimator) Stop() error { +// NOTE: This method is part of the Estimator interface. +func (b *BtcdEstimator) Stop() error { b.btcdConn.Shutdown() return nil @@ -252,15 +207,15 @@ func (b *BtcdFeeEstimator) Stop() error { // EstimateFeePerKW takes in a target for the number of blocks until an initial // confirmation and returns the estimated fee expressed in sat/kw. 
// -// NOTE: This method is part of the FeeEstimator interface. -func (b *BtcdFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) { +// NOTE: This method is part of the Estimator interface. +func (b *BtcdEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) { feeEstimate, err := b.fetchEstimate(numBlocks) switch { // If the estimator doesn't have enough data, or returns an error, then // to return a proper value, then we'll return the default fall back // fee rate. case err != nil: - walletLog.Errorf("unable to query estimator: %v", err) + log.Errorf("unable to query estimator: %v", err) fallthrough case feeEstimate == 0: @@ -273,14 +228,14 @@ func (b *BtcdFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, er // RelayFeePerKW returns the minimum fee rate required for transactions to be // relayed. // -// NOTE: This method is part of the FeeEstimator interface. -func (b *BtcdFeeEstimator) RelayFeePerKW() SatPerKWeight { +// NOTE: This method is part of the Estimator interface. +func (b *BtcdEstimator) RelayFeePerKW() SatPerKWeight { return b.minFeePerKW } // fetchEstimate returns a fee estimate for a transaction to be confirmed in // confTarget blocks. The estimate is returned in sat/kw. -func (b *BtcdFeeEstimator) fetchEstimate(confTarget uint32) (SatPerKWeight, error) { +func (b *BtcdEstimator) fetchEstimate(confTarget uint32) (SatPerKWeight, error) { // First, we'll fetch the estimate for our confirmation target. btcPerKB, err := b.btcdConn.EstimateFee(int64(confTarget)) if err != nil { @@ -300,26 +255,26 @@ func (b *BtcdFeeEstimator) fetchEstimate(confTarget uint32) (SatPerKWeight, erro // Finally, we'll enforce our fee floor. 
if satPerKw < b.minFeePerKW { - walletLog.Debugf("Estimated fee rate of %v sat/kw is too low, "+ + log.Debugf("Estimated fee rate of %v sat/kw is too low, "+ "using fee floor of %v sat/kw instead", satPerKw, b.minFeePerKW) satPerKw = b.minFeePerKW } - walletLog.Debugf("Returning %v sat/kw for conf target of %v", + log.Debugf("Returning %v sat/kw for conf target of %v", int64(satPerKw), confTarget) return satPerKw, nil } -// A compile-time assertion to ensure that BtcdFeeEstimator implements the -// FeeEstimator interface. -var _ FeeEstimator = (*BtcdFeeEstimator)(nil) +// A compile-time assertion to ensure that BtcdEstimator implements the +// Estimator interface. +var _ Estimator = (*BtcdEstimator)(nil) -// BitcoindFeeEstimator is an implementation of the FeeEstimator interface -// backed by the RPC interface of an active bitcoind node. This implementation -// will proxy any fee estimation requests to bitcoind's RPC interface. -type BitcoindFeeEstimator struct { +// BitcoindEstimator is an implementation of the Estimator interface backed by +// the RPC interface of an active bitcoind node. This implementation will proxy +// any fee estimation requests to bitcoind's RPC interface. +type BitcoindEstimator struct { // fallbackFeePerKW is the fallback fee rate in sat/kw that is returned // if the fee estimator does not yet have enough data to actually // produce fee estimates. @@ -331,16 +286,21 @@ type BitcoindFeeEstimator struct { // through the network. minFeePerKW SatPerKWeight + // feeMode is the estimate_mode to use when calling "estimatesmartfee". + // It can be either "ECONOMICAL" or "CONSERVATIVE", and it's default + // to "CONSERVATIVE". + feeMode string + bitcoindConn *rpcclient.Client } -// NewBitcoindFeeEstimator creates a new BitcoindFeeEstimator given a fully -// populated rpc config that is able to successfully connect and authenticate -// with the bitcoind node, and also a fall back fee rate. 
The fallback fee rate -// is used in the occasion that the estimator has insufficient data, or returns -// zero for a fee estimate. -func NewBitcoindFeeEstimator(rpcConfig rpcclient.ConnConfig, - fallBackFeeRate SatPerKWeight) (*BitcoindFeeEstimator, error) { +// NewBitcoindEstimator creates a new BitcoindEstimator given a fully populated +// rpc config that is able to successfully connect and authenticate with the +// bitcoind node, and also a fall back fee rate. The fallback fee rate is used +// in the occasion that the estimator has insufficient data, or returns zero +// for a fee estimate. +func NewBitcoindEstimator(rpcConfig rpcclient.ConnConfig, feeMode string, + fallBackFeeRate SatPerKWeight) (*BitcoindEstimator, error) { rpcConfig.DisableConnectOnNew = true rpcConfig.DisableAutoReconnect = false @@ -351,17 +311,18 @@ func NewBitcoindFeeEstimator(rpcConfig rpcclient.ConnConfig, return nil, err } - return &BitcoindFeeEstimator{ + return &BitcoindEstimator{ fallbackFeePerKW: fallBackFeeRate, bitcoindConn: chainConn, + feeMode: feeMode, }, nil } -// Start signals the FeeEstimator to start any processes or goroutines +// Start signals the Estimator to start any processes or goroutines // it needs to perform its duty. // -// NOTE: This method is part of the FeeEstimator interface. -func (b *BitcoindFeeEstimator) Start() error { +// NOTE: This method is part of the Estimator interface. +func (b *BitcoindEstimator) Start() error { // Once the connection to the backend node has been established, we'll // query it for its minimum relay fee. 
Since the `getinfo` RPC has been // deprecated for `bitcoind`, we'll need to send a `getnetworkinfo` @@ -396,7 +357,7 @@ func (b *BitcoindFeeEstimator) Start() error { b.minFeePerKW = FeePerKwFloor } - walletLog.Debugf("Using minimum fee rate of %v sat/kw", + log.Debugf("Using minimum fee rate of %v sat/kw", int64(b.minFeePerKW)) return nil @@ -405,23 +366,23 @@ func (b *BitcoindFeeEstimator) Start() error { // Stop stops any spawned goroutines and cleans up the resources used // by the fee estimator. // -// NOTE: This method is part of the FeeEstimator interface. -func (b *BitcoindFeeEstimator) Stop() error { +// NOTE: This method is part of the Estimator interface. +func (b *BitcoindEstimator) Stop() error { return nil } // EstimateFeePerKW takes in a target for the number of blocks until an initial // confirmation and returns the estimated fee expressed in sat/kw. // -// NOTE: This method is part of the FeeEstimator interface. -func (b *BitcoindFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) { +// NOTE: This method is part of the Estimator interface. +func (b *BitcoindEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) { feeEstimate, err := b.fetchEstimate(numBlocks) switch { // If the estimator doesn't have enough data, or returns an error, then // to return a proper value, then we'll return the default fall back // fee rate. case err != nil: - walletLog.Errorf("unable to query estimator: %v", err) + log.Errorf("unable to query estimator: %v", err) fallthrough case feeEstimate == 0: @@ -434,23 +395,29 @@ func (b *BitcoindFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight // RelayFeePerKW returns the minimum fee rate required for transactions to be // relayed. // -// NOTE: This method is part of the FeeEstimator interface. -func (b *BitcoindFeeEstimator) RelayFeePerKW() SatPerKWeight { +// NOTE: This method is part of the Estimator interface. 
+func (b *BitcoindEstimator) RelayFeePerKW() SatPerKWeight { return b.minFeePerKW } // fetchEstimate returns a fee estimate for a transaction to be confirmed in // confTarget blocks. The estimate is returned in sat/kw. -func (b *BitcoindFeeEstimator) fetchEstimate(confTarget uint32) (SatPerKWeight, error) { +func (b *BitcoindEstimator) fetchEstimate(confTarget uint32) (SatPerKWeight, error) { // First, we'll send an "estimatesmartfee" command as a raw request, // since it isn't supported by btcd but is available in bitcoind. target, err := json.Marshal(uint64(confTarget)) if err != nil { return 0, err } - // TODO: Allow selection of economical/conservative modifiers. + + // The mode must be either ECONOMICAL or CONSERVATIVE. + mode, err := json.Marshal(b.feeMode) + if err != nil { + return 0, err + } + resp, err := b.bitcoindConn.RawRequest( - "estimatesmartfee", []json.RawMessage{target}, + "estimatesmartfee", []json.RawMessage{target, mode}, ) if err != nil { return 0, err @@ -478,26 +445,26 @@ func (b *BitcoindFeeEstimator) fetchEstimate(confTarget uint32) (SatPerKWeight, // Finally, we'll enforce our fee floor. if satPerKw < b.minFeePerKW { - walletLog.Debugf("Estimated fee rate of %v sat/kw is too low, "+ + log.Debugf("Estimated fee rate of %v sat/kw is too low, "+ "using fee floor of %v sat/kw instead", satPerKw, b.minFeePerKW) satPerKw = b.minFeePerKW } - walletLog.Debugf("Returning %v sat/kw for conf target of %v", + log.Debugf("Returning %v sat/kw for conf target of %v", int64(satPerKw), confTarget) return satPerKw, nil } -// A compile-time assertion to ensure that BitcoindFeeEstimator implements the -// FeeEstimator interface. -var _ FeeEstimator = (*BitcoindFeeEstimator)(nil) +// A compile-time assertion to ensure that BitcoindEstimator implements the +// Estimator interface. 
+var _ Estimator = (*BitcoindEstimator)(nil) -// WebAPIFeeSource is an interface allows the WebAPIFeeEstimator to query an +// WebAPIFeeSource is an interface allows the WebAPIEstimator to query an // arbitrary HTTP-based fee estimator. Each new set/network will gain an -// implementation of this interface in order to allow the WebAPIFeeEstimator to +// implementation of this interface in order to allow the WebAPIEstimator to // be fully generic in its logic. type WebAPIFeeSource interface { // GenQueryURL generates the full query URL. The value returned by this @@ -554,9 +521,9 @@ func (s SparseConfFeeSource) ParseResponse(r io.Reader) (map[uint32]uint32, erro // WebAPIFeeSource interface. var _ WebAPIFeeSource = (*SparseConfFeeSource)(nil) -// WebAPIFeeEstimator is an implementation of the FeeEstimator interface that +// WebAPIEstimator is an implementation of the Estimator interface that // queries an HTTP-based fee estimation from an existing web API. -type WebAPIFeeEstimator struct { +type WebAPIEstimator struct { started sync.Once stopped sync.Once @@ -581,12 +548,12 @@ type WebAPIFeeEstimator struct { wg sync.WaitGroup } -// NewWebAPIFeeEstimator creates a new WebAPIFeeEstimator from a given URL and a +// NewWebAPIEstimator creates a new WebAPIEstimator from a given URL and a // fallback default fee. The fees are updated whenever a new block is mined. -func NewWebAPIFeeEstimator( - api WebAPIFeeSource, defaultFee SatPerKWeight) *WebAPIFeeEstimator { +func NewWebAPIEstimator( + api WebAPIFeeSource, defaultFee SatPerKWeight) *WebAPIEstimator { - return &WebAPIFeeEstimator{ + return &WebAPIEstimator{ apiSource: api, feeByBlockTarget: make(map[uint32]uint32), defaultFeePerKw: defaultFee, @@ -597,8 +564,8 @@ func NewWebAPIFeeEstimator( // EstimateFeePerKW takes in a target for the number of blocks until an initial // confirmation and returns the estimated fee expressed in sat/kw. // -// NOTE: This method is part of the FeeEstimator interface. 
-func (w *WebAPIFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) { +// NOTE: This method is part of the Estimator interface. +func (w *WebAPIEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) { if numBlocks > maxBlockTarget { numBlocks = maxBlockTarget } else if numBlocks < minBlockTarget { @@ -618,20 +585,20 @@ func (w *WebAPIFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, satPerKw = FeePerKwFloor } - walletLog.Debugf("Web API returning %v sat/kw for conf target of %v", + log.Debugf("Web API returning %v sat/kw for conf target of %v", int64(satPerKw), numBlocks) return satPerKw, nil } -// Start signals the FeeEstimator to start any processes or goroutines it needs +// Start signals the Estimator to start any processes or goroutines it needs // to perform its duty. // -// NOTE: This method is part of the FeeEstimator interface. -func (w *WebAPIFeeEstimator) Start() error { +// NOTE: This method is part of the Estimator interface. +func (w *WebAPIEstimator) Start() error { var err error w.started.Do(func() { - walletLog.Infof("Starting web API fee estimator") + log.Infof("Starting web API fee estimator") w.updateFeeTicker = time.NewTicker(w.randomFeeUpdateTimeout()) w.updateFeeEstimates() @@ -646,10 +613,10 @@ func (w *WebAPIFeeEstimator) Start() error { // Stop stops any spawned goroutines and cleans up the resources used by the // fee estimator. // -// NOTE: This method is part of the FeeEstimator interface. -func (w *WebAPIFeeEstimator) Stop() error { +// NOTE: This method is part of the Estimator interface. +func (w *WebAPIEstimator) Stop() error { w.stopped.Do(func() { - walletLog.Infof("Stopping web API fee estimator") + log.Infof("Stopping web API fee estimator") w.updateFeeTicker.Stop() @@ -662,15 +629,15 @@ func (w *WebAPIFeeEstimator) Stop() error { // RelayFeePerKW returns the minimum fee rate required for transactions to be // relayed. 
// -// NOTE: This method is part of the FeeEstimator interface. -func (w *WebAPIFeeEstimator) RelayFeePerKW() SatPerKWeight { +// NOTE: This method is part of the Estimator interface. +func (w *WebAPIEstimator) RelayFeePerKW() SatPerKWeight { return FeePerKwFloor } // randomFeeUpdateTimeout returns a random timeout between minFeeUpdateTimeout // and maxFeeUpdateTimeout that will be used to determine how often the Estimator // should retrieve fresh fees from its API. -func (w *WebAPIFeeEstimator) randomFeeUpdateTimeout() time.Duration { +func (w *WebAPIEstimator) randomFeeUpdateTimeout() time.Duration { lower := int64(minFeeUpdateTimeout) upper := int64(maxFeeUpdateTimeout) return time.Duration(prand.Int63n(upper-lower) + lower) @@ -679,7 +646,7 @@ func (w *WebAPIFeeEstimator) randomFeeUpdateTimeout() time.Duration { // getCachedFee takes in a target for the number of blocks until an initial // confirmation and returns an estimated fee (if one was returned by the API). If // the fee was not previously cached, we cache it here. -func (w *WebAPIFeeEstimator) getCachedFee(numBlocks uint32) (uint32, error) { +func (w *WebAPIEstimator) getCachedFee(numBlocks uint32) (uint32, error) { w.feesMtx.Lock() defer w.feesMtx.Unlock() @@ -704,7 +671,7 @@ func (w *WebAPIFeeEstimator) getCachedFee(numBlocks uint32) (uint32, error) { } // updateFeeEstimates re-queries the API for fresh fees and caches them. -func (w *WebAPIFeeEstimator) updateFeeEstimates() { +func (w *WebAPIEstimator) updateFeeEstimates() { // Rather than use the default http.Client, we'll make a custom one // which will allow us to control how long we'll wait to read the // response from the service. 
This way, if the service is down or @@ -725,7 +692,7 @@ func (w *WebAPIFeeEstimator) updateFeeEstimates() { targetURL := w.apiSource.GenQueryURL() resp, err := netClient.Get(targetURL) if err != nil { - walletLog.Errorf("unable to query web api for fee response: %v", + log.Errorf("unable to query web api for fee response: %v", err) return } @@ -735,7 +702,7 @@ func (w *WebAPIFeeEstimator) updateFeeEstimates() { // to parse out the body to obtain our final result. feesByBlockTarget, err := w.apiSource.ParseResponse(resp.Body) if err != nil { - walletLog.Errorf("unable to query web api for fee response: %v", + log.Errorf("unable to query web api for fee response: %v", err) return } @@ -746,7 +713,7 @@ func (w *WebAPIFeeEstimator) updateFeeEstimates() { } // feeUpdateManager updates the fee estimates whenever a new block comes in. -func (w *WebAPIFeeEstimator) feeUpdateManager() { +func (w *WebAPIEstimator) feeUpdateManager() { defer w.wg.Done() for { @@ -759,6 +726,6 @@ func (w *WebAPIFeeEstimator) feeUpdateManager() { } } -// A compile-time assertion to ensure that WebAPIFeeEstimator implements the -// FeeEstimator interface. -var _ FeeEstimator = (*WebAPIFeeEstimator)(nil) +// A compile-time assertion to ensure that WebAPIEstimator implements the +// Estimator interface. +var _ Estimator = (*WebAPIEstimator)(nil) diff --git a/lnwallet/fee_estimator_test.go b/lnwallet/chainfee/estimator_test.go similarity index 89% rename from lnwallet/fee_estimator_test.go rename to lnwallet/chainfee/estimator_test.go index c2cfeb52d0..b8b8186f9e 100644 --- a/lnwallet/fee_estimator_test.go +++ b/lnwallet/chainfee/estimator_test.go @@ -1,4 +1,4 @@ -package lnwallet_test +package chainfee import ( "bytes" @@ -9,8 +9,6 @@ import ( "testing" "github.com/btcsuite/btcutil" - - "github.com/lightningnetwork/lnd/lnwallet" ) type mockSparseConfFeeSource struct { @@ -38,9 +36,9 @@ func TestFeeRateTypes(t *testing.T) { const weight = vsize * 4 // Test the conversion from sat/kw to sat/kb. 
- for feePerKw := lnwallet.SatPerKWeight(250); feePerKw < 10000; feePerKw += 50 { + for feePerKw := SatPerKWeight(250); feePerKw < 10000; feePerKw += 50 { feePerKB := feePerKw.FeePerKVByte() - if feePerKB != lnwallet.SatPerKVByte(feePerKw*4) { + if feePerKB != SatPerKVByte(feePerKw*4) { t.Fatalf("expected %d sat/kb, got %d sat/kb when "+ "converting from %d sat/kw", feePerKw*4, feePerKB, feePerKw) @@ -62,9 +60,9 @@ func TestFeeRateTypes(t *testing.T) { } // Test the conversion from sat/kb to sat/kw. - for feePerKB := lnwallet.SatPerKVByte(1000); feePerKB < 40000; feePerKB += 1000 { + for feePerKB := SatPerKVByte(1000); feePerKB < 40000; feePerKB += 1000 { feePerKw := feePerKB.FeePerKWeight() - if feePerKw != lnwallet.SatPerKWeight(feePerKB/4) { + if feePerKw != SatPerKWeight(feePerKB/4) { t.Fatalf("expected %d sat/kw, got %d sat/kw when "+ "converting from %d sat/kb", feePerKB/4, feePerKw, feePerKB) @@ -91,9 +89,9 @@ func TestFeeRateTypes(t *testing.T) { func TestStaticFeeEstimator(t *testing.T) { t.Parallel() - const feePerKw = lnwallet.FeePerKwFloor + const feePerKw = FeePerKwFloor - feeEstimator := lnwallet.NewStaticFeeEstimator(feePerKw, 0) + feeEstimator := NewStaticEstimator(feePerKw, 0) if err := feeEstimator.Start(); err != nil { t.Fatalf("unable to start fee estimator: %v", err) } @@ -116,7 +114,7 @@ func TestSparseConfFeeSource(t *testing.T) { // Test that GenQueryURL returns the URL as is. 
url := "test" - feeSource := lnwallet.SparseConfFeeSource{URL: url} + feeSource := SparseConfFeeSource{URL: url} queryURL := feeSource.GenQueryURL() if queryURL != url { t.Fatalf("expected query URL of %v, got %v", url, queryURL) @@ -166,7 +164,7 @@ func TestSparseConfFeeSource(t *testing.T) { func TestWebAPIFeeEstimator(t *testing.T) { t.Parallel() - feeFloor := uint32(lnwallet.FeePerKwFloor.FeePerKVByte()) + feeFloor := uint32(FeePerKwFloor.FeePerKVByte()) testCases := []struct { name string target uint32 @@ -194,7 +192,7 @@ func TestWebAPIFeeEstimator(t *testing.T) { fees: testFees, } - estimator := lnwallet.NewWebAPIFeeEstimator(feeSource, 10) + estimator := NewWebAPIEstimator(feeSource, 10) // Test that requesting a fee when no fees have been cached fails. _, err := estimator.EstimateFeePerKW(5) @@ -210,6 +208,7 @@ func TestWebAPIFeeEstimator(t *testing.T) { defer estimator.Stop() for _, tc := range testCases { + tc := tc t.Run(tc.name, func(t *testing.T) { est, err := estimator.EstimateFeePerKW(tc.target) if tc.err != "" { @@ -220,7 +219,7 @@ func TestWebAPIFeeEstimator(t *testing.T) { "fail, instead got: %v", err) } } else { - exp := lnwallet.SatPerKVByte(tc.est).FeePerKWeight() + exp := SatPerKVByte(tc.est).FeePerKWeight() if err != nil { t.Fatalf("unable to estimate fee for "+ "%v block target, got: %v", diff --git a/lnwallet/chainfee/log.go b/lnwallet/chainfee/log.go new file mode 100644 index 0000000000..d5d0405782 --- /dev/null +++ b/lnwallet/chainfee/log.go @@ -0,0 +1,29 @@ +package chainfee + +import ( + "github.com/btcsuite/btclog" + "github.com/lightningnetwork/lnd/build" +) + +// log is a logger that is initialized with no output filters. This means the +// package will not perform any logging by default until the caller requests +// it. +var log btclog.Logger + +// The default amount of logging is none. +func init() { + UseLogger(build.NewSubLogger("CFEE", nil)) +} + +// DisableLog disables all library log output. 
Logging output is disabled by +// default until UseLogger is called. +func DisableLog() { + UseLogger(btclog.Disabled) +} + +// UseLogger uses a specified Logger to output package logging info. This +// should be used in preference to SetLogWriter if the caller is also using +// btclog. +func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/lnwallet/chainfee/rates.go b/lnwallet/chainfee/rates.go new file mode 100644 index 0000000000..69c458a4a0 --- /dev/null +++ b/lnwallet/chainfee/rates.go @@ -0,0 +1,58 @@ +package chainfee + +import ( + "fmt" + + "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcutil" +) + +const ( + // FeePerKwFloor is the lowest fee rate in sat/kw that we should use for + // estimating transaction fees before signing. + FeePerKwFloor SatPerKWeight = 253 + + // AbsoluteFeePerKwFloor is the lowest fee rate in sat/kw of a + // transaction that we should ever _create_. This is the the equivalent + // of 1 sat/byte in sat/kw. + AbsoluteFeePerKwFloor SatPerKWeight = 250 +) + +// SatPerKVByte represents a fee rate in sat/kb. +type SatPerKVByte btcutil.Amount + +// FeeForVSize calculates the fee resulting from this fee rate and the given +// vsize in vbytes. +func (s SatPerKVByte) FeeForVSize(vbytes int64) btcutil.Amount { + return btcutil.Amount(s) * btcutil.Amount(vbytes) / 1000 +} + +// FeePerKWeight converts the current fee rate from sat/kb to sat/kw. +func (s SatPerKVByte) FeePerKWeight() SatPerKWeight { + return SatPerKWeight(s / blockchain.WitnessScaleFactor) +} + +// String returns a human-readable string of the fee rate. +func (s SatPerKVByte) String() string { + return fmt.Sprintf("%v sat/kb", int64(s)) +} + +// SatPerKWeight represents a fee rate in sat/kw. +type SatPerKWeight btcutil.Amount + +// FeeForWeight calculates the fee resulting from this fee rate and the given +// weight in weight units (wu). 
+func (s SatPerKWeight) FeeForWeight(wu int64) btcutil.Amount { + // The resulting fee is rounded down, as specified in BOLT#03. + return btcutil.Amount(s) * btcutil.Amount(wu) / 1000 +} + +// FeePerKVByte converts the current fee rate from sat/kw to sat/kb. +func (s SatPerKWeight) FeePerKVByte() SatPerKVByte { + return SatPerKVByte(s * blockchain.WitnessScaleFactor) +} + +// String returns a human-readable string of the fee rate. +func (s SatPerKWeight) String() string { + return fmt.Sprintf("%v sat/kw", int64(s)) +} diff --git a/lnwallet/chanfunding/assembler.go b/lnwallet/chanfunding/assembler.go new file mode 100644 index 0000000000..208cab68bb --- /dev/null +++ b/lnwallet/chanfunding/assembler.go @@ -0,0 +1,137 @@ +package chanfunding + +import ( + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" +) + +// CoinSource is an interface that allows a caller to access a source of UTXOs +// to use when attempting to fund a new channel. +type CoinSource interface { + // ListCoins returns all UTXOs from the source that have between + // minConfs and maxConfs number of confirmations. + ListCoins(minConfs, maxConfs int32) ([]Coin, error) + + // CoinFromOutPoint attempts to locate details pertaining to a coin + // based on its outpoint. If the coin isn't under the control of the + // backing CoinSource, then an error should be returned. + CoinFromOutPoint(wire.OutPoint) (*Coin, error) +} + +// CoinSelectionLocker is an interface that allows the caller to perform an +// operation, which is synchronized with all coin selection attempts. This can +// be used when an operation requires that all coin selection operations cease +// forward progress. Think of this as an exclusive lock on coin selection +// operations. 
+type CoinSelectionLocker interface { + // WithCoinSelectLock will execute the passed function closure in a + // synchronized manner preventing any coin selection operations from + // proceeding while the closure is executing. This can be seen as the + // ability to execute a function closure under an exclusive coin + // selection lock. + WithCoinSelectLock(func() error) error +} + +// OutpointLocker allows a caller to lock/unlock an outpoint. When locked, the +// outpoints shouldn't be used for any sort of channel funding of coin +// selection. Locked outpoints are not expected to be persisted between +// restarts. +type OutpointLocker interface { + // LockOutpoint locks a target outpoint, rendering it unusable for coin + // selection. + LockOutpoint(o wire.OutPoint) + + // UnlockOutpoint unlocks a target outpoint, allowing it to be used for + // coin selection once again. + UnlockOutpoint(o wire.OutPoint) +} + +// Request is a new request for funding a channel. The items in the struct +// governs how the final channel point will be provisioned by the target +// Assembler. +type Request struct { + // LocalAmt is the amount of coins we're placing into the funding + // output. + LocalAmt btcutil.Amount + + // RemoteAmt is the amount of coins the remote party is contributing to + // the funding output. + RemoteAmt btcutil.Amount + + // MinConfs controls how many confirmations a coin need to be eligible + // to be used as an input to the funding transaction. If this value is + // set to zero, then zero conf outputs may be spent. + MinConfs int32 + + // SubtractFees should be set if we intend to spend exactly LocalAmt + // when opening the channel, subtracting the fees from the funding + // output. This can be used for instance to use all our remaining funds + // to open the channel, since it will take fees into + // account. + SubtractFees bool + + // FeeRate is the fee rate in sat/kw that the funding transaction + // should carry. 
+ FeeRate chainfee.SatPerKWeight + + // ChangeAddr is a closure that will provide the Assembler with a + // change address for the funding transaction if needed. + ChangeAddr func() (btcutil.Address, error) +} + +// Intent is returned by an Assembler and represents the base functionality the +// caller needs to proceed with channel funding on a higher level. If the +// Cancel method is called, then all resources assembled to fund the channel +// will be released back to the eligible pool. +type Intent interface { + // FundingOutput returns the witness script, and the output that + // creates the funding output. + FundingOutput() ([]byte, *wire.TxOut, error) + + // ChanPoint returns the final outpoint that will create the funding + // output described above. + ChanPoint() (*wire.OutPoint, error) + + // RemoteFundingAmt is the amount the remote party put into the + // channel. + RemoteFundingAmt() btcutil.Amount + + // LocalFundingAmt is the amount we put into the channel. This may + // differ from the local amount requested, as depending on coin + // selection, we may bleed from of that LocalAmt into fees to minimize + // change. + LocalFundingAmt() btcutil.Amount + + // Cancel allows the caller to cancel a funding Intent at any time. + // This will return any resources such as coins back to the eligible + // pool to be used in order channel fundings. + Cancel() +} + +// Assembler is an abstract object that is capable of assembling everything +// needed to create a new funding output. As an example, this assembler may be +// our core backing wallet, an interactive PSBT based assembler, an assembler +// than can aggregate multiple intents into a single funding transaction, or an +// external protocol that creates a funding output out-of-band such as channel +// factories. +type Assembler interface { + // ProvisionChannel returns a populated Intent that can be used to + // further the channel funding workflow. 
Depending on the + // implementation of Assembler, additional state machine (Intent) + // actions may be required before the FundingOutput and ChanPoint are + // made available to the caller. + ProvisionChannel(*Request) (Intent, error) +} + +// FundingTxAssembler is a super-set of the regular Assembler interface that's +// also able to provide a fully populated funding transaction via the intents +// that it produces. +type FundingTxAssembler interface { + Assembler + + // FundingTxAvailable is an empty method that an assembler can + // implement to signal to callers that its able to provide the funding + // transaction for the channel via the intent it returns. + FundingTxAvailable() +} diff --git a/lnwallet/chanfunding/canned_assembler.go b/lnwallet/chanfunding/canned_assembler.go new file mode 100644 index 0000000000..b512a7223a --- /dev/null +++ b/lnwallet/chanfunding/canned_assembler.go @@ -0,0 +1,207 @@ +package chanfunding + +import ( + "fmt" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/keychain" +) + +// ShimIntent is an intent created by the CannedAssembler which represents a +// funding output to be created that was constructed outside the wallet. This +// might be used when a hardware wallet, or a channel factory is the entity +// crafting the funding transaction, and not lnd. +type ShimIntent struct { + // localFundingAmt is the final amount we put into the funding output. + localFundingAmt btcutil.Amount + + // remoteFundingAmt is the final amount the remote party put into the + // funding output. + remoteFundingAmt btcutil.Amount + + // localKey is our multi-sig key. + localKey *keychain.KeyDescriptor + + // remoteKey is the remote party's multi-sig key. + remoteKey *btcec.PublicKey + + // chanPoint is the final channel point for the to be created channel. 
+ chanPoint *wire.OutPoint + + // thawHeight, if non-zero is the height where this channel will become + // a normal channel. Until this height, it's considered frozen, so it + // can only be cooperatively closed by the responding party. + thawHeight uint32 +} + +// FundingOutput returns the witness script, and the output that creates the +// funding output. +// +// NOTE: This method satisfies the chanfunding.Intent interface. +func (s *ShimIntent) FundingOutput() ([]byte, *wire.TxOut, error) { + if s.localKey == nil || s.remoteKey == nil { + return nil, nil, fmt.Errorf("unable to create witness " + + "script, no funding keys") + } + + totalAmt := s.localFundingAmt + s.remoteFundingAmt + return input.GenFundingPkScript( + s.localKey.PubKey.SerializeCompressed(), + s.remoteKey.SerializeCompressed(), + int64(totalAmt), + ) +} + +// Cancel allows the caller to cancel a funding Intent at any time. This will +// return any resources such as coins back to the eligible pool to be used in +// order channel fundings. +// +// NOTE: This method satisfies the chanfunding.Intent interface. +func (s *ShimIntent) Cancel() { +} + +// RemoteFundingAmt is the amount the remote party put into the channel. +// +// NOTE: This method satisfies the chanfunding.Intent interface. +func (s *ShimIntent) LocalFundingAmt() btcutil.Amount { + return s.localFundingAmt +} + +// LocalFundingAmt is the amount we put into the channel. This may differ from +// the local amount requested, as depending on coin selection, we may bleed +// from of that LocalAmt into fees to minimize change. +// +// NOTE: This method satisfies the chanfunding.Intent interface. +func (s *ShimIntent) RemoteFundingAmt() btcutil.Amount { + return s.remoteFundingAmt +} + +// ChanPoint returns the final outpoint that will create the funding output +// described above. +// +// NOTE: This method satisfies the chanfunding.Intent interface. 
+func (s *ShimIntent) ChanPoint() (*wire.OutPoint, error) { + if s.chanPoint == nil { + return nil, fmt.Errorf("chan point unknown, funding output " + + "not constructed") + } + + return s.chanPoint, nil +} + +// ThawHeight returns the height where this channel goes back to being a normal +// channel. +func (s *ShimIntent) ThawHeight() uint32 { + return s.thawHeight +} + +// FundingKeys couples our multi-sig key along with the remote party's key. +type FundingKeys struct { + // LocalKey is our multi-sig key. + LocalKey *keychain.KeyDescriptor + + // RemoteKey is the multi-sig key of the remote party. + RemoteKey *btcec.PublicKey +} + +// MultiSigKeys returns the committed multi-sig keys, but only if they've been +// specified/provided. +func (s *ShimIntent) MultiSigKeys() (*FundingKeys, error) { + if s.localKey == nil || s.remoteKey == nil { + return nil, fmt.Errorf("unknown funding keys") + } + + return &FundingKeys{ + LocalKey: s.localKey, + RemoteKey: s.remoteKey, + }, nil +} + +// A compile-time check to ensure ShimIntent adheres to the Intent interface. +var _ Intent = (*ShimIntent)(nil) + +// CannedAssembler is a type of chanfunding.Assembler wherein the funding +// transaction is constructed outside of lnd, and may already exist. This +// Assembler serves as a shim which gives the funding flow the only thing it +// actually needs to proceed: the channel point. +type CannedAssembler struct { + // fundingAmt is the total amount of coins in the funding output. + fundingAmt btcutil.Amount + + // localKey is our multi-sig key. + localKey *keychain.KeyDescriptor + + // remoteKey is the remote party's multi-sig key. + remoteKey *btcec.PublicKey + + // chanPoint is the final channel point for the to be created channel. + chanPoint wire.OutPoint + + // initiator indicates if we're the initiator or the channel or not. + initiator bool + + // thawHeight, if non-zero is the height where this channel will become + // a normal channel. 
Until this height, it's considered frozen, so it + // can only be cooperatively closed by the responding party. + thawHeight uint32 +} + +// NewCannedAssembler creates a new CannedAssembler from the material required +// to construct a funding output and channel point. +func NewCannedAssembler(thawHeight uint32, chanPoint wire.OutPoint, + fundingAmt btcutil.Amount, localKey *keychain.KeyDescriptor, + remoteKey *btcec.PublicKey, initiator bool) *CannedAssembler { + + return &CannedAssembler{ + initiator: initiator, + localKey: localKey, + remoteKey: remoteKey, + fundingAmt: fundingAmt, + chanPoint: chanPoint, + thawHeight: thawHeight, + } +} + +// ProvisionChannel creates a new ShimIntent given the passed funding Request. +// The returned intent is immediately able to provide the channel point and +// funding output as they've already been created outside lnd. +// +// NOTE: This method satisfies the chanfunding.Assembler interface. +func (c *CannedAssembler) ProvisionChannel(req *Request) (Intent, error) { + // We'll exit out if this field is set as the funding transaction has + // already been assembled, so we don't influence coin selection.. + if req.SubtractFees { + return nil, fmt.Errorf("SubtractFees ignored, funding " + + "transaction is frozen") + } + + intent := &ShimIntent{ + localKey: c.localKey, + remoteKey: c.remoteKey, + chanPoint: &c.chanPoint, + thawHeight: c.thawHeight, + } + + if c.initiator { + intent.localFundingAmt = c.fundingAmt + } else { + intent.remoteFundingAmt = c.fundingAmt + } + + // A simple sanity check to ensure the provisioned request matches the + // re-made shim intent. + if req.LocalAmt+req.RemoteAmt != c.fundingAmt { + return nil, fmt.Errorf("intent doesn't match canned "+ + "assembler: local_amt=%v, remote_amt=%v, funding_amt=%v", + req.LocalAmt, req.RemoteAmt, c.fundingAmt) + } + + return intent, nil +} + +// A compile-time assertion to ensure CannedAssembler meets the Assembler +// interface. 
+var _ Assembler = (*CannedAssembler)(nil) diff --git a/lnwallet/chanfunding/coin_select.go b/lnwallet/chanfunding/coin_select.go new file mode 100644 index 0000000000..f1ce008d6f --- /dev/null +++ b/lnwallet/chanfunding/coin_select.go @@ -0,0 +1,216 @@ +package chanfunding + +import ( + "fmt" + + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" +) + +// ErrInsufficientFunds is a type matching the error interface which is +// returned when coin selection for a new funding transaction fails to due +// having an insufficient amount of confirmed funds. +type ErrInsufficientFunds struct { + amountAvailable btcutil.Amount + amountSelected btcutil.Amount +} + +// Error returns a human readable string describing the error. +func (e *ErrInsufficientFunds) Error() string { + return fmt.Sprintf("not enough witness outputs to create funding "+ + "transaction, need %v only have %v available", + e.amountAvailable, e.amountSelected) +} + +// Coin represents a spendable UTXO which is available for channel funding. +// This UTXO need not reside in our internal wallet as an example, and instead +// may be derived from an existing watch-only wallet. It wraps both the output +// present within the UTXO set, and also the outpoint that generates this coin. +type Coin struct { + wire.TxOut + + wire.OutPoint +} + +// selectInputs selects a slice of inputs necessary to meet the specified +// selection amount. If input selection is unable to succeed due to insufficient +// funds, a non-nil error is returned. Additionally, the total amount of the +// selected coins are returned in order for the caller to properly handle +// change+fees. 
+func selectInputs(amt btcutil.Amount, coins []Coin) (btcutil.Amount, []Coin, error) { + satSelected := btcutil.Amount(0) + for i, coin := range coins { + satSelected += btcutil.Amount(coin.Value) + if satSelected >= amt { + return satSelected, coins[:i+1], nil + } + } + + return 0, nil, &ErrInsufficientFunds{amt, satSelected} +} + +// CoinSelect attempts to select a sufficient amount of coins, including a +// change output to fund amt satoshis, adhering to the specified fee rate. The +// specified fee rate should be expressed in sat/kw for coin selection to +// function properly. +func CoinSelect(feeRate chainfee.SatPerKWeight, amt btcutil.Amount, + coins []Coin) ([]Coin, btcutil.Amount, error) { + + amtNeeded := amt + for { + // First perform an initial round of coin selection to estimate + // the required fee. + totalSat, selectedUtxos, err := selectInputs(amtNeeded, coins) + if err != nil { + return nil, 0, err + } + + var weightEstimate input.TxWeightEstimator + + for _, utxo := range selectedUtxos { + switch { + + case txscript.IsPayToWitnessPubKeyHash(utxo.PkScript): + weightEstimate.AddP2WKHInput() + + case txscript.IsPayToScriptHash(utxo.PkScript): + weightEstimate.AddNestedP2WKHInput() + + default: + return nil, 0, fmt.Errorf("unsupported address type: %x", + utxo.PkScript) + } + } + + // Channel funding multisig output is P2WSH. + weightEstimate.AddP2WSHOutput() + + // Assume that change output is a P2WKH output. + // + // TODO: Handle wallets that generate non-witness change + // addresses. + // TODO(halseth): make coinSelect not estimate change output + // for dust change. + weightEstimate.AddP2WKHOutput() + + // The difference between the selected amount and the amount + // requested will be used to pay fees, and generate a change + // output with the remaining. 
+ overShootAmt := totalSat - amt + + // Based on the estimated size and fee rate, if the excess + // amount isn't enough to pay fees, then increase the requested + // coin amount by the estimate required fee, performing another + // round of coin selection. + totalWeight := int64(weightEstimate.Weight()) + requiredFee := feeRate.FeeForWeight(totalWeight) + if overShootAmt < requiredFee { + amtNeeded = amt + requiredFee + continue + } + + // If the fee is sufficient, then calculate the size of the + // change output. + changeAmt := overShootAmt - requiredFee + + return selectedUtxos, changeAmt, nil + } +} + +// CoinSelectSubtractFees attempts to select coins such that we'll spend up to +// amt in total after fees, adhering to the specified fee rate. The selected +// coins, the final output and change values are returned. +func CoinSelectSubtractFees(feeRate chainfee.SatPerKWeight, amt, + dustLimit btcutil.Amount, coins []Coin) ([]Coin, btcutil.Amount, + btcutil.Amount, error) { + + // First perform an initial round of coin selection to estimate + // the required fee. + totalSat, selectedUtxos, err := selectInputs(amt, coins) + if err != nil { + return nil, 0, 0, err + } + + var weightEstimate input.TxWeightEstimator + for _, utxo := range selectedUtxos { + switch { + + case txscript.IsPayToWitnessPubKeyHash(utxo.PkScript): + weightEstimate.AddP2WKHInput() + + case txscript.IsPayToScriptHash(utxo.PkScript): + weightEstimate.AddNestedP2WKHInput() + + default: + return nil, 0, 0, fmt.Errorf("unsupported address "+ + "type: %x", utxo.PkScript) + } + } + + // Channel funding multisig output is P2WSH. + weightEstimate.AddP2WSHOutput() + + // At this point we've got two possibilities, either create a + // change output, or not. We'll first try without creating a + // change output. + // + // Estimate the fee required for a transaction without a change + // output. 
+ totalWeight := int64(weightEstimate.Weight()) + requiredFee := feeRate.FeeForWeight(totalWeight) + + // For a transaction without a change output, we'll let everything go + // to our multi-sig output after subtracting fees. + outputAmt := totalSat - requiredFee + changeAmt := btcutil.Amount(0) + + // If the the output is too small after subtracting the fee, the coin + // selection cannot be performed with an amount this small. + if outputAmt <= dustLimit { + return nil, 0, 0, fmt.Errorf("output amount(%v) after "+ + "subtracting fees(%v) below dust limit(%v)", outputAmt, + requiredFee, dustLimit) + } + + // We were able to create a transaction with no change from the + // selected inputs. We'll remember the resulting values for + // now, while we try to add a change output. Assume that change output + // is a P2WKH output. + weightEstimate.AddP2WKHOutput() + + // Now that we have added the change output, redo the fee + // estimate. + totalWeight = int64(weightEstimate.Weight()) + requiredFee = feeRate.FeeForWeight(totalWeight) + + // For a transaction with a change output, everything we don't spend + // will go to change. + newChange := totalSat - amt + newOutput := amt - requiredFee + + // If adding a change output leads to both outputs being above + // the dust limit, we'll add the change output. Otherwise we'll + // go with the no change tx we originally found. + if newChange > dustLimit && newOutput > dustLimit { + outputAmt = newOutput + changeAmt = newChange + } + + // Sanity check the resulting output values to make sure we + // don't burn a great part to fees. + totalOut := outputAmt + changeAmt + fee := totalSat - totalOut + + // Fail if more than 20% goes to fees. + // TODO(halseth): smarter fee limit. Make configurable or dynamic wrt + // total funding size? 
+ if fee > totalOut/5 { + return nil, 0, 0, fmt.Errorf("fee %v on total output"+ + "value %v", fee, totalOut) + } + + return selectedUtxos, outputAmt, changeAmt, nil +} diff --git a/lnwallet/wallet_test.go b/lnwallet/chanfunding/coin_select_test.go similarity index 79% rename from lnwallet/wallet_test.go rename to lnwallet/chanfunding/coin_select_test.go index f9a6c0d3a2..67b2497680 100644 --- a/lnwallet/wallet_test.go +++ b/lnwallet/chanfunding/coin_select_test.go @@ -1,16 +1,27 @@ -package lnwallet +package chanfunding import ( + "encoding/hex" "testing" + "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" +) + +var ( + p2wkhScript, _ = hex.DecodeString( + "001411034bdcb6ccb7744fdfdeea958a6fb0b415a032", + ) ) // fundingFee is a helper method that returns the fee estimate used for a tx // with the given number of inputs and the optional change output. This matches // the estimate done by the wallet. -func fundingFee(feeRate SatPerKWeight, numInput int, change bool) btcutil.Amount { +func fundingFee(feeRate chainfee.SatPerKWeight, numInput int, // nolint:unparam + change bool) btcutil.Amount { + var weightEstimate input.TxWeightEstimator // All inputs. @@ -39,13 +50,13 @@ func fundingFee(feeRate SatPerKWeight, numInput int, change bool) btcutil.Amount func TestCoinSelect(t *testing.T) { t.Parallel() - const feeRate = SatPerKWeight(100) + const feeRate = chainfee.SatPerKWeight(100) const dust = btcutil.Amount(100) type testCase struct { name string outputValue btcutil.Amount - coins []*Utxo + coins []Coin expectedInput []btcutil.Amount expectedChange btcutil.Amount @@ -58,10 +69,12 @@ func TestCoinSelect(t *testing.T) { // This will obviously lead to a change output of // almost 0.5 BTC. 
name: "big change", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: 1 * btcutil.SatoshiPerBitcoin, + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: 1 * btcutil.SatoshiPerBitcoin, + }, }, }, outputValue: 0.5 * btcutil.SatoshiPerBitcoin, @@ -78,10 +91,12 @@ func TestCoinSelect(t *testing.T) { // This should lead to an error, as we don't have // enough funds to pay the fee. name: "nothing left for fees", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: 1 * btcutil.SatoshiPerBitcoin, + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: 1 * btcutil.SatoshiPerBitcoin, + }, }, }, outputValue: 1 * btcutil.SatoshiPerBitcoin, @@ -92,10 +107,12 @@ func TestCoinSelect(t *testing.T) { // as big as possible, such that the remaining change // will be dust. name: "dust change", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: 1 * btcutil.SatoshiPerBitcoin, + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: 1 * btcutil.SatoshiPerBitcoin, + }, }, }, // We tune the output value by subtracting the expected @@ -114,10 +131,12 @@ func TestCoinSelect(t *testing.T) { // as big as possible, such that there is nothing left // for change. 
name: "no change", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: 1 * btcutil.SatoshiPerBitcoin, + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: 1 * btcutil.SatoshiPerBitcoin, + }, }, }, // We tune the output value to be the maximum amount @@ -140,7 +159,7 @@ func TestCoinSelect(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() - selected, changeAmt, err := coinSelect( + selected, changeAmt, err := CoinSelect( feeRate, test.outputValue, test.coins, ) if !test.expectErr && err != nil { @@ -163,7 +182,7 @@ func TestCoinSelect(t *testing.T) { } for i, coin := range selected { - if coin.Value != test.expectedInput[i] { + if coin.Value != int64(test.expectedInput[i]) { t.Fatalf("expected input %v to have value %v, "+ "had %v", i, test.expectedInput[i], coin.Value) @@ -185,14 +204,14 @@ func TestCoinSelect(t *testing.T) { func TestCoinSelectSubtractFees(t *testing.T) { t.Parallel() - const feeRate = SatPerKWeight(100) + const feeRate = chainfee.SatPerKWeight(100) const dustLimit = btcutil.Amount(1000) const dust = btcutil.Amount(100) type testCase struct { name string spendValue btcutil.Amount - coins []*Utxo + coins []Coin expectedInput []btcutil.Amount expectedFundingAmt btcutil.Amount @@ -206,10 +225,12 @@ func TestCoinSelectSubtractFees(t *testing.T) { // should lead to a funding TX with one output, the // rest goes to fees. name: "spend all", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: 1 * btcutil.SatoshiPerBitcoin, + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: 1 * btcutil.SatoshiPerBitcoin, + }, }, }, spendValue: 1 * btcutil.SatoshiPerBitcoin, @@ -225,10 +246,12 @@ func TestCoinSelectSubtractFees(t *testing.T) { // The total funds available is below the dust limit // after paying fees. 
name: "dust output", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: fundingFee(feeRate, 1, false) + dust, + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: int64(fundingFee(feeRate, 1, false) + dust), + }, }, }, spendValue: fundingFee(feeRate, 1, false) + dust, @@ -240,10 +263,12 @@ func TestCoinSelectSubtractFees(t *testing.T) { // is below the dust limit. The remainder should go // towards the funding output. name: "dust change", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: 1 * btcutil.SatoshiPerBitcoin, + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: 1 * btcutil.SatoshiPerBitcoin, + }, }, }, spendValue: 1*btcutil.SatoshiPerBitcoin - dust, @@ -257,10 +282,12 @@ func TestCoinSelectSubtractFees(t *testing.T) { { // We got just enough funds to create an output above the dust limit. name: "output right above dustlimit", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: fundingFee(feeRate, 1, false) + dustLimit + 1, + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: int64(fundingFee(feeRate, 1, false) + dustLimit + 1), + }, }, }, spendValue: fundingFee(feeRate, 1, false) + dustLimit + 1, @@ -275,10 +302,12 @@ func TestCoinSelectSubtractFees(t *testing.T) { // Amount left is below dust limit after paying fee for // a change output, resulting in a no-change tx. name: "no amount to pay fee for change", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: fundingFee(feeRate, 1, false) + 2*(dustLimit+1), + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: int64(fundingFee(feeRate, 1, false) + 2*(dustLimit+1)), + }, }, }, spendValue: fundingFee(feeRate, 1, false) + dustLimit + 1, @@ -292,10 +321,12 @@ func TestCoinSelectSubtractFees(t *testing.T) { { // If more than 20% of funds goes to fees, it should fail. 
name: "high fee", - coins: []*Utxo{ + coins: []Coin{ { - AddressType: WitnessPubKey, - Value: 5 * fundingFee(feeRate, 1, false), + TxOut: wire.TxOut{ + PkScript: p2wkhScript, + Value: int64(5 * fundingFee(feeRate, 1, false)), + }, }, }, spendValue: 5 * fundingFee(feeRate, 1, false), @@ -305,8 +336,10 @@ func TestCoinSelectSubtractFees(t *testing.T) { } for _, test := range testCases { + test := test + t.Run(test.name, func(t *testing.T) { - selected, localFundingAmt, changeAmt, err := coinSelectSubtractFees( + selected, localFundingAmt, changeAmt, err := CoinSelectSubtractFees( feeRate, test.spendValue, dustLimit, test.coins, ) if !test.expectErr && err != nil { @@ -329,7 +362,7 @@ func TestCoinSelectSubtractFees(t *testing.T) { } for i, coin := range selected { - if coin.Value != test.expectedInput[i] { + if coin.Value != int64(test.expectedInput[i]) { t.Fatalf("expected input %v to have value %v, "+ "had %v", i, test.expectedInput[i], coin.Value) diff --git a/lnwallet/chanfunding/log.go b/lnwallet/chanfunding/log.go new file mode 100644 index 0000000000..159a96ca1d --- /dev/null +++ b/lnwallet/chanfunding/log.go @@ -0,0 +1,29 @@ +package chanfunding + +import ( + "github.com/btcsuite/btclog" + "github.com/lightningnetwork/lnd/build" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log btclog.Logger + +// The default amount of logging is none. +func init() { + UseLogger(build.NewSubLogger("CHFD", nil)) +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until UseLogger is called. +func DisableLog() { + UseLogger(btclog.Disabled) +} + +// UseLogger uses a specified Logger to output package logging info. +// This should be used in preference to SetLogWriter if the caller is also +// using btclog. 
+func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/lnwallet/chanfunding/psbt_assembler.go b/lnwallet/chanfunding/psbt_assembler.go new file mode 100644 index 0000000000..a65cc624a2 --- /dev/null +++ b/lnwallet/chanfunding/psbt_assembler.go @@ -0,0 +1,524 @@ +package chanfunding + +import ( + "bytes" + "crypto/sha256" + "errors" + "fmt" + "sync" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/psbt" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/keychain" +) + +// PsbtState is a type for the state of the PSBT intent state machine. +type PsbtState uint8 + +const ( + // PsbtShimRegistered denotes a channel funding process has started with + // a PSBT shim attached. This is the default state for a PsbtIntent. We + // don't use iota here because the values have to be in sync with the + // RPC constants. + PsbtShimRegistered PsbtState = 1 + + // PsbtOutputKnown denotes that the local and remote peer have + // negotiated the multisig keys to be used as the channel funding output + // and therefore the PSBT funding process can now start. + PsbtOutputKnown PsbtState = 2 + + // PsbtVerified denotes that a potential PSBT has been presented to the + // intent and passed all checks. The verified PSBT can be given to a/the + // signer(s). + PsbtVerified PsbtState = 3 + + // PsbtFinalized denotes that a fully signed PSBT has been given to the + // intent that looks identical to the previously verified transaction + // but has all witness data added and is therefore completely signed. + PsbtFinalized PsbtState = 4 + + // PsbtFundingTxCompiled denotes that the PSBT processed by this intent + // has been successfully converted into a protocol transaction. 
It is + // not yet completely certain that the resulting transaction will be + // published because the commitment transactions between the channel + // peers first need to be counter signed. But the job of the intent is + // hereby completed. + PsbtFundingTxCompiled PsbtState = 5 + + // PsbtInitiatorCanceled denotes that the user has canceled the intent. + PsbtInitiatorCanceled PsbtState = 6 + + // PsbtResponderCanceled denotes that the remote peer has canceled the + // funding, likely due to a timeout. + PsbtResponderCanceled PsbtState = 7 +) + +// String returns a string representation of the PsbtState. +func (s PsbtState) String() string { + switch s { + case PsbtShimRegistered: + return "shim_registered" + + case PsbtOutputKnown: + return "output_known" + + case PsbtVerified: + return "verified" + + case PsbtFinalized: + return "finalized" + + case PsbtFundingTxCompiled: + return "funding_tx_compiled" + + case PsbtInitiatorCanceled: + return "user_canceled" + + case PsbtResponderCanceled: + return "remote_canceled" + + default: + return fmt.Sprintf("<unknown(%d)>", s) + } +} + +var ( + // ErrRemoteCanceled is the error that is returned to the user if the + // funding flow was canceled by the remote peer. + ErrRemoteCanceled = errors.New("remote canceled funding, possibly " + + "timed out") + + // ErrUserCanceled is the error that is returned through the PsbtReady + // channel if the user canceled the funding flow. + ErrUserCanceled = errors.New("user canceled funding") +) + +// PsbtIntent is an intent created by the PsbtAssembler which represents a +// funding output to be created by a PSBT. This might be used when a hardware +// wallet, or a channel factory is the entity crafting the funding transaction, +// and not lnd. +type PsbtIntent struct { + // ShimIntent is the wrapped basic intent that contains common fields + // we also use in the PSBT funding case. + ShimIntent + + // State is the current state the intent state machine is in. 
+ State PsbtState + + // BasePsbt is the user-supplied base PSBT the channel output should be + // added to. If this is nil we will create a new, empty PSBT as the base + // for the funding transaction. + BasePsbt *psbt.Packet + + // PendingPsbt is the parsed version of the current PSBT. This can be + // in two stages: If the user has not yet provided any PSBT, this is + // nil. Once the user sends us an unsigned funded PSBT, we verify that + // we have a valid transaction that sends to the channel output PK + // script and has an input large enough to pay for it. We keep this + // verified but not yet signed version around until the fully signed + // transaction is submitted by the user. At that point we make sure the + // inputs and outputs haven't changed to what was previously verified. + // Only witness data should be added after the verification process. + PendingPsbt *psbt.Packet + + // PsbtReady is an error channel the funding manager will listen for + // a signal about the PSBT being ready to continue the funding flow. In + // the normal, happy flow, this channel is only ever closed. If a + // non-nil error is sent through the channel, the funding flow will be + // canceled. + // + // NOTE: This channel must always be buffered. + PsbtReady chan error + + // signalPsbtReady is a Once guard to make sure the PsbtReady channel is + // only closed exactly once. + signalPsbtReady sync.Once + + // netParams are the network parameters used to encode the P2WSH funding + // address. + netParams *chaincfg.Params +} + +// BindKeys sets both the remote and local node's keys that will be used for the +// channel funding multisig output. +func (i *PsbtIntent) BindKeys(localKey *keychain.KeyDescriptor, + remoteKey *btcec.PublicKey) { + + i.localKey = localKey + i.remoteKey = remoteKey + i.State = PsbtOutputKnown +} + +// FundingParams returns the parameters that are necessary to start funding the +// channel output this intent was created for. 
It returns the P2WSH funding +// address, the exact funding amount and a PSBT packet that contains exactly one +// output that encodes the previous two parameters. +func (i *PsbtIntent) FundingParams() (btcutil.Address, int64, *psbt.Packet, + error) { + + if i.State != PsbtOutputKnown { + return nil, 0, nil, fmt.Errorf("invalid state, got %v "+ + "expected %v", i.State, PsbtOutputKnown) + } + + // The funding output needs to be known already at this point, which + // means we need to have the local and remote multisig keys bound + // already. + witnessScript, out, err := i.FundingOutput() + if err != nil { + return nil, 0, nil, fmt.Errorf("unable to create funding "+ + "output: %v", err) + } + witnessScriptHash := sha256.Sum256(witnessScript) + + // Encode the address in the human readable bech32 format. + addr, err := btcutil.NewAddressWitnessScriptHash( + witnessScriptHash[:], i.netParams, + ) + if err != nil { + return nil, 0, nil, fmt.Errorf("unable to encode address: %v", + err) + } + + // We'll also encode the address/amount in a machine readable raw PSBT + // format. If the user supplied a base PSBT, we'll add the output to + // that one, otherwise we'll create a new one. + packet := i.BasePsbt + if packet == nil { + packet, err = psbt.New(nil, nil, 2, 0, nil) + if err != nil { + return nil, 0, nil, fmt.Errorf("unable to create "+ + "PSBT: %v", err) + } + } + packet.UnsignedTx.TxOut = append(packet.UnsignedTx.TxOut, out) + packet.Outputs = append(packet.Outputs, psbt.POutput{}) + return addr, out.Value, packet, nil +} + +// Verify makes sure the PSBT that is given to the intent has an output that +// sends to the channel funding multisig address with the correct amount. A +// simple check that at least a single input has been specified is performed. +func (i *PsbtIntent) Verify(packet *psbt.Packet) error { + if packet == nil { + return fmt.Errorf("PSBT is nil") + } + if i.State != PsbtOutputKnown { + return fmt.Errorf("invalid state. 
got %v expected %v", i.State, + PsbtOutputKnown) + } + + // Try to locate the channel funding multisig output. + _, expectedOutput, err := i.FundingOutput() + if err != nil { + return fmt.Errorf("funding output cannot be created: %v", err) + } + outputFound := false + outputSum := int64(0) + for _, out := range packet.UnsignedTx.TxOut { + outputSum += out.Value + if txOutsEqual(out, expectedOutput) { + outputFound = true + } + } + if !outputFound { + return fmt.Errorf("funding output not found in PSBT") + } + + // At least one input needs to be specified and it must be large enough + // to pay for all outputs. We don't want to dive into fee estimation + // here so we just assume that if the input amount exceeds the output + // amount, the chosen fee is sufficient. + if len(packet.UnsignedTx.TxIn) == 0 { + return fmt.Errorf("PSBT has no inputs") + } + sum, err := sumUtxoInputValues(packet) + if err != nil { + return fmt.Errorf("error determining input sum: %v", err) + } + if sum <= outputSum { + return fmt.Errorf("input amount sum must be larger than " + + "output amount sum") + } + + i.PendingPsbt = packet + i.State = PsbtVerified + return nil +} + +// Finalize makes sure the final PSBT that is given to the intent is fully valid +// and signed but still contains the same UTXOs and outputs as the pending +// transaction we previously verified. If everything checks out, the funding +// manager is informed that the channel can now be opened and the funding +// transaction be broadcast. +func (i *PsbtIntent) Finalize(packet *psbt.Packet) error { + if packet == nil { + return fmt.Errorf("PSBT is nil") + } + if i.State != PsbtVerified { + return fmt.Errorf("invalid state. got %v expected %v", i.State, + PsbtVerified) + } + + // Make sure the PSBT itself thinks it's finalized and ready to be + // broadcast. 
+ err := psbt.MaybeFinalizeAll(packet) + if err != nil { + return fmt.Errorf("error finalizing PSBT: %v", err) + } + _, err = psbt.Extract(packet) + if err != nil { + return fmt.Errorf("unable to extract funding TX: %v", err) + } + + // Do a basic check that this is still the same PSBT that we verified in + // the previous step. This is to protect the user from unwanted + // modifications. We only check the outputs and previous outpoints of + // the inputs of the wire transaction because the fields in the PSBT + // part are allowed to change. + if i.PendingPsbt == nil { + return fmt.Errorf("PSBT was not verified first") + } + err = verifyOutputsEqual( + packet.UnsignedTx.TxOut, i.PendingPsbt.UnsignedTx.TxOut, + ) + if err != nil { + return fmt.Errorf("outputs differ from verified PSBT: %v", err) + } + err = verifyInputPrevOutpointsEqual( + packet.UnsignedTx.TxIn, i.PendingPsbt.UnsignedTx.TxIn, + ) + if err != nil { + return fmt.Errorf("inputs differ from verified PSBT: %v", err) + } + + // As far as we can tell, this PSBT is ok to be used as a funding + // transaction. + i.PendingPsbt = packet + i.State = PsbtFinalized + + // Signal the funding manager that it can now finally continue with its + // funding flow as the PSBT is now ready to be converted into a real + // transaction and be published. + i.signalPsbtReady.Do(func() { + close(i.PsbtReady) + }) + return nil +} + +// CompileFundingTx finalizes the previously verified PSBT and returns the +// extracted binary serialized transaction from it. It also prepares the channel +// point for which this funding intent was initiated for. +func (i *PsbtIntent) CompileFundingTx() (*wire.MsgTx, error) { + if i.State != PsbtFinalized { + return nil, fmt.Errorf("invalid state. got %v expected %v", + i.State, PsbtFinalized) + } + + // Make sure the PSBT can be finalized and extracted. 
+ err := psbt.MaybeFinalizeAll(i.PendingPsbt) + if err != nil { + return nil, fmt.Errorf("error finalizing PSBT: %v", err) + } + fundingTx, err := psbt.Extract(i.PendingPsbt) + if err != nil { + return nil, fmt.Errorf("unable to extract funding TX: %v", err) + } + + // Identify our funding outpoint now that we know everything's ready. + _, txOut, err := i.FundingOutput() + if err != nil { + return nil, fmt.Errorf("cannot get funding output: %v", err) + } + ok, idx := input.FindScriptOutputIndex(fundingTx, txOut.PkScript) + if !ok { + return nil, fmt.Errorf("funding output not found in PSBT") + } + i.chanPoint = &wire.OutPoint{ + Hash: fundingTx.TxHash(), + Index: idx, + } + i.State = PsbtFundingTxCompiled + + return fundingTx, nil +} + +// RemoteCanceled informs the listener of the PSBT ready channel that the +// funding has been canceled by the remote peer and that we can no longer +// continue with it. +func (i *PsbtIntent) RemoteCanceled() { + log.Debugf("PSBT funding intent canceled by remote, state=%v", i.State) + i.signalPsbtReady.Do(func() { + i.PsbtReady <- ErrRemoteCanceled + i.State = PsbtResponderCanceled + }) + i.ShimIntent.Cancel() +} + +// Cancel allows the caller to cancel a funding Intent at any time. This will +// make sure the channel funding flow with the remote peer is failed and +// any reservations are canceled. +// +// NOTE: Part of the chanfunding.Intent interface. +func (i *PsbtIntent) Cancel() { + log.Debugf("PSBT funding intent canceled, state=%v", i.State) + i.signalPsbtReady.Do(func() { + i.PsbtReady <- ErrUserCanceled + i.State = PsbtInitiatorCanceled + }) + i.ShimIntent.Cancel() +} + +// PsbtAssembler is a type of chanfunding.Assembler wherein the funding +// transaction is constructed outside of lnd by using partially signed bitcoin +// transactions (PSBT). +type PsbtAssembler struct { + // fundingAmt is the total amount of coins in the funding output. 
+ fundingAmt btcutil.Amount + + // basePsbt is the user-supplied base PSBT the channel output should be + // added to. + basePsbt *psbt.Packet + + // netParams are the network parameters used to encode the P2WSH funding + // address. + netParams *chaincfg.Params +} + +// NewPsbtAssembler creates a new CannedAssembler from the material required +// to construct a funding output and channel point. An optional base PSBT can +// be supplied which will be used to add the channel output to instead of +// creating a new one. +func NewPsbtAssembler(fundingAmt btcutil.Amount, basePsbt *psbt.Packet, + netParams *chaincfg.Params) *PsbtAssembler { + + return &PsbtAssembler{ + fundingAmt: fundingAmt, + basePsbt: basePsbt, + netParams: netParams, + } +} + +// ProvisionChannel creates a new ShimIntent given the passed funding Request. +// The returned intent is immediately able to provide the channel point and +// funding output as they've already been created outside lnd. +// +// NOTE: This method satisfies the chanfunding.Assembler interface. +func (p *PsbtAssembler) ProvisionChannel(req *Request) (Intent, error) { + // We'll exit out if this field is set as the funding transaction will + // be assembled externally, so we don't influence coin selection. + if req.SubtractFees { + return nil, fmt.Errorf("SubtractFees not supported for PSBT") + } + + intent := &PsbtIntent{ + ShimIntent: ShimIntent{ + localFundingAmt: p.fundingAmt, + }, + State: PsbtShimRegistered, + BasePsbt: p.basePsbt, + PsbtReady: make(chan error, 1), + netParams: p.netParams, + } + + // A simple sanity check to ensure the provisioned request matches the + // re-made shim intent. 
+ if req.LocalAmt+req.RemoteAmt != p.fundingAmt { + return nil, fmt.Errorf("intent doesn't match PSBT "+ + "assembler: local_amt=%v, remote_amt=%v, funding_amt=%v", + req.LocalAmt, req.RemoteAmt, p.fundingAmt) + } + + return intent, nil +} + +// FundingTxAvailable is an empty method that an assembler can implement to +// signal to callers that its able to provide the funding transaction for the +// channel via the intent it returns. +// +// NOTE: This method is a part of the FundingTxAssembler interface. +func (p *PsbtAssembler) FundingTxAvailable() {} + +// A compile-time assertion to ensure PsbtAssembler meets the Assembler +// interface. +var _ Assembler = (*PsbtAssembler)(nil) + +// sumUtxoInputValues tries to extract the sum of all inputs specified in the +// UTXO fields of the PSBT. An error is returned if an input is specified that +// does not contain any UTXO information. +func sumUtxoInputValues(packet *psbt.Packet) (int64, error) { + // We take the TX ins of the unsigned TX as the truth for how many + // inputs there should be, as the fields in the extra data part of the + // PSBT can be empty. + if len(packet.UnsignedTx.TxIn) != len(packet.Inputs) { + return 0, fmt.Errorf("TX input length doesn't match PSBT " + + "input length") + } + inputSum := int64(0) + for idx, in := range packet.Inputs { + switch { + case in.WitnessUtxo != nil: + // Witness UTXOs only need to reference the TxOut. + inputSum += in.WitnessUtxo.Value + + case in.NonWitnessUtxo != nil: + // Non-witness UTXOs reference to the whole transaction + // the UTXO resides in. + utxOuts := in.NonWitnessUtxo.TxOut + txIn := packet.UnsignedTx.TxIn[idx] + inputSum += utxOuts[txIn.PreviousOutPoint.Index].Value + + default: + return 0, fmt.Errorf("input %d has no UTXO information", + idx) + } + } + return inputSum, nil +} + +// txOutsEqual returns true if two transaction outputs are equal. 
+func txOutsEqual(out1, out2 *wire.TxOut) bool { + if out1 == nil || out2 == nil { + return out1 == out2 + } + return out1.Value == out2.Value && + bytes.Equal(out1.PkScript, out2.PkScript) +} + +// verifyOutputsEqual verifies that the two slices of transaction outputs are +// deep equal to each other. We do the length check and manual loop to provide +// better error messages to the user than just returning "not equal". +func verifyOutputsEqual(outs1, outs2 []*wire.TxOut) error { + if len(outs1) != len(outs2) { + return fmt.Errorf("number of outputs are different") + } + for idx, out := range outs1 { + // There is a byte slice in the output so we can't use the + // equality operator. + if !txOutsEqual(out, outs2[idx]) { + return fmt.Errorf("output %d is different", idx) + } + } + return nil +} + +// verifyInputPrevOutpointsEqual verifies that the previous outpoints of the +// two slices of transaction inputs are deep equal to each other. We do the +// length check and manual loop to provide better error messages to the user +// than just returning "not equal". 
+func verifyInputPrevOutpointsEqual(ins1, ins2 []*wire.TxIn) error { + if len(ins1) != len(ins2) { + return fmt.Errorf("number of inputs are different") + } + for idx, in := range ins1 { + if in.PreviousOutPoint != ins2[idx].PreviousOutPoint { + return fmt.Errorf("previous outpoint of input %d is "+ + "different", idx) + } + } + return nil +} diff --git a/lnwallet/chanfunding/psbt_assembler_test.go b/lnwallet/chanfunding/psbt_assembler_test.go new file mode 100644 index 0000000000..5367ecdbe6 --- /dev/null +++ b/lnwallet/chanfunding/psbt_assembler_test.go @@ -0,0 +1,577 @@ +package chanfunding + +import ( + "bytes" + "crypto/sha256" + "fmt" + "reflect" + "sync" + "testing" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/psbt" + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/keychain" +) + +var ( + localPrivkey = []byte{1, 2, 3, 4, 5, 6} + remotePrivkey = []byte{6, 5, 4, 3, 2, 1} + chanCapacity btcutil.Amount = 644000 + params = chaincfg.RegressionNetParams + defaultTimeout = 50 * time.Millisecond +) + +// TestPsbtIntent tests the basic happy path of the PSBT assembler and intent. +func TestPsbtIntent(t *testing.T) { + t.Parallel() + + // Create a simple assembler and ask it to provision a channel to get + // the funding intent. + a := NewPsbtAssembler(chanCapacity, nil, ¶ms) + intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity}) + if err != nil { + t.Fatalf("error provisioning channel: %v", err) + } + psbtIntent, ok := intent.(*PsbtIntent) + if !ok { + t.Fatalf("intent was not a PsbtIntent") + } + if psbtIntent.State != PsbtShimRegistered { + t.Fatalf("unexpected state. 
got %d wanted %d", psbtIntent.State, + PsbtShimRegistered) + } + + // The first step with the intent is that the funding manager starts + // negotiating with the remote peer and they accept. By accepting, they + // send over their multisig key that's going to be used for the funding + // output. With that known, we can start crafting a PSBT. + _, localPubkey := btcec.PrivKeyFromBytes(btcec.S256(), localPrivkey) + _, remotePubkey := btcec.PrivKeyFromBytes(btcec.S256(), remotePrivkey) + psbtIntent.BindKeys( + &keychain.KeyDescriptor{PubKey: localPubkey}, remotePubkey, + ) + if psbtIntent.State != PsbtOutputKnown { + t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State, + PsbtOutputKnown) + } + + // Make sure the output script address is correct. + script, _, err := input.GenFundingPkScript( + localPubkey.SerializeCompressed(), + remotePubkey.SerializeCompressed(), int64(chanCapacity), + ) + if err != nil { + t.Fatalf("error calculating script: %v", err) + } + witnessScriptHash := sha256.Sum256(script) + addr, err := btcutil.NewAddressWitnessScriptHash( + witnessScriptHash[:], ¶ms, + ) + if err != nil { + t.Fatalf("unable to encode address: %v", err) + } + fundingAddr, amt, pendingPsbt, err := psbtIntent.FundingParams() + if err != nil { + t.Fatalf("unable to get funding params: %v", err) + } + if addr.EncodeAddress() != fundingAddr.EncodeAddress() { + t.Fatalf("unexpected address. got %s wanted %s", fundingAddr, + addr) + } + if amt != int64(chanCapacity) { + t.Fatalf("unexpected amount. got %d wanted %d", amt, + chanCapacity) + } + + // Parse and check the returned PSBT packet. + if pendingPsbt == nil { + t.Fatalf("expected pending PSBT to be returned") + } + if len(pendingPsbt.UnsignedTx.TxOut) != 1 { + t.Fatalf("unexpected number of outputs. got %d wanted %d", + len(pendingPsbt.UnsignedTx.TxOut), 1) + } + txOut := pendingPsbt.UnsignedTx.TxOut[0] + if !bytes.Equal(txOut.PkScript[2:], witnessScriptHash[:]) { + t.Fatalf("unexpected PK script in output. 
got %x wanted %x", + txOut.PkScript[2:], witnessScriptHash) + } + if txOut.Value != int64(chanCapacity) { + t.Fatalf("unexpected value in output. got %d wanted %d", + txOut.Value, chanCapacity) + } + + // Add an input to the pending TX to simulate it being funded. + pendingPsbt.UnsignedTx.TxIn = []*wire.TxIn{ + {PreviousOutPoint: wire.OutPoint{Index: 0}}, + } + pendingPsbt.Inputs = []psbt.PInput{ + {WitnessUtxo: &wire.TxOut{Value: int64(chanCapacity + 1)}}, + } + + // Verify the dummy PSBT with the intent. + err = psbtIntent.Verify(pendingPsbt) + if err != nil { + t.Fatalf("error verifying pending PSBT: %v", err) + } + if psbtIntent.State != PsbtVerified { + t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State, + PsbtVerified) + } + + // Add some fake witness data to the transaction so it thinks it's + // signed. + pendingPsbt.Inputs[0].WitnessUtxo = &wire.TxOut{ + Value: int64(chanCapacity) * 2, + PkScript: []byte{99, 99, 99}, + } + pendingPsbt.Inputs[0].FinalScriptSig = []byte{88, 88, 88} + pendingPsbt.Inputs[0].FinalScriptWitness = []byte{2, 0, 0} + + // If we call Finalize, the intent will signal to the funding manager + // that it can continue with the funding flow. We want to make sure + // the signal arrives. + var wg sync.WaitGroup + errChan := make(chan error, 1) + wg.Add(1) + go func() { + defer wg.Done() + select { + case err := <-psbtIntent.PsbtReady: + errChan <- err + + case <-time.After(defaultTimeout): + errChan <- fmt.Errorf("timed out") + } + }() + err = psbtIntent.Finalize(pendingPsbt) + if err != nil { + t.Fatalf("error finalizing pending PSBT: %v", err) + } + wg.Wait() + + // We should have a nil error in our channel now. + err = <-errChan + if err != nil { + t.Fatalf("unexpected error after finalize: %v", err) + } + if psbtIntent.State != PsbtFinalized { + t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State, + PsbtFinalized) + } + + // Make sure the funding transaction can be compiled. 
+ _, err = psbtIntent.CompileFundingTx() + if err != nil { + t.Fatalf("error compiling funding TX from PSBT: %v", err) + } + if psbtIntent.State != PsbtFundingTxCompiled { + t.Fatalf("unexpected state. got %d wanted %d", psbtIntent.State, + PsbtFundingTxCompiled) + } +} + +// TestPsbtIntentBasePsbt tests that a channel funding output can be appended to +// a given base PSBT in the funding flow. +func TestPsbtIntentBasePsbt(t *testing.T) { + t.Parallel() + + // First create a dummy PSBT with a single output. + pendingPsbt, err := psbt.New( + []*wire.OutPoint{{}}, []*wire.TxOut{ + {Value: 999, PkScript: []byte{99, 88, 77}}, + }, 2, 0, []uint32{0}, + ) + if err != nil { + t.Fatalf("unable to create dummy PSBT") + } + + // Generate the funding multisig keys and the address so we can compare + // it to the output of the intent. + _, localPubkey := btcec.PrivKeyFromBytes(btcec.S256(), localPrivkey) + _, remotePubkey := btcec.PrivKeyFromBytes(btcec.S256(), remotePrivkey) + // Make sure the output script address is correct. + script, _, err := input.GenFundingPkScript( + localPubkey.SerializeCompressed(), + remotePubkey.SerializeCompressed(), int64(chanCapacity), + ) + if err != nil { + t.Fatalf("error calculating script: %v", err) + } + witnessScriptHash := sha256.Sum256(script) + addr, err := btcutil.NewAddressWitnessScriptHash( + witnessScriptHash[:], ¶ms, + ) + if err != nil { + t.Fatalf("unable to encode address: %v", err) + } + + // Now as the next step, create a new assembler/intent pair with a base + // PSBT to see that we can add an additional output to it. 
+ a := NewPsbtAssembler(chanCapacity, pendingPsbt, ¶ms) + intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity}) + if err != nil { + t.Fatalf("error provisioning channel: %v", err) + } + psbtIntent, ok := intent.(*PsbtIntent) + if !ok { + t.Fatalf("intent was not a PsbtIntent") + } + psbtIntent.BindKeys( + &keychain.KeyDescriptor{PubKey: localPubkey}, remotePubkey, + ) + newAddr, amt, twoOutPsbt, err := psbtIntent.FundingParams() + if err != nil { + t.Fatalf("unable to get funding params: %v", err) + } + if addr.EncodeAddress() != newAddr.EncodeAddress() { + t.Fatalf("unexpected address. got %s wanted %s", newAddr, + addr) + } + if amt != int64(chanCapacity) { + t.Fatalf("unexpected amount. got %d wanted %d", amt, + chanCapacity) + } + if len(twoOutPsbt.UnsignedTx.TxOut) != 2 { + t.Fatalf("unexpected number of outputs. got %d wanted %d", + len(twoOutPsbt.UnsignedTx.TxOut), 2) + } + if len(twoOutPsbt.UnsignedTx.TxIn) != 1 { + t.Fatalf("unexpected number of inputs. got %d wanted %d", + len(twoOutPsbt.UnsignedTx.TxIn), 1) + } + txOld := pendingPsbt.UnsignedTx + txNew := twoOutPsbt.UnsignedTx + prevoutEqual := reflect.DeepEqual( + txOld.TxIn[0].PreviousOutPoint, txNew.TxIn[0].PreviousOutPoint, + ) + if !prevoutEqual { + t.Fatalf("inputs changed. got %s wanted %s", + spew.Sdump(txOld.TxIn[0].PreviousOutPoint), + spew.Sdump(txNew.TxIn[0].PreviousOutPoint)) + } + if !reflect.DeepEqual(txOld.TxOut[0], txNew.TxOut[0]) { + t.Fatalf("existing output changed. got %v wanted %v", + txOld.TxOut[0], txNew.TxOut[0]) + } +} + +// TestPsbtVerify tests the PSBT verification process more deeply than just +// the happy path. 
+func TestPsbtVerify(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + expectedErr string + doVerify func(int64, *psbt.Packet, *PsbtIntent) error + }{ + { + name: "nil packet", + expectedErr: "PSBT is nil", + doVerify: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + return i.Verify(nil) + }, + }, + { + name: "wrong state", + expectedErr: "invalid state. got user_canceled " + + "expected output_known", + doVerify: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + i.State = PsbtInitiatorCanceled + return i.Verify(p) + }, + }, + { + name: "output not found, value wrong", + expectedErr: "funding output not found in PSBT", + doVerify: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + p.UnsignedTx.TxOut[0].Value = 123 + return i.Verify(p) + }, + }, + { + name: "output not found, pk script wrong", + expectedErr: "funding output not found in PSBT", + doVerify: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + p.UnsignedTx.TxOut[0].PkScript = []byte{1, 2, 3} + return i.Verify(p) + }, + }, + { + name: "no inputs", + expectedErr: "PSBT has no inputs", + doVerify: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + return i.Verify(p) + }, + }, + { + name: "input(s) too small", + expectedErr: "input amount sum must be larger than " + + "output amount sum", + doVerify: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + p.UnsignedTx.TxIn = []*wire.TxIn{{}} + p.Inputs = []psbt.PInput{{ + WitnessUtxo: &wire.TxOut{ + Value: int64(chanCapacity), + }, + }} + return i.Verify(p) + }, + }, + { + name: "input correct", + expectedErr: "", + doVerify: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + txOut := &wire.TxOut{ + Value: int64(chanCapacity/2) + 1, + } + p.UnsignedTx.TxIn = []*wire.TxIn{ + {}, + { + PreviousOutPoint: wire.OutPoint{ + Index: 0, + }, + }, + } + p.Inputs = []psbt.PInput{ + { + WitnessUtxo: txOut, + }, + { + NonWitnessUtxo: &wire.MsgTx{ + TxOut: []*wire.TxOut{ + txOut, 
+ }, + }, + }} + return i.Verify(p) + }, + }, + } + + // Create a simple assembler and ask it to provision a channel to get + // the funding intent. + a := NewPsbtAssembler(chanCapacity, nil, ¶ms) + intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity}) + if err != nil { + t.Fatalf("error provisioning channel: %v", err) + } + psbtIntent := intent.(*PsbtIntent) + + // Bind our test keys to get the funding parameters. + _, localPubkey := btcec.PrivKeyFromBytes(btcec.S256(), localPrivkey) + _, remotePubkey := btcec.PrivKeyFromBytes(btcec.S256(), remotePrivkey) + psbtIntent.BindKeys( + &keychain.KeyDescriptor{PubKey: localPubkey}, remotePubkey, + ) + + // Loop through all our test cases. + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + // Reset the state from a previous test and create a new + // pending PSBT that we can manipulate. + psbtIntent.State = PsbtOutputKnown + _, amt, pendingPsbt, err := psbtIntent.FundingParams() + if err != nil { + t.Fatalf("unable to get funding params: %v", err) + } + + err = tc.doVerify(amt, pendingPsbt, psbtIntent) + if err != nil && tc.expectedErr != "" && + err.Error() != tc.expectedErr { + + t.Fatalf("unexpected error, got '%v' wanted "+ + "'%v'", err, tc.expectedErr) + } + }) + } +} + +// TestPsbtFinalize tests the PSBT finalization process more deeply than just +// the happy path. +func TestPsbtFinalize(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + expectedErr string + doFinalize func(int64, *psbt.Packet, *PsbtIntent) error + }{ + { + name: "nil packet", + expectedErr: "PSBT is nil", + doFinalize: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + return i.Finalize(nil) + }, + }, + { + name: "wrong state", + expectedErr: "invalid state. 
got user_canceled " + + "expected verified", + doFinalize: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + i.State = PsbtInitiatorCanceled + return i.Finalize(p) + }, + }, + { + name: "not verified first", + expectedErr: "PSBT was not verified first", + doFinalize: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + i.State = PsbtVerified + i.PendingPsbt = nil + return i.Finalize(p) + }, + }, + { + name: "output value changed", + expectedErr: "outputs differ from verified PSBT: " + + "output 0 is different", + doFinalize: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + p.UnsignedTx.TxOut[0].Value = 123 + return i.Finalize(p) + }, + }, + { + name: "output pk script changed", + expectedErr: "outputs differ from verified PSBT: " + + "output 0 is different", + doFinalize: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + p.UnsignedTx.TxOut[0].PkScript = []byte{3, 2, 1} + return i.Finalize(p) + }, + }, + { + name: "input previous outpoint index changed", + expectedErr: "inputs differ from verified PSBT: " + + "previous outpoint of input 0 is different", + doFinalize: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + p.UnsignedTx.TxIn[0].PreviousOutPoint.Index = 0 + return i.Finalize(p) + }, + }, + { + name: "input previous outpoint hash changed", + expectedErr: "inputs differ from verified PSBT: " + + "previous outpoint of input 0 is different", + doFinalize: func(amt int64, p *psbt.Packet, + i *PsbtIntent) error { + + prevout := &p.UnsignedTx.TxIn[0].PreviousOutPoint + prevout.Hash = chainhash.Hash{77, 88, 99, 11} + return i.Finalize(p) + }, + }, + } + + // Create a simple assembler and ask it to provision a channel to get + // the funding intent. 
+ a := NewPsbtAssembler(chanCapacity, nil, ¶ms) + intent, err := a.ProvisionChannel(&Request{LocalAmt: chanCapacity}) + if err != nil { + t.Fatalf("error provisioning channel: %v", err) + } + psbtIntent := intent.(*PsbtIntent) + + // Bind our test keys to get the funding parameters. + _, localPubkey := btcec.PrivKeyFromBytes(btcec.S256(), localPrivkey) + _, remotePubkey := btcec.PrivKeyFromBytes(btcec.S256(), remotePrivkey) + psbtIntent.BindKeys( + &keychain.KeyDescriptor{PubKey: localPubkey}, remotePubkey, + ) + + // Loop through all our test cases. + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + // Reset the state from a previous test and create a new + // pending PSBT that we can manipulate. + psbtIntent.State = PsbtOutputKnown + _, amt, pendingPsbt, err := psbtIntent.FundingParams() + if err != nil { + t.Fatalf("unable to get funding params: %v", err) + } + + // We need to have a simulated transaction here that is + // fully funded and signed. + pendingPsbt.UnsignedTx.TxIn = []*wire.TxIn{{ + PreviousOutPoint: wire.OutPoint{ + Index: 1, + Hash: chainhash.Hash{1, 2, 3}, + }, + }} + pendingPsbt.Inputs = []psbt.PInput{{ + WitnessUtxo: &wire.TxOut{ + Value: int64(chanCapacity) + 1, + PkScript: []byte{1, 2, 3}, + }, + FinalScriptWitness: []byte{0x01, 0x00}, + }} + err = psbtIntent.Verify(pendingPsbt) + if err != nil { + t.Fatalf("error verifying PSBT: %v", err) + } + + // Deep clone the PSBT so we don't modify the pending + // one that was registered during Verify. + pendingPsbt = clonePsbt(t, pendingPsbt) + + err = tc.doFinalize(amt, pendingPsbt, psbtIntent) + if (err == nil && tc.expectedErr != "") || + (err != nil && err.Error() != tc.expectedErr) { + + t.Fatalf("unexpected error, got '%v' wanted "+ + "'%v'", err, tc.expectedErr) + } + }) + } +} + +// clonePsbt creates a clone of a PSBT packet by serializing then de-serializing +// it. 
+func clonePsbt(t *testing.T, p *psbt.Packet) *psbt.Packet { + var buf bytes.Buffer + err := p.Serialize(&buf) + if err != nil { + t.Fatalf("error serializing PSBT: %v", err) + } + newPacket, err := psbt.NewFromRawBytes(&buf, false) + if err != nil { + t.Fatalf("error unserializing PSBT: %v", err) + } + return newPacket +} diff --git a/lnwallet/chanfunding/wallet_assembler.go b/lnwallet/chanfunding/wallet_assembler.go new file mode 100644 index 0000000000..554654feae --- /dev/null +++ b/lnwallet/chanfunding/wallet_assembler.go @@ -0,0 +1,343 @@ +package chanfunding + +import ( + "math" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/txsort" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/keychain" +) + +// FullIntent is an intent that is fully backed by the internal wallet. This +// intent differs from the ShimIntent, in that the funding transaction will be +// constructed internally, and will consist of only inputs we wholly control. +// This Intent implements a basic state machine that must be executed in order +// before CompileFundingTx can be called. +// +// Steps to final channel provisioning: +// 1. Call BindKeys to notify the intent which keys to use when constructing +// the multi-sig output. +// 2. Call CompileFundingTx afterwards to obtain the funding transaction. +// +// If either of these steps fail, then the Cancel method MUST be called. +type FullIntent struct { + ShimIntent + + // InputCoins are the set of coins selected as inputs to this funding + // transaction. + InputCoins []Coin + + // ChangeOutputs are the set of outputs that the Assembler will use as + // change from the main funding transaction. + ChangeOutputs []*wire.TxOut + + // coinLocker is the Assembler's instance of the OutpointLocker + // interface. 
+ coinLocker OutpointLocker + + // coinSource is the Assembler's instance of the CoinSource interface. + coinSource CoinSource + + // signer is the Assembler's instance of the Singer interface. + signer input.Signer +} + +// BindKeys is a method unique to the FullIntent variant. This allows the +// caller to decide precisely which keys are used in the final funding +// transaction. This is kept out of the main Assembler as these may may not +// necessarily be under full control of the wallet. Only after this method has +// been executed will CompileFundingTx succeed. +func (f *FullIntent) BindKeys(localKey *keychain.KeyDescriptor, + remoteKey *btcec.PublicKey) { + + f.localKey = localKey + f.remoteKey = remoteKey +} + +// CompileFundingTx is to be called after BindKeys on the sub-intent has been +// called. This method will construct the final funding transaction, and fully +// sign all inputs that are known by the backing CoinSource. After this method +// returns, the Intent is assumed to be complete, as the output can be created +// at any point. +func (f *FullIntent) CompileFundingTx(extraInputs []*wire.TxIn, + extraOutputs []*wire.TxOut) (*wire.MsgTx, error) { + + // Create a blank, fresh transaction. Soon to be a complete funding + // transaction which will allow opening a lightning channel. + fundingTx := wire.NewMsgTx(2) + + // Add all multi-party inputs and outputs to the transaction. + for _, coin := range f.InputCoins { + fundingTx.AddTxIn(&wire.TxIn{ + PreviousOutPoint: coin.OutPoint, + }) + } + for _, theirInput := range extraInputs { + fundingTx.AddTxIn(theirInput) + } + for _, ourChangeOutput := range f.ChangeOutputs { + fundingTx.AddTxOut(ourChangeOutput) + } + for _, theirChangeOutput := range extraOutputs { + fundingTx.AddTxOut(theirChangeOutput) + } + + _, fundingOutput, err := f.FundingOutput() + if err != nil { + return nil, err + } + + // Sort the transaction. 
Since both side agree to a canonical ordering, + // by sorting we no longer need to send the entire transaction. Only + // signatures will be exchanged. + fundingTx.AddTxOut(fundingOutput) + txsort.InPlaceSort(fundingTx) + + // Now that the funding tx has been fully assembled, we'll locate the + // index of the funding output so we can create our final channel + // point. + _, multiSigIndex := input.FindScriptOutputIndex( + fundingTx, fundingOutput.PkScript, + ) + + // Next, sign all inputs that are ours, collecting the signatures in + // order of the inputs. + signDesc := input.SignDescriptor{ + HashType: txscript.SigHashAll, + SigHashes: txscript.NewTxSigHashes(fundingTx), + } + for i, txIn := range fundingTx.TxIn { + // We can only sign this input if it's ours, so we'll ask the + // coin source if it can map this outpoint into a coin we own. + // If not, then we'll continue as it isn't our input. + info, err := f.coinSource.CoinFromOutPoint( + txIn.PreviousOutPoint, + ) + if err != nil { + continue + } + + // Now that we know the input is ours, we'll populate the + // signDesc with the per input unique information. + signDesc.Output = &wire.TxOut{ + Value: info.Value, + PkScript: info.PkScript, + } + signDesc.InputIndex = i + + // Finally, we'll sign the input as is, and populate the input + // with the witness and sigScript (if needed). + inputScript, err := f.signer.ComputeInputScript( + fundingTx, &signDesc, + ) + if err != nil { + return nil, err + } + + txIn.SignatureScript = inputScript.SigScript + txIn.Witness = inputScript.Witness + } + + // Finally, we'll populate the chanPoint now that we've fully + // constructed the funding transaction. + f.chanPoint = &wire.OutPoint{ + Hash: fundingTx.TxHash(), + Index: multiSigIndex, + } + + return fundingTx, nil +} + +// Cancel allows the caller to cancel a funding Intent at any time. This will +// return any resources such as coins back to the eligible pool to be used in +// order channel fundings. 
+// +// NOTE: Part of the chanfunding.Intent interface. +func (f *FullIntent) Cancel() { + for _, coin := range f.InputCoins { + f.coinLocker.UnlockOutpoint(coin.OutPoint) + } + + f.ShimIntent.Cancel() +} + +// A compile-time check to ensure FullIntent meets the Intent interface. +var _ Intent = (*FullIntent)(nil) + +// WalletConfig is the main config of the WalletAssembler. +type WalletConfig struct { + // CoinSource is what the WalletAssembler uses to list/locate coins. + CoinSource CoinSource + + // CoinSelectionLocker allows the WalletAssembler to gain exclusive + // access to the current set of coins returned by the CoinSource. + CoinSelectLocker CoinSelectionLocker + + // CoinLocker is what the WalletAssembler uses to lock coins that may + // be used as inputs for a new funding transaction. + CoinLocker OutpointLocker + + // Signer allows the WalletAssembler to sign inputs on any potential + // funding transactions. + Signer input.Signer + + // DustLimit is the current dust limit. We'll use this to ensure that + // we don't make dust outputs on the funding transaction. + DustLimit btcutil.Amount +} + +// WalletAssembler is an instance of the Assembler interface that is backed by +// a full wallet. This variant of the Assembler interface will produce the +// entirety of the funding transaction within the wallet. This implements the +// typical funding flow that is initiated either on the p2p level or using the +// CLi. +type WalletAssembler struct { + cfg WalletConfig +} + +// NewWalletAssembler creates a new instance of the WalletAssembler from a +// fully populated wallet config. +func NewWalletAssembler(cfg WalletConfig) *WalletAssembler { + return &WalletAssembler{ + cfg: cfg, + } +} + +// ProvisionChannel is the main entry point to begin a funding workflow given a +// fully populated request. 
The internal WalletAssembler will perform coin +// selection in a goroutine safe manner, returning an Intent that will allow +// the caller to finalize the funding process. +// +// NOTE: To cancel the funding flow the Cancel() method on the returned Intent, +// MUST be called. +// +// NOTE: This is a part of the chanfunding.Assembler interface. +func (w *WalletAssembler) ProvisionChannel(r *Request) (Intent, error) { + var intent Intent + + // We hold the coin select mutex while querying for outputs, and + // performing coin selection in order to avoid inadvertent double + // spends across funding transactions. + err := w.cfg.CoinSelectLocker.WithCoinSelectLock(func() error { + log.Infof("Performing funding tx coin selection using %v "+ + "sat/kw as fee rate", int64(r.FeeRate)) + + // Find all unlocked unspent witness outputs that satisfy the + // minimum number of confirmations required. + coins, err := w.cfg.CoinSource.ListCoins( + r.MinConfs, math.MaxInt32, + ) + if err != nil { + return err + } + + var ( + selectedCoins []Coin + localContributionAmt btcutil.Amount + changeAmt btcutil.Amount + ) + + // Perform coin selection over our available, unlocked unspent + // outputs in order to find enough coins to meet the funding + // amount requirements. + switch { + // If there's no funding amount at all (receiving an inbound + // single funder request), then we don't need to perform any + // coin selection at all. + case r.LocalAmt == 0: + break + + // In case this request want the fees subtracted from the local + // amount, we'll call the specialized method for that. This + // ensures that we won't deduct more that the specified balance + // from our wallet. + case r.SubtractFees: + dustLimit := w.cfg.DustLimit + selectedCoins, localContributionAmt, changeAmt, err = CoinSelectSubtractFees( + r.FeeRate, r.LocalAmt, dustLimit, coins, + ) + if err != nil { + return err + } + + // Otherwise do a normal coin selection where we target a given + // funding amount. 
+ default: + localContributionAmt = r.LocalAmt + selectedCoins, changeAmt, err = CoinSelect( + r.FeeRate, r.LocalAmt, coins, + ) + if err != nil { + return err + } + } + + // Record any change output(s) generated as a result of the + // coin selection, but only if the addition of the output won't + // lead to the creation of dust. + var changeOutput *wire.TxOut + if changeAmt != 0 && changeAmt > w.cfg.DustLimit { + changeAddr, err := r.ChangeAddr() + if err != nil { + return err + } + changeScript, err := txscript.PayToAddrScript(changeAddr) + if err != nil { + return err + } + + changeOutput = &wire.TxOut{ + Value: int64(changeAmt), + PkScript: changeScript, + } + } + + // Lock the selected coins. These coins are now "reserved", + // this prevents concurrent funding requests from referring to + // and this double-spending the same set of coins. + for _, coin := range selectedCoins { + outpoint := coin.OutPoint + + w.cfg.CoinLocker.LockOutpoint(outpoint) + } + + newIntent := &FullIntent{ + ShimIntent: ShimIntent{ + localFundingAmt: localContributionAmt, + remoteFundingAmt: r.RemoteAmt, + }, + InputCoins: selectedCoins, + coinLocker: w.cfg.CoinLocker, + coinSource: w.cfg.CoinSource, + signer: w.cfg.Signer, + } + + if changeOutput != nil { + newIntent.ChangeOutputs = []*wire.TxOut{changeOutput} + } + + intent = newIntent + + return nil + }) + if err != nil { + return nil, err + } + + return intent, nil +} + +// FundingTxAvailable is an empty method that an assembler can implement to +// signal to callers that its able to provide the funding transaction for the +// channel via the intent it returns. +// +// NOTE: This method is a part of the FundingTxAssembler interface. +func (w *WalletAssembler) FundingTxAvailable() {} + +// A compile-time assertion to ensure the WalletAssembler meets the +// FundingTxAssembler interface. 
+var _ FundingTxAssembler = (*WalletAssembler)(nil) diff --git a/lnwallet/channel.go b/lnwallet/channel.go index 53b1613686..600820f2cb 100644 --- a/lnwallet/channel.go +++ b/lnwallet/channel.go @@ -4,6 +4,7 @@ import ( "bytes" "container/list" "crypto/sha256" + "errors" "fmt" "math" "sort" @@ -14,13 +15,15 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btclog" "github.com/btcsuite/btcutil" "github.com/btcsuite/btcutil/txsort" "github.com/davecgh/go-spew/spew" - + "github.com/lightningnetwork/lnd/build" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" ) @@ -64,6 +67,10 @@ var ( ErrBelowMinHTLC = fmt.Errorf("proposed HTLC value is below minimum " + "allowed HTLC value") + // ErrInvalidHTLCAmt signals that a proposed HTLC has a value that is + // not positive. + ErrInvalidHTLCAmt = fmt.Errorf("proposed HTLC value must be positive") + // ErrCannotSyncCommitChains is returned if, upon receiving a ChanSync // message, the state machine deems that is unable to properly // synchronize states with the remote peer. In this case we should fail @@ -500,7 +507,8 @@ type commitment struct { // evaluating all the add/remove/settle log entries before the listed // indexes. // - // NOTE: This is the balance *before* subtracting any commitment fee. + // NOTE: This is the balance *after* subtracting any commitment fee, + // AND anchor output values. ourBalance lnwire.MilliSatoshi theirBalance lnwire.MilliSatoshi @@ -511,7 +519,7 @@ type commitment struct { // feePerKw is the fee per kw used to calculate this commitment // transaction's fee. - feePerKw SatPerKWeight + feePerKw chainfee.SatPerKWeight // dustLimit is the limit on the commitment transaction such that no // output values should be below this amount. 
@@ -545,7 +553,7 @@ type commitment struct { // transition. This ensures that we don't assign multiple HTLC's to the same // index within the commitment transaction. func locateOutputIndex(p *PaymentDescriptor, tx *wire.MsgTx, ourCommit bool, - dups map[PaymentHash][]int32) (int32, error) { + dups map[PaymentHash][]int32, cltvs []uint32) (int32, error) { // Checks to see if element (e) exists in slice (s). contains := func(s []int32, e int32) bool { @@ -567,8 +575,11 @@ func locateOutputIndex(p *PaymentDescriptor, tx *wire.MsgTx, ourCommit bool, } for i, txOut := range tx.TxOut { + cltv := cltvs[i] + if bytes.Equal(txOut.PkScript, pkScript) && - txOut.Value == int64(p.Amount.ToSatoshis()) { + txOut.Value == int64(p.Amount.ToSatoshis()) && + cltv == p.Timeout { // If this payment hash and index has already been // found, then we'll continue in order to avoid any @@ -583,8 +594,8 @@ func locateOutputIndex(p *PaymentDescriptor, tx *wire.MsgTx, ourCommit bool, } } - return 0, fmt.Errorf("unable to find htlc: script=%x, value=%v", - pkScript, p.Amount) + return 0, fmt.Errorf("unable to find htlc: script=%x, value=%v, "+ + "cltv=%v", pkScript, p.Amount, p.Timeout) } // populateHtlcIndexes modifies the set of HTLC's locked-into the target view @@ -592,7 +603,9 @@ func locateOutputIndex(p *PaymentDescriptor, tx *wire.MsgTx, ourCommit bool, // we need to keep track of the indexes of each HTLC in order to properly write // the current state to disk, and also to locate the PaymentDescriptor // corresponding to HTLC outputs in the commitment transaction. -func (c *commitment) populateHtlcIndexes() error { +func (c *commitment) populateHtlcIndexes(chanType channeldb.ChannelType, + cltvs []uint32) error { + // First, we'll set up some state to allow us to locate the output // index of the all the HTLC's within the commitment transaction. 
We // must keep this index so we can validate the HTLC signatures sent to @@ -604,8 +617,10 @@ func (c *commitment) populateHtlcIndexes() error { // populateIndex is a helper function that populates the necessary // indexes within the commitment view for a particular HTLC. populateIndex := func(htlc *PaymentDescriptor, incoming bool) error { - isDust := htlcIsDust(incoming, c.isOurs, c.feePerKw, - htlc.Amount.ToSatoshis(), c.dustLimit) + isDust := htlcIsDust( + chanType, incoming, c.isOurs, c.feePerKw, + htlc.Amount.ToSatoshis(), c.dustLimit, + ) var err error switch { @@ -626,7 +641,7 @@ func (c *commitment) populateHtlcIndexes() error { // signatures. case c.isOurs: htlc.localOutputIndex, err = locateOutputIndex( - htlc, c.txn, c.isOurs, dups, + htlc, c.txn, c.isOurs, dups, cltvs, ) if err != nil { return err @@ -647,7 +662,7 @@ func (c *commitment) populateHtlcIndexes() error { // index within the HTLC index. case !c.isOurs: htlc.remoteOutputIndex, err = locateOutputIndex( - htlc, c.txn, c.isOurs, dups, + htlc, c.txn, c.isOurs, dups, cltvs, ) if err != nil { return err @@ -758,9 +773,10 @@ func (c *commitment) toDiskCommit(ourCommit bool) *channeldb.ChannelCommitment { // commitment struct and updateLog. This function is used when we need to // restore commitment state written do disk back into memory once we need to // restart a channel session. 
-func (lc *LightningChannel) diskHtlcToPayDesc(feeRate SatPerKWeight, +func (lc *LightningChannel) diskHtlcToPayDesc(feeRate chainfee.SatPerKWeight, commitHeight uint64, htlc *channeldb.HTLC, localCommitKeys, - remoteCommitKeys *CommitmentKeyRing) (PaymentDescriptor, error) { + remoteCommitKeys *CommitmentKeyRing, isLocal bool) (PaymentDescriptor, + error) { // The proper pkScripts for this PaymentDescriptor must be // generated so we can easily locate them within the commitment @@ -770,6 +786,7 @@ func (lc *LightningChannel) diskHtlcToPayDesc(feeRate SatPerKWeight, ourWitnessScript, theirWitnessScript []byte pd PaymentDescriptor err error + chanType = lc.channelState.ChanType ) // If the either outputs is dust from the local or remote node's @@ -777,27 +794,46 @@ func (lc *LightningChannel) diskHtlcToPayDesc(feeRate SatPerKWeight, // generate them in order to locate the outputs within the commitment // transaction. As we'll mark dust with a special output index in the // on-disk state snapshot. 
- isDustLocal := htlcIsDust(htlc.Incoming, true, feeRate, - htlc.Amt.ToSatoshis(), lc.channelState.LocalChanCfg.DustLimit) + isDustLocal := htlcIsDust( + chanType, htlc.Incoming, true, feeRate, + htlc.Amt.ToSatoshis(), lc.channelState.LocalChanCfg.DustLimit, + ) if !isDustLocal && localCommitKeys != nil { ourP2WSH, ourWitnessScript, err = genHtlcScript( - htlc.Incoming, true, htlc.RefundTimeout, htlc.RHash, - localCommitKeys) + chanType, htlc.Incoming, true, htlc.RefundTimeout, + htlc.RHash, localCommitKeys, + ) if err != nil { return pd, err } } - isDustRemote := htlcIsDust(htlc.Incoming, false, feeRate, - htlc.Amt.ToSatoshis(), lc.channelState.RemoteChanCfg.DustLimit) + isDustRemote := htlcIsDust( + chanType, htlc.Incoming, false, feeRate, + htlc.Amt.ToSatoshis(), lc.channelState.RemoteChanCfg.DustLimit, + ) if !isDustRemote && remoteCommitKeys != nil { theirP2WSH, theirWitnessScript, err = genHtlcScript( - htlc.Incoming, false, htlc.RefundTimeout, htlc.RHash, - remoteCommitKeys) + chanType, htlc.Incoming, false, htlc.RefundTimeout, + htlc.RHash, remoteCommitKeys, + ) if err != nil { return pd, err } } + // Reconstruct the proper local/remote output indexes from the HTLC's + // persisted output index depending on whose commitment we are + // generating. + var ( + localOutputIndex int32 + remoteOutputIndex int32 + ) + if isLocal { + localOutputIndex = htlc.OutputIndex + } else { + remoteOutputIndex = htlc.OutputIndex + } + // With the scripts reconstructed (depending on if this is our commit // vs theirs or a pending commit for the remote party), we can now // re-create the original payment descriptor. 
@@ -809,6 +845,8 @@ func (lc *LightningChannel) diskHtlcToPayDesc(feeRate SatPerKWeight, HtlcIndex: htlc.HtlcIndex, LogIndex: htlc.LogIndex, OnionBlob: htlc.OnionBlob, + localOutputIndex: localOutputIndex, + remoteOutputIndex: remoteOutputIndex, ourPkScript: ourP2WSH, ourWitnessScript: ourWitnessScript, theirPkScript: theirP2WSH, @@ -823,8 +861,9 @@ func (lc *LightningChannel) diskHtlcToPayDesc(feeRate SatPerKWeight, // these payment descriptors can be re-inserted into the in-memory updateLog // for each side. func (lc *LightningChannel) extractPayDescs(commitHeight uint64, - feeRate SatPerKWeight, htlcs []channeldb.HTLC, localCommitKeys, - remoteCommitKeys *CommitmentKeyRing) ([]PaymentDescriptor, []PaymentDescriptor, error) { + feeRate chainfee.SatPerKWeight, htlcs []channeldb.HTLC, localCommitKeys, + remoteCommitKeys *CommitmentKeyRing, isLocal bool) ([]PaymentDescriptor, + []PaymentDescriptor, error) { var ( incomingHtlcs []PaymentDescriptor @@ -842,6 +881,7 @@ func (lc *LightningChannel) extractPayDescs(commitHeight uint64, payDesc, err := lc.diskHtlcToPayDesc( feeRate, commitHeight, &htlc, localCommitKeys, remoteCommitKeys, + isLocal, ) if err != nil { return incomingHtlcs, outgoingHtlcs, err @@ -864,12 +904,6 @@ func (lc *LightningChannel) diskCommitToMemCommit(isLocal bool, diskCommit *channeldb.ChannelCommitment, localCommitPoint, remoteCommitPoint *btcec.PublicKey) (*commitment, error) { - // If this commit is tweakless, then it'll affect the way we derive our - // keys, which will affect the commitment transaction reconstruction. - // So we'll determine this first, before we do anything else. - tweaklessCommit := (lc.channelState.ChanType == - channeldb.SingleFunderTweakless) - // First, we'll need to re-derive the commitment key ring for each // party used within this particular state. 
If this is a pending commit // (we extended but weren't able to complete the commitment dance @@ -878,14 +912,16 @@ func (lc *LightningChannel) diskCommitToMemCommit(isLocal bool, var localCommitKeys, remoteCommitKeys *CommitmentKeyRing if localCommitPoint != nil { localCommitKeys = DeriveCommitmentKeys( - localCommitPoint, true, tweaklessCommit, - lc.localChanCfg, lc.remoteChanCfg, + localCommitPoint, true, lc.channelState.ChanType, + &lc.channelState.LocalChanCfg, + &lc.channelState.RemoteChanCfg, ) } if remoteCommitPoint != nil { remoteCommitKeys = DeriveCommitmentKeys( - remoteCommitPoint, false, tweaklessCommit, - lc.localChanCfg, lc.remoteChanCfg, + remoteCommitPoint, false, lc.channelState.ChanType, + &lc.channelState.LocalChanCfg, + &lc.channelState.RemoteChanCfg, ) } @@ -893,8 +929,10 @@ func (lc *LightningChannel) diskCommitToMemCommit(isLocal bool, // HTLC"s into PaymentDescriptor's so we can re-insert them into our // update log. incomingHtlcs, outgoingHtlcs, err := lc.extractPayDescs( - diskCommit.CommitHeight, SatPerKWeight(diskCommit.FeePerKw), + diskCommit.CommitHeight, + chainfee.SatPerKWeight(diskCommit.FeePerKw), diskCommit.Htlcs, localCommitKeys, remoteCommitKeys, + isLocal, ) if err != nil { return nil, err @@ -914,7 +952,7 @@ func (lc *LightningChannel) diskCommitToMemCommit(isLocal bool, txn: diskCommit.CommitTx, sig: diskCommit.CommitSig, fee: diskCommit.CommitFee, - feePerKw: SatPerKWeight(diskCommit.FeePerKw), + feePerKw: chainfee.SatPerKWeight(diskCommit.FeePerKw), incomingHTLCs: incomingHtlcs, outgoingHTLCs: outgoingHtlcs, } @@ -924,130 +962,9 @@ func (lc *LightningChannel) diskCommitToMemCommit(isLocal bool, commit.dustLimit = lc.channelState.RemoteChanCfg.DustLimit } - // Finally, we'll re-populate the HTLC index for this state so we can - // properly locate each HTLC within the commitment transaction. 
- if err := commit.populateHtlcIndexes(); err != nil { - return nil, err - } - return commit, nil } -// CommitmentKeyRing holds all derived keys needed to construct commitment and -// HTLC transactions. The keys are derived differently depending whether the -// commitment transaction is ours or the remote peer's. Private keys associated -// with each key may belong to the commitment owner or the "other party" which -// is referred to in the field comments, regardless of which is local and which -// is remote. -type CommitmentKeyRing struct { - // commitPoint is the "per commitment point" used to derive the tweak - // for each base point. - CommitPoint *btcec.PublicKey - - // LocalCommitKeyTweak is the tweak used to derive the local public key - // from the local payment base point or the local private key from the - // base point secret. This may be included in a SignDescriptor to - // generate signatures for the local payment key. - LocalCommitKeyTweak []byte - - // TODO(roasbeef): need delay tweak as well? - - // LocalHtlcKeyTweak is the teak used to derive the local HTLC key from - // the local HTLC base point. This value is needed in order to - // derive the final key used within the HTLC scripts in the commitment - // transaction. - LocalHtlcKeyTweak []byte - - // LocalHtlcKey is the key that will be used in the "to self" clause of - // any HTLC scripts within the commitment transaction for this key ring - // set. - LocalHtlcKey *btcec.PublicKey - - // RemoteHtlcKey is the key that will be used in clauses within the - // HTLC script that send money to the remote party. - RemoteHtlcKey *btcec.PublicKey - - // DelayKey is the commitment transaction owner's key which is included - // in HTLC success and timeout transaction scripts. - DelayKey *btcec.PublicKey - - // NoDelayKey is the other party's payment key in the commitment tx. - // This is the key used to generate the unencumbered output within the - // commitment transaction. 
- NoDelayKey *btcec.PublicKey - - // RevocationKey is the key that can be used by the other party to - // redeem outputs from a revoked commitment transaction if it were to - // be published. - RevocationKey *btcec.PublicKey -} - -// DeriveCommitmentKey generates a new commitment key set using the base points -// and commitment point. The keys are derived differently depending whether the -// commitment transaction is ours or the remote peer's. -func DeriveCommitmentKeys(commitPoint *btcec.PublicKey, - isOurCommit, tweaklessCommit bool, - localChanCfg, remoteChanCfg *channeldb.ChannelConfig) *CommitmentKeyRing { - - // First, we'll derive all the keys that don't depend on the context of - // whose commitment transaction this is. - keyRing := &CommitmentKeyRing{ - CommitPoint: commitPoint, - - LocalCommitKeyTweak: input.SingleTweakBytes( - commitPoint, localChanCfg.PaymentBasePoint.PubKey, - ), - LocalHtlcKeyTweak: input.SingleTweakBytes( - commitPoint, localChanCfg.HtlcBasePoint.PubKey, - ), - LocalHtlcKey: input.TweakPubKey( - localChanCfg.HtlcBasePoint.PubKey, commitPoint, - ), - RemoteHtlcKey: input.TweakPubKey( - remoteChanCfg.HtlcBasePoint.PubKey, commitPoint, - ), - } - - // We'll now compute the delay, no delay, and revocation key based on - // the current commitment point. All keys are tweaked each state in - // order to ensure the keys from each state are unlinkable. To create - // the revocation key, we take the opposite party's revocation base - // point and combine that with the current commitment point. 
- var ( - delayBasePoint *btcec.PublicKey - noDelayBasePoint *btcec.PublicKey - revocationBasePoint *btcec.PublicKey - ) - if isOurCommit { - delayBasePoint = localChanCfg.DelayBasePoint.PubKey - noDelayBasePoint = remoteChanCfg.PaymentBasePoint.PubKey - revocationBasePoint = remoteChanCfg.RevocationBasePoint.PubKey - } else { - delayBasePoint = remoteChanCfg.DelayBasePoint.PubKey - noDelayBasePoint = localChanCfg.PaymentBasePoint.PubKey - revocationBasePoint = localChanCfg.RevocationBasePoint.PubKey - } - - // With the base points assigned, we can now derive the actual keys - // using the base point, and the current commitment tweak. - keyRing.DelayKey = input.TweakPubKey(delayBasePoint, commitPoint) - keyRing.RevocationKey = input.DeriveRevocationPubkey( - revocationBasePoint, commitPoint, - ) - - // If this commitment should omit the tweak for the remote point, then - // we'll use that directly, and ignore the commitPoint tweak. - if tweaklessCommit { - keyRing.NoDelayKey = noDelayBasePoint - } else { - keyRing.NoDelayKey = input.TweakPubKey( - noDelayBasePoint, commitPoint, - ) - } - - return keyRing -} - // commitmentChain represents a chain of unrevoked commitments. The tail of the // chain is the latest fully signed, yet unrevoked commitment. Two chains are // tracked, one for the local node, and another for the remote node. New @@ -1175,6 +1092,13 @@ func (u *updateLog) appendUpdate(pd *PaymentDescriptor) { u.logIndex++ } +// restoreUpdate appends a new update to the tip of the updateLog. The entry is +// also added to index accordingly. This function differs from appendUpdate in +// that it won't increment the log index counter. +func (u *updateLog) restoreUpdate(pd *PaymentDescriptor) { + u.updateIndex[pd.LogIndex] = u.PushBack(pd) +} + // appendHtlc appends a new HTLC offer to the tip of the update log. The entry // is also added to the offer index accordingly. 
func (u *updateLog) appendHtlc(pd *PaymentDescriptor) { @@ -1337,10 +1261,6 @@ type LightningChannel struct { // Capacity is the total capacity of this channel. Capacity btcutil.Amount - // stateHintObfuscator is a 48-bit state hint that's used to obfuscate - // the current state number on the commitment transactions. - stateHintObfuscator [StateHintSize]byte - // currentHeight is the current height of our local commitment chain. // This is also the same as the number of updates to the channel we've // accepted. @@ -1358,9 +1278,7 @@ type LightningChannel struct { channelState *channeldb.OpenChannel - localChanCfg *channeldb.ChannelConfig - - remoteChanCfg *channeldb.ChannelConfig + commitBuilder *CommitmentBuilder // [local|remote]Log is a (mostly) append-only log storing all the HTLC // updates to this channel. The log is walked backwards as HTLC updates @@ -1378,6 +1296,9 @@ type LightningChannel struct { // channel. RemoteFundingKey *btcec.PublicKey + // log is a channel-specific logging instance. 
+ log btclog.Logger + sync.RWMutex } @@ -1402,6 +1323,8 @@ func NewLightningChannel(signer input.Signer, localCommit.RemoteLogIndex, localCommit.RemoteHtlcIndex, ) + logPrefix := fmt.Sprintf("ChannelPoint(%v):", state.FundingOutpoint) + lc := &LightningChannel{ Signer: signer, sigPool: sigPool, @@ -1409,14 +1332,14 @@ func NewLightningChannel(signer input.Signer, remoteCommitChain: newCommitmentChain(), localCommitChain: newCommitmentChain(), channelState: state, - localChanCfg: &state.LocalChanCfg, - remoteChanCfg: &state.RemoteChanCfg, + commitBuilder: NewCommitmentBuilder(state), localUpdateLog: localUpdateLog, remoteUpdateLog: remoteUpdateLog, ChanPoint: &state.FundingOutpoint, Capacity: state.Capacity, LocalFundingKey: state.LocalChanCfg.MultiSigKey.PubKey, RemoteFundingKey: state.RemoteChanCfg.MultiSigKey.PubKey, + log: build.NewPrefixLog(logPrefix, walletLog), } // With the main channel struct reconstructed, we'll now restore the @@ -1433,16 +1356,16 @@ func NewLightningChannel(signer input.Signer, return nil, err } - lc.createStateHintObfuscator() - return lc, nil } // createSignDesc derives the SignDescriptor for commitment transactions from // other fields on the LightningChannel. func (lc *LightningChannel) createSignDesc() error { - localKey := lc.localChanCfg.MultiSigKey.PubKey.SerializeCompressed() - remoteKey := lc.remoteChanCfg.MultiSigKey.PubKey.SerializeCompressed() + localKey := lc.channelState.LocalChanCfg.MultiSigKey.PubKey. + SerializeCompressed() + remoteKey := lc.channelState.RemoteChanCfg.MultiSigKey.PubKey. 
+ SerializeCompressed() multiSigScript, err := input.GenMultiSigScript(localKey, remoteKey) if err != nil { @@ -1454,7 +1377,7 @@ func (lc *LightningChannel) createSignDesc() error { return err } lc.signDesc = &input.SignDescriptor{ - KeyDesc: lc.localChanCfg.MultiSigKey, + KeyDesc: lc.channelState.LocalChanCfg.MultiSigKey, WitnessScript: multiSigScript, Output: &wire.TxOut{ PkScript: fundingPkScript, @@ -1467,24 +1390,6 @@ func (lc *LightningChannel) createSignDesc() error { return nil } -// createStateHintObfuscator derives and assigns the state hint obfuscator for -// the channel, which is used to encode the commitment height in the sequence -// number of commitment transaction inputs. -func (lc *LightningChannel) createStateHintObfuscator() { - state := lc.channelState - if state.IsInitiator { - lc.stateHintObfuscator = DeriveStateHintObfuscator( - state.LocalChanCfg.PaymentBasePoint.PubKey, - state.RemoteChanCfg.PaymentBasePoint.PubKey, - ) - } else { - lc.stateHintObfuscator = DeriveStateHintObfuscator( - state.RemoteChanCfg.PaymentBasePoint.PubKey, - state.LocalChanCfg.PaymentBasePoint.PubKey, - ) - } -} - // ResetState resets the state of the channel back to the default state. This // ensures that any active goroutines which need to act based on on-chain // events do so properly. @@ -1502,7 +1407,7 @@ func (lc *LightningChannel) ResetState() { // if nothing happened. 
func (lc *LightningChannel) logUpdateToPayDesc(logUpdate *channeldb.LogUpdate, remoteUpdateLog *updateLog, commitHeight uint64, - feeRate SatPerKWeight, remoteCommitKeys *CommitmentKeyRing, + feeRate chainfee.SatPerKWeight, remoteCommitKeys *CommitmentKeyRing, remoteDustLimit btcutil.Amount) (*PaymentDescriptor, error) { // Depending on the type of update message we'll map that to a distinct @@ -1533,11 +1438,14 @@ func (lc *LightningChannel) logUpdateToPayDesc(logUpdate *channeldb.LogUpdate, pd.OnionBlob = make([]byte, len(wireMsg.OnionBlob)) copy(pd.OnionBlob[:], wireMsg.OnionBlob[:]) - isDustRemote := htlcIsDust(false, false, feeRate, - wireMsg.Amount.ToSatoshis(), remoteDustLimit) + isDustRemote := htlcIsDust( + lc.channelState.ChanType, false, false, feeRate, + wireMsg.Amount.ToSatoshis(), remoteDustLimit, + ) if !isDustRemote { theirP2WSH, theirWitnessScript, err := genHtlcScript( - false, false, wireMsg.Expiry, wireMsg.PaymentHash, + lc.channelState.ChanType, false, false, + wireMsg.Expiry, wireMsg.PaymentHash, remoteCommitKeys, ) if err != nil { @@ -1556,6 +1464,7 @@ func (lc *LightningChannel) logUpdateToPayDesc(logUpdate *channeldb.LogUpdate, pd = &PaymentDescriptor{ Amount: ogHTLC.Amount, + RHash: ogHTLC.RHash, RPreimage: wireMsg.PaymentPreimage, LogIndex: logUpdate.LogIndex, ParentIndex: ogHTLC.HtlcIndex, @@ -1618,6 +1527,110 @@ func (lc *LightningChannel) logUpdateToPayDesc(logUpdate *channeldb.LogUpdate, return pd, nil } +// remoteLogUpdateToPayDesc converts a LogUpdate into a matching +// PaymentDescriptor entry that can be re-inserted into the update log. This +// method is used when we revoked a local commitment, but the connection was +// obstructed before we could sign a remote commitment that contains these +// updates. In this case, we need to re-insert the original entries back into +// the update log so we can resume as if nothing happened. The height of the +// latest local commitment is also expected to be provided. 
We are restoring all +// log update entries with this height, even though the real commitment height +// may be lower. In the way these fields are used elsewhere, this doesn't change +// anything. +func (lc *LightningChannel) remoteLogUpdateToPayDesc(logUpdate *channeldb.LogUpdate, + localUpdateLog *updateLog, commitHeight uint64) (*PaymentDescriptor, + error) { + + switch wireMsg := logUpdate.UpdateMsg.(type) { + + case *lnwire.UpdateAddHTLC: + pd := &PaymentDescriptor{ + RHash: wireMsg.PaymentHash, + Timeout: wireMsg.Expiry, + Amount: wireMsg.Amount, + EntryType: Add, + HtlcIndex: wireMsg.ID, + LogIndex: logUpdate.LogIndex, + addCommitHeightLocal: commitHeight, + } + pd.OnionBlob = make([]byte, len(wireMsg.OnionBlob)) + copy(pd.OnionBlob, wireMsg.OnionBlob[:]) + + // We don't need to generate an htlc script yet. This will be + // done once we sign our remote commitment. + + return pd, nil + + // For HTLCs that the remote party settled, we'll fetch the original + // offered HTLC from the local update log so we can retrieve the same + // PaymentDescriptor that ReceiveHTLCSettle would produce. + case *lnwire.UpdateFulfillHTLC: + ogHTLC := localUpdateLog.lookupHtlc(wireMsg.ID) + + return &PaymentDescriptor{ + Amount: ogHTLC.Amount, + RHash: ogHTLC.RHash, + RPreimage: wireMsg.PaymentPreimage, + LogIndex: logUpdate.LogIndex, + ParentIndex: ogHTLC.HtlcIndex, + EntryType: Settle, + removeCommitHeightLocal: commitHeight, + }, nil + + // If we received a failure for a prior outgoing HTLC, then we'll + // consult the local update log so we can retrieve the information of + // the original HTLC we're failing. 
+ case *lnwire.UpdateFailHTLC: + ogHTLC := localUpdateLog.lookupHtlc(wireMsg.ID) + + return &PaymentDescriptor{ + Amount: ogHTLC.Amount, + RHash: ogHTLC.RHash, + ParentIndex: ogHTLC.HtlcIndex, + LogIndex: logUpdate.LogIndex, + EntryType: Fail, + FailReason: wireMsg.Reason[:], + removeCommitHeightLocal: commitHeight, + }, nil + + // HTLC fails due to malformed onion blobs are treated the exact same + // way as regular HTLC fails. + case *lnwire.UpdateFailMalformedHTLC: + ogHTLC := localUpdateLog.lookupHtlc(wireMsg.ID) + + return &PaymentDescriptor{ + Amount: ogHTLC.Amount, + RHash: ogHTLC.RHash, + ParentIndex: ogHTLC.HtlcIndex, + LogIndex: logUpdate.LogIndex, + EntryType: MalformedFail, + FailCode: wireMsg.FailureCode, + ShaOnionBlob: wireMsg.ShaOnionBlob, + removeCommitHeightLocal: commitHeight, + }, nil + + // For fee updates we'll create a FeeUpdate type to add to the log. We + // reuse the amount field to hold the fee rate. Since the amount field + // is denominated in msat we won't lose precision when storing the + // sat/kw denominated feerate. Note that we set both the add and remove + // height to the same value, as we consider the fee update locked in by + // adding and removing it at the same height. + case *lnwire.UpdateFee: + return &PaymentDescriptor{ + LogIndex: logUpdate.LogIndex, + Amount: lnwire.NewMSatFromSatoshis( + btcutil.Amount(wireMsg.FeePerKw), + ), + EntryType: FeeUpdate, + addCommitHeightLocal: commitHeight, + removeCommitHeightLocal: commitHeight, + }, nil + + default: + return nil, errors.New("unknown message type") + } +} + // restoreCommitState will restore the local commitment chain and updateLog // state to a consistent in-memory representation of the passed disk commitment. // This method is to be used upon reconnection to our channel counter party. 
@@ -1652,8 +1665,8 @@ func (lc *LightningChannel) restoreCommitState( } lc.localCommitChain.addCommitment(localCommit) - walletLog.Debugf("ChannelPoint(%v), starting local commitment: %v", - lc.channelState.FundingOutpoint, newLogClosure(func() string { + lc.log.Debugf("starting local commitment: %v", + newLogClosure(func() string { return spew.Sdump(lc.localCommitChain.tail()) }), ) @@ -1668,8 +1681,8 @@ func (lc *LightningChannel) restoreCommitState( } lc.remoteCommitChain.addCommitment(remoteCommit) - walletLog.Debugf("ChannelPoint(%v), starting remote commitment: %v", - lc.channelState.FundingOutpoint, newLogClosure(func() string { + lc.log.Debugf("starting remote commitment: %v", + newLogClosure(func() string { return spew.Sdump(lc.remoteCommitChain.tail()) }), ) @@ -1703,8 +1716,7 @@ func (lc *LightningChannel) restoreCommitState( } lc.remoteCommitChain.addCommitment(pendingRemoteCommit) - walletLog.Debugf("ChannelPoint(%v), pending remote "+ - "commitment: %v", lc.channelState.FundingOutpoint, + lc.log.Debugf("pending remote commitment: %v", newLogClosure(func() string { return spew.Sdump(lc.remoteCommitChain.tip()) }), @@ -1712,19 +1724,25 @@ func (lc *LightningChannel) restoreCommitState( // We'll also re-create the set of commitment keys needed to // fully re-derive the state. - tweaklessCommit := lc.channelState.ChanType.IsTweakless() pendingRemoteKeyChain = DeriveCommitmentKeys( - pendingCommitPoint, false, tweaklessCommit, - lc.localChanCfg, lc.remoteChanCfg, + pendingCommitPoint, false, lc.channelState.ChanType, + &lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg, ) } + // Fetch remote updates that we have acked but not yet signed for. + unsignedAckedUpdates, err := lc.channelState.UnsignedAckedUpdates() + if err != nil { + return err + } + // Finally, with the commitment states restored, we'll now restore the // state logs based on the current local+remote commit, and any pending // remote commit that exists. 
err = lc.restoreStateLogs( localCommit, remoteCommit, pendingRemoteCommit, pendingRemoteCommitDiff, pendingRemoteKeyChain, + unsignedAckedUpdates, ) if err != nil { return err @@ -1740,7 +1758,8 @@ func (lc *LightningChannel) restoreCommitState( func (lc *LightningChannel) restoreStateLogs( localCommitment, remoteCommitment, pendingRemoteCommit *commitment, pendingRemoteCommitDiff *channeldb.CommitDiff, - pendingRemoteKeys *CommitmentKeyRing) error { + pendingRemoteKeys *CommitmentKeyRing, + unsignedAckedUpdates []channeldb.LogUpdate) error { // We make a map of incoming HTLCs to the height of the remote // commitment they were first added, and outgoing HTLCs to the height @@ -1806,12 +1825,91 @@ func (lc *LightningChannel) restoreStateLogs( lc.localUpdateLog.restoreHtlc(&htlc) } - // If we didn't have a dangling (un-acked) commit for the remote party, - // then we can exit here. - if pendingRemoteCommit == nil { - return nil + // If we have a dangling (un-acked) commit for the remote party, then we + // restore the updates leading up to this commit. + if pendingRemoteCommit != nil { + err := lc.restorePendingLocalUpdates( + pendingRemoteCommitDiff, pendingRemoteKeys, + ) + if err != nil { + return err + } + } + + // Restore unsigned acked remote log updates so that we can include them + // in our next signature. + err := lc.restorePendingRemoteUpdates( + unsignedAckedUpdates, localCommitment.height, + ) + if err != nil { + return err + } + + return nil +} + +// restorePendingRemoteUpdates restores the acked remote log updates that we +// haven't yet signed for. 
+func (lc *LightningChannel) restorePendingRemoteUpdates( + unsignedAckedUpdates []channeldb.LogUpdate, + localCommitmentHeight uint64) error { + + lc.log.Debugf("Restoring %v dangling remote updates", + len(unsignedAckedUpdates)) + + for _, logUpdate := range unsignedAckedUpdates { + logUpdate := logUpdate + + payDesc, err := lc.remoteLogUpdateToPayDesc( + &logUpdate, lc.localUpdateLog, localCommitmentHeight, + ) + if err != nil { + return err + } + + // Sanity check that we are not restoring a remote log update + // that we haven't received a sig for. + if payDesc.LogIndex >= lc.remoteUpdateLog.logIndex { + return fmt.Errorf("attempted to restore an "+ + "unsigned remote update: log_index=%v", + payDesc.LogIndex) + } + + // Insert the update into the log. The log update index doesn't + // need to be incremented (hence the restore calls), because its + // final value was properly persisted with the last local + // commitment update. + switch payDesc.EntryType { + case Add: + lc.remoteUpdateLog.restoreHtlc(payDesc) + + // Sanity check to be sure that we are not restoring an + // add update that the remote hasn't signed for yet. + if payDesc.HtlcIndex >= lc.remoteUpdateLog.htlcCounter { + return fmt.Errorf("attempted to restore an "+ + "unsigned remote htlc: htlc_index=%v", + payDesc.HtlcIndex) + } + + case FeeUpdate: + lc.remoteUpdateLog.restoreUpdate(payDesc) + + default: + lc.remoteUpdateLog.restoreUpdate(payDesc) + + lc.localUpdateLog.markHtlcModified(payDesc.ParentIndex) + } } + return nil +} + +// restorePendingLocalUpdates restores the local log updates leading up to the +// given pending remote commitment. 
+func (lc *LightningChannel) restorePendingLocalUpdates( + pendingRemoteCommitDiff *channeldb.CommitDiff, + pendingRemoteKeys *CommitmentKeyRing) error { + pendingCommit := pendingRemoteCommitDiff.Commitment pendingHeight := pendingCommit.CommitHeight @@ -1820,7 +1918,8 @@ func (lc *LightningChannel) restoreStateLogs( for _, logUpdate := range pendingRemoteCommitDiff.LogUpdates { payDesc, err := lc.logUpdateToPayDesc( &logUpdate, lc.remoteUpdateLog, pendingHeight, - SatPerKWeight(pendingCommit.FeePerKw), pendingRemoteKeys, + chainfee.SatPerKWeight(pendingCommit.FeePerKw), + pendingRemoteKeys, lc.channelState.RemoteChanCfg.DustLimit, ) if err != nil { @@ -1834,7 +1933,7 @@ func (lc *LightningChannel) restoreStateLogs( lc.localUpdateLog.logIndex > 0 { payDesc.LogIndex = lc.localUpdateLog.logIndex - walletLog.Debugf("Found FeeUpdate on "+ + lc.log.Debugf("Found FeeUpdate on "+ "pendingRemoteCommitDiff without logIndex, "+ "using %v", payDesc.LogIndex) } @@ -1944,6 +2043,10 @@ type BreachRetribution struct { // party) within the breach transaction. LocalOutpoint wire.OutPoint + // LocalDelay is the CSV delay for the to_remote script on the breached + // commitment. + LocalDelay uint32 + // RemoteOutputSignDesc is a SignDescriptor which is capable of // generating the signature required to claim the funds as described // within the revocation clause of the remote party's commitment @@ -1957,6 +2060,10 @@ type BreachRetribution struct { // party within the breach transaction. RemoteOutpoint wire.OutPoint + // RemoteDelay specifies the CSV delay applied to to-local scripts on + // the breaching commitment transaction. + RemoteDelay uint32 + // HtlcRetributions is a slice of HTLC retributions for each output // active HTLC output within the breached commitment transaction. HtlcRetributions []HtlcRetribution @@ -1965,10 +2072,6 @@ type BreachRetribution struct { // breaching commitment transaction. 
This allows downstream clients to // have access to the public keys used in the scripts. KeyRing *CommitmentKeyRing - - // RemoteDelay specifies the CSV delay applied to to-local scripts on - // the breaching commitment transaction. - RemoteDelay uint32 } // NewBreachRetribution creates a new fully populated BreachRetribution for the @@ -1999,45 +2102,49 @@ func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64, // With the commitment point generated, we can now generate the four // keys we'll need to reconstruct the commitment state, - tweaklessCommit := chanState.ChanType.IsTweakless() keyRing := DeriveCommitmentKeys( - commitmentPoint, false, tweaklessCommit, + commitmentPoint, false, chanState.ChanType, &chanState.LocalChanCfg, &chanState.RemoteChanCfg, ) // Next, reconstruct the scripts as they were present at this state // number so we can have the proper witness script to sign and include // within the final witness. - remoteDelay := uint32(chanState.RemoteChanCfg.CsvDelay) - remotePkScript, err := input.CommitScriptToSelf( - remoteDelay, keyRing.DelayKey, keyRing.RevocationKey, + theirDelay := uint32(chanState.RemoteChanCfg.CsvDelay) + theirPkScript, err := input.CommitScriptToSelf( + theirDelay, keyRing.ToLocalKey, keyRing.RevocationKey, ) if err != nil { return nil, err } - remoteWitnessHash, err := input.WitnessScriptHash(remotePkScript) + theirWitnessHash, err := input.WitnessScriptHash(theirPkScript) if err != nil { return nil, err } - localPkScript, err := input.CommitScriptUnencumbered(keyRing.NoDelayKey) + + // Since it is the remote breach we are reconstructing, the output going + // to us will be a to-remote script with our local params. + ourScript, ourDelay, err := CommitScriptToRemote( + chanState.ChanType, keyRing.ToRemoteKey, + ) if err != nil { return nil, err } // In order to fully populate the breach retribution struct, we'll need - // to find the exact index of the local+remote commitment outputs. 
- localOutpoint := wire.OutPoint{ + // to find the exact index of the commitment outputs. + ourOutpoint := wire.OutPoint{ Hash: commitHash, } - remoteOutpoint := wire.OutPoint{ + theirOutpoint := wire.OutPoint{ Hash: commitHash, } for i, txOut := range revokedSnapshot.CommitTx.TxOut { switch { - case bytes.Equal(txOut.PkScript, localPkScript): - localOutpoint.Index = uint32(i) - case bytes.Equal(txOut.PkScript, remoteWitnessHash): - remoteOutpoint.Index = uint32(i) + case bytes.Equal(txOut.PkScript, ourScript.PkScript): + ourOutpoint.Index = uint32(i) + case bytes.Equal(txOut.PkScript, theirWitnessHash): + theirOutpoint.Index = uint32(i) } } @@ -2045,45 +2152,39 @@ func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64, // commitment outputs. If either is considered dust using the remote // party's dust limit, the respective sign descriptor will be nil. var ( - localSignDesc *input.SignDescriptor - remoteSignDesc *input.SignDescriptor + ourSignDesc *input.SignDescriptor + theirSignDesc *input.SignDescriptor ) - // Compute the local and remote balances in satoshis. - localAmt := revokedSnapshot.LocalBalance.ToSatoshis() - remoteAmt := revokedSnapshot.RemoteBalance.ToSatoshis() + // Compute the balances in satoshis. + ourAmt := revokedSnapshot.LocalBalance.ToSatoshis() + theirAmt := revokedSnapshot.RemoteBalance.ToSatoshis() - // If the local balance exceeds the remote party's dust limit, - // instantiate the local sign descriptor. - if localAmt >= chanState.RemoteChanCfg.DustLimit { - localSignDesc = &input.SignDescriptor{ + // If our balance exceeds the remote party's dust limit, instantiate + // the sign descriptor for our output. 
+ if ourAmt >= chanState.RemoteChanCfg.DustLimit { + ourSignDesc = &input.SignDescriptor{ SingleTweak: keyRing.LocalCommitKeyTweak, KeyDesc: chanState.LocalChanCfg.PaymentBasePoint, - WitnessScript: localPkScript, + WitnessScript: ourScript.WitnessScript, Output: &wire.TxOut{ - PkScript: localPkScript, - Value: int64(localAmt), + PkScript: ourScript.PkScript, + Value: int64(ourAmt), }, HashType: txscript.SigHashAll, } - - // If this is a tweakless commitment, then we can safely blank - // out the SingleTweak value as it isn't needed. - if tweaklessCommit { - localSignDesc.SingleTweak = nil - } } - // Similarly, if the remote balance exceeds the remote party's dust - // limit, assemble the remote sign descriptor. - if remoteAmt >= chanState.RemoteChanCfg.DustLimit { - remoteSignDesc = &input.SignDescriptor{ + // Similarly, if their balance exceeds the remote party's dust limit, + // assemble the sign descriptor for their output, which we can sweep. + if theirAmt >= chanState.RemoteChanCfg.DustLimit { + theirSignDesc = &input.SignDescriptor{ KeyDesc: chanState.LocalChanCfg.RevocationBasePoint, DoubleTweak: commitmentSecret, - WitnessScript: remotePkScript, + WitnessScript: theirPkScript, Output: &wire.TxOut{ - PkScript: remoteWitnessHash, - Value: int64(remoteAmt), + PkScript: theirWitnessHash, + Value: int64(theirAmt), }, HashType: txscript.SigHashAll, } @@ -2094,16 +2195,11 @@ func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64, // remote commitment transaction. htlcRetributions := make([]HtlcRetribution, 0, len(revokedSnapshot.Htlcs)) for _, htlc := range revokedSnapshot.Htlcs { - var ( - htlcWitnessScript []byte - err error - ) - // If the HTLC is dust, then we'll skip it as it doesn't have // an output on the commitment transaction. 
if htlcIsDust( - htlc.Incoming, false, - SatPerKWeight(revokedSnapshot.FeePerKw), + chanState.ChanType, htlc.Incoming, false, + chainfee.SatPerKWeight(revokedSnapshot.FeePerKw), htlc.Amt.ToSatoshis(), chanState.RemoteChanCfg.DustLimit, ) { continue @@ -2114,7 +2210,7 @@ func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64, // remote commitment transaction, and *they* go to the second // level. secondLevelWitnessScript, err := input.SecondLevelHtlcScript( - keyRing.RevocationKey, keyRing.DelayKey, remoteDelay, + keyRing.RevocationKey, keyRing.ToLocalKey, theirDelay, ) if err != nil { return nil, err @@ -2122,31 +2218,13 @@ func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64, // If this is an incoming HTLC, then this means that they were // the sender of the HTLC (relative to us). So we'll - // re-generate the sender HTLC script. - if htlc.Incoming { - htlcWitnessScript, err = input.SenderHTLCScript( - keyRing.RemoteHtlcKey, keyRing.LocalHtlcKey, - keyRing.RevocationKey, htlc.RHash[:], - ) - if err != nil { - return nil, err - } - - } else { - // Otherwise, is this was an outgoing HTLC that we - // sent, then from the PoV of the remote commitment - // state, they're the receiver of this HTLC. - htlcWitnessScript, err = input.ReceiverHTLCScript( - htlc.RefundTimeout, keyRing.LocalHtlcKey, - keyRing.RemoteHtlcKey, keyRing.RevocationKey, - htlc.RHash[:], - ) - if err != nil { - return nil, err - } - } - - htlcPkScript, err := input.WitnessScriptHash(htlcWitnessScript) + // re-generate the sender HTLC script. Otherwise, is this was + // an outgoing HTLC that we sent, then from the PoV of the + // remote commitment state, they're the receiver of this HTLC. 
+ htlcPkScript, htlcWitnessScript, err := genHtlcScript( + chanState.ChanType, htlc.Incoming, false, + htlc.RefundTimeout, htlc.RHash, keyRing, + ) if err != nil { return nil, err } @@ -2180,35 +2258,25 @@ func NewBreachRetribution(chanState *channeldb.OpenChannel, stateNum uint64, BreachHeight: breachHeight, RevokedStateNum: stateNum, PendingHTLCs: revokedSnapshot.Htlcs, - LocalOutpoint: localOutpoint, - LocalOutputSignDesc: localSignDesc, - RemoteOutpoint: remoteOutpoint, - RemoteOutputSignDesc: remoteSignDesc, + LocalOutpoint: ourOutpoint, + LocalOutputSignDesc: ourSignDesc, + LocalDelay: ourDelay, + RemoteOutpoint: theirOutpoint, + RemoteOutputSignDesc: theirSignDesc, + RemoteDelay: theirDelay, HtlcRetributions: htlcRetributions, KeyRing: keyRing, - RemoteDelay: remoteDelay, }, nil } -// htlcTimeoutFee returns the fee in satoshis required for an HTLC timeout -// transaction based on the current fee rate. -func htlcTimeoutFee(feePerKw SatPerKWeight) btcutil.Amount { - return feePerKw.FeeForWeight(input.HtlcTimeoutWeight) -} - -// htlcSuccessFee returns the fee in satoshis required for an HTLC success -// transaction based on the current fee rate. -func htlcSuccessFee(feePerKw SatPerKWeight) btcutil.Amount { - return feePerKw.FeeForWeight(input.HtlcSuccessWeight) -} - // htlcIsDust determines if an HTLC output is dust or not depending on two // bits: if the HTLC is incoming and if the HTLC will be placed on our // commitment transaction, or theirs. These two pieces of information are // require as we currently used second-level HTLC transactions as off-chain // covenants. Depending on the two bits, we'll either be using a timeout or // success transaction which have different weights. 
-func htlcIsDust(incoming, ourCommit bool, feePerKw SatPerKWeight, +func htlcIsDust(chanType channeldb.ChannelType, + incoming, ourCommit bool, feePerKw chainfee.SatPerKWeight, htlcAmt, dustLimit btcutil.Amount) bool { // First we'll determine the fee required for this HTLC based on if this is @@ -2220,25 +2288,25 @@ func htlcIsDust(incoming, ourCommit bool, feePerKw SatPerKWeight, // If this is an incoming HTLC on our commitment transaction, then the // second-level transaction will be a success transaction. case incoming && ourCommit: - htlcFee = htlcSuccessFee(feePerKw) + htlcFee = HtlcSuccessFee(chanType, feePerKw) // If this is an incoming HTLC on their commitment transaction, then // we'll be using a second-level timeout transaction as they've added // this HTLC. case incoming && !ourCommit: - htlcFee = htlcTimeoutFee(feePerKw) + htlcFee = HtlcTimeoutFee(chanType, feePerKw) // If this is an outgoing HTLC on our commitment transaction, then // we'll be using a timeout transaction as we're the sender of the // HTLC. case !incoming && ourCommit: - htlcFee = htlcTimeoutFee(feePerKw) + htlcFee = HtlcTimeoutFee(chanType, feePerKw) // If this is an outgoing HTLC on their commitment transaction, then // we'll be using an HTLC success transaction as they're the receiver // of this HTLC. 
case !incoming && !ourCommit: - htlcFee = htlcSuccessFee(feePerKw) + htlcFee = HtlcSuccessFee(chanType, feePerKw) } return (htlcAmt - htlcFee) < dustLimit @@ -2249,7 +2317,7 @@ func htlcIsDust(incoming, ourCommit bool, feePerKw SatPerKWeight, type htlcView struct { ourUpdates []*PaymentDescriptor theirUpdates []*PaymentDescriptor - feePerKw SatPerKWeight + feePerKw chainfee.SatPerKWeight } // fetchHTLCView returns all the candidate HTLC updates which should be @@ -2297,33 +2365,67 @@ func (lc *LightningChannel) fetchCommitmentView(remoteChain bool, keyRing *CommitmentKeyRing) (*commitment, error) { commitChain := lc.localCommitChain + dustLimit := lc.channelState.LocalChanCfg.DustLimit if remoteChain { commitChain = lc.remoteCommitChain + dustLimit = lc.channelState.RemoteChanCfg.DustLimit } nextHeight := commitChain.tip().height + 1 // Run through all the HTLCs that will be covered by this transaction // in order to update their commitment addition height, and to adjust - // the balances on the commitment transaction accordingly. + // the balances on the commitment transaction accordingly. Note that + // these balances will be *before* taking a commitment fee from the + // initiator. htlcView := lc.fetchHTLCView(theirLogIndex, ourLogIndex) - ourBalance, theirBalance, _, filteredHTLCView := lc.computeView( + ourBalance, theirBalance, _, filteredHTLCView, err := lc.computeView( htlcView, remoteChain, true, ) + if err != nil { + return nil, err + } feePerKw := filteredHTLCView.feePerKw - // Determine how many current HTLCs are over the dust limit, and should - // be counted for the purpose of fee calculation. - var dustLimit btcutil.Amount - if remoteChain { - dustLimit = lc.remoteChanCfg.DustLimit - } else { - dustLimit = lc.localChanCfg.DustLimit + // Actually generate unsigned commitment transaction for this view. 
+ commitTx, err := lc.commitBuilder.createUnsignedCommitmentTx( + ourBalance, theirBalance, !remoteChain, feePerKw, nextHeight, + filteredHTLCView, keyRing, + ) + if err != nil { + return nil, err + } + + // We'll assert that there hasn't been a mistake during fee calculation + // leading to a fee too low. + var totalOut btcutil.Amount + for _, txOut := range commitTx.txn.TxOut { + totalOut += btcutil.Amount(txOut.Value) } + fee := lc.channelState.Capacity - totalOut + // Since the transaction is not signed yet, we use the witness weight + // used for weight calculation. + uTx := btcutil.NewTx(commitTx.txn) + weight := blockchain.GetTransactionWeight(uTx) + + input.WitnessCommitmentTxWeight + + effFeeRate := chainfee.SatPerKWeight(fee) * 1000 / + chainfee.SatPerKWeight(weight) + if effFeeRate < chainfee.AbsoluteFeePerKwFloor { + return nil, fmt.Errorf("height=%v, for ChannelPoint(%v) "+ + "attempts to create commitment with feerate %v: %v", + nextHeight, lc.channelState.FundingOutpoint, + effFeeRate, spew.Sdump(commitTx)) + } + + // With the commitment view created, store the resulting balances and + // transaction with the other parameters for this height. c := &commitment{ - ourBalance: ourBalance, - theirBalance: theirBalance, + ourBalance: commitTx.ourBalance, + theirBalance: commitTx.theirBalance, + txn: commitTx.txn, + fee: commitTx.fee, ourMessageIndex: ourLogIndex, ourHtlcIndex: ourHtlcIndex, theirMessageIndex: theirLogIndex, @@ -2334,11 +2436,6 @@ func (lc *LightningChannel) fetchCommitmentView(remoteChain bool, isOurs: !remoteChain, } - // Actually generate unsigned commitment transaction for this view. - if err := lc.createCommitmentTx(c, filteredHTLCView, keyRing); err != nil { - return nil, err - } - // In order to ensure _none_ of the HTLC's associated with this new // commitment are mutated, we'll manually copy over each HTLC to its // respective slice. 
@@ -2352,202 +2449,102 @@ func (lc *LightningChannel) fetchCommitmentView(remoteChain bool, } // Finally, we'll populate all the HTLC indexes so we can track the - // locations of each HTLC in the commitment state. - if err := c.populateHtlcIndexes(); err != nil { + // locations of each HTLC in the commitment state. We pass in the sorted + // slice of CLTV deltas in order to properly locate HTLCs that otherwise + // have the same payment hash and amount. + err = c.populateHtlcIndexes(lc.channelState.ChanType, commitTx.cltvs) + if err != nil { return nil, err } return c, nil } -func (lc *LightningChannel) fundingTxIn() wire.TxIn { - return *wire.NewTxIn(&lc.channelState.FundingOutpoint, nil, nil) +func fundingTxIn(chanState *channeldb.OpenChannel) wire.TxIn { + return *wire.NewTxIn(&chanState.FundingOutpoint, nil, nil) } -// createCommitmentTx generates the unsigned commitment transaction for a -// commitment view and assigns to txn field. -func (lc *LightningChannel) createCommitmentTx(c *commitment, - filteredHTLCView *htlcView, keyRing *CommitmentKeyRing) error { - - ourBalance := c.ourBalance - theirBalance := c.theirBalance - - numHTLCs := int64(0) - for _, htlc := range filteredHTLCView.ourUpdates { - if htlcIsDust(false, c.isOurs, c.feePerKw, - htlc.Amount.ToSatoshis(), c.dustLimit) { - - continue - } +// evaluateHTLCView processes all update entries in both HTLC update logs, +// producing a final view which is the result of properly applying all adds, +// settles, timeouts and fee updates found in both logs. The resulting view +// returned reflects the current state of HTLCs within the remote or local +// commitment chain, and the current commitment fee rate. +// +// If mutateState is set to true, then the add height of all added HTLCs +// will be set to nextHeight, and the remove height of all removed HTLCs +// will be set to nextHeight. This should therefore only be set to true +// once for each height, and only in concert with signing a new commitment. 
+// TODO(halseth): return htlcs to mutate instead of mutating inside +// method. +func (lc *LightningChannel) evaluateHTLCView(view *htlcView, ourBalance, + theirBalance *lnwire.MilliSatoshi, nextHeight uint64, + remoteChain, mutateState bool) (*htlcView, error) { - numHTLCs++ + // We initialize the view's fee rate to the fee rate of the unfiltered + // view. If any fee updates are found when evaluating the view, it will + // be updated. + newView := &htlcView{ + feePerKw: view.feePerKw, } - for _, htlc := range filteredHTLCView.theirUpdates { - if htlcIsDust(true, c.isOurs, c.feePerKw, - htlc.Amount.ToSatoshis(), c.dustLimit) { - - continue - } - numHTLCs++ - } + // We use two maps, one for the local log and one for the remote log to + // keep track of which entries we need to skip when creating the final + // htlc view. We skip an entry whenever we find a settle or a timeout + // modifying an entry. + skipUs := make(map[uint64]struct{}) + skipThem := make(map[uint64]struct{}) - // Next, we'll calculate the fee for the commitment transaction based - // on its total weight. Once we have the total weight, we'll multiply - // by the current fee-per-kw, then divide by 1000 to get the proper - // fee. - totalCommitWeight := input.CommitWeight + (input.HtlcWeight * numHTLCs) + // fetchParentEntry is a helper method that will fetch the parent of + // entry from the corresponding update log. + fetchParentEntry := func(entry *PaymentDescriptor, + remoteLog bool) (*PaymentDescriptor, error) { - // With the weight known, we can now calculate the commitment fee, - // ensuring that we account for any dust outputs trimmed above. - commitFee := c.feePerKw.FeeForWeight(totalCommitWeight) - commitFeeMSat := lnwire.NewMSatFromSatoshis(commitFee) + var ( + updateLog *updateLog + logName string + ) - // Currently, within the protocol, the initiator always pays the fees. - // So we'll subtract the fee amount from the balance of the current - // initiator. 
If the initiator is unable to pay the fee fully, then - // their entire output is consumed. - switch { - case lc.channelState.IsInitiator && commitFee > ourBalance.ToSatoshis(): - ourBalance = 0 + if remoteLog { + updateLog = lc.remoteUpdateLog + logName = "remote" + } else { + updateLog = lc.localUpdateLog + logName = "local" + } - case lc.channelState.IsInitiator: - ourBalance -= commitFeeMSat + addEntry := updateLog.lookupHtlc(entry.ParentIndex) - case !lc.channelState.IsInitiator && commitFee > theirBalance.ToSatoshis(): - theirBalance = 0 + switch { + // We check if the parent entry is not found at this point. + // This could happen for old versions of lnd, and we return an + // error to gracefully shut down the state machine if such an + // entry is still in the logs. + case addEntry == nil: + return nil, fmt.Errorf("unable to find parent entry "+ + "%d in %v update log: %v\nUpdatelog: %v", + entry.ParentIndex, logName, + newLogClosure(func() string { + return spew.Sdump(entry) + }), newLogClosure(func() string { + return spew.Sdump(updateLog) + }), + ) - case !lc.channelState.IsInitiator: - theirBalance -= commitFeeMSat - } + // The parent add height should never be zero at this point. If + // that's the case we probably forgot to send a new commitment. 
+ case remoteChain && addEntry.addCommitHeightRemote == 0: + return nil, fmt.Errorf("parent entry %d for update %d "+ + "had zero remote add height", entry.ParentIndex, + entry.LogIndex) + case !remoteChain && addEntry.addCommitHeightLocal == 0: + return nil, fmt.Errorf("parent entry %d for update %d "+ + "had zero local add height", entry.ParentIndex, + entry.LogIndex) + } - var ( - delay uint32 - delayBalance, p2wkhBalance btcutil.Amount - ) - if c.isOurs { - delay = uint32(lc.localChanCfg.CsvDelay) - delayBalance = ourBalance.ToSatoshis() - p2wkhBalance = theirBalance.ToSatoshis() - } else { - delay = uint32(lc.remoteChanCfg.CsvDelay) - delayBalance = theirBalance.ToSatoshis() - p2wkhBalance = ourBalance.ToSatoshis() + return addEntry, nil } - // Generate a new commitment transaction with all the latest - // unsettled/un-timed out HTLCs. - commitTx, err := CreateCommitTx(lc.fundingTxIn(), keyRing, delay, - delayBalance, p2wkhBalance, c.dustLimit) - if err != nil { - return err - } - - // We'll now add all the HTLC outputs to the commitment transaction. - // Each output includes an off-chain 2-of-2 covenant clause, so we'll - // need the objective local/remote keys for this particular commitment - // as well. For any non-dust HTLCs that are manifested on the commitment - // transaction, we'll also record its CLTV which is required to sort the - // commitment transaction below. The slice is initially sized to the - // number of existing outputs, since any outputs already added are - // commitment outputs and should correspond to zero values for the - // purposes of sorting. 
- cltvs := make([]uint32, len(commitTx.TxOut)) - for _, htlc := range filteredHTLCView.ourUpdates { - if htlcIsDust(false, c.isOurs, c.feePerKw, - htlc.Amount.ToSatoshis(), c.dustLimit) { - continue - } - - err := lc.addHTLC(commitTx, c.isOurs, false, htlc, keyRing) - if err != nil { - return err - } - cltvs = append(cltvs, htlc.Timeout) - } - for _, htlc := range filteredHTLCView.theirUpdates { - if htlcIsDust(true, c.isOurs, c.feePerKw, - htlc.Amount.ToSatoshis(), c.dustLimit) { - continue - } - - err := lc.addHTLC(commitTx, c.isOurs, true, htlc, keyRing) - if err != nil { - return err - } - cltvs = append(cltvs, htlc.Timeout) - } - - // Set the state hint of the commitment transaction to facilitate - // quickly recovering the necessary penalty state in the case of an - // uncooperative broadcast. - err = SetStateNumHint(commitTx, c.height, lc.stateHintObfuscator) - if err != nil { - return err - } - - // Sort the transactions according to the agreed upon canonical - // ordering. This lets us skip sending the entire transaction over, - // instead we'll just send signatures. - InPlaceCommitSort(commitTx, cltvs) - - // Next, we'll ensure that we don't accidentally create a commitment - // transaction which would be invalid by consensus. - uTx := btcutil.NewTx(commitTx) - if err := blockchain.CheckTransactionSanity(uTx); err != nil { - return err - } - - // Finally, we'll assert that were not attempting to draw more out of - // the channel that was originally placed within it. 
- var totalOut btcutil.Amount - for _, txOut := range commitTx.TxOut { - totalOut += btcutil.Amount(txOut.Value) - } - if totalOut > lc.channelState.Capacity { - return fmt.Errorf("height=%v, for ChannelPoint(%v) attempts "+ - "to consume %v while channel capacity is %v", - c.height, lc.channelState.FundingOutpoint, - totalOut, lc.channelState.Capacity) - } - - c.txn = commitTx - c.fee = commitFee - c.ourBalance = ourBalance - c.theirBalance = theirBalance - return nil -} - -// evaluateHTLCView processes all update entries in both HTLC update logs, -// producing a final view which is the result of properly applying all adds, -// settles, timeouts and fee updates found in both logs. The resulting view -// returned reflects the current state of HTLCs within the remote or local -// commitment chain, and the current commitment fee rate. -// -// If mutateState is set to true, then the add height of all added HTLCs -// will be set to nextHeight, and the remove height of all removed HTLCs -// will be set to nextHeight. This should therefore only be set to true -// once for each height, and only in concert with signing a new commitment. -// TODO(halseth): return htlcs to mutate instead of mutating inside -// method. -func (lc *LightningChannel) evaluateHTLCView(view *htlcView, ourBalance, - theirBalance *lnwire.MilliSatoshi, nextHeight uint64, - remoteChain, mutateState bool) *htlcView { - - // We initialize the view's fee rate to the fee rate of the unfiltered - // view. If any fee updates are found when evaluating the view, it will - // be updated. - newView := &htlcView{ - feePerKw: view.feePerKw, - } - - // We use two maps, one for the local log and one for the remote log to - // keep track of which entries we need to skip when creating the final - // htlc view. We skip an entry whenever we find a settle or a timeout - // modifying an entry. 
- skipUs := make(map[uint64]struct{}) - skipThem := make(map[uint64]struct{}) - // First we run through non-add entries in both logs, populating the // skip sets and mutating the current chain state (crediting balances, // etc) to reflect the settle/timeout entry encountered. @@ -2574,21 +2571,9 @@ func (lc *LightningChannel) evaluateHTLCView(view *htlcView, ourBalance, lc.channelState.TotalMSatReceived += entry.Amount } - addEntry := lc.remoteUpdateLog.lookupHtlc(entry.ParentIndex) - - // We check if the parent entry is not found at this point. We - // have seen this happening a few times and panic with some - // addtitional info to figure out why. - // TODO(halseth): remove when bug is fixed. - if addEntry == nil { - panic(fmt.Sprintf("unable to find parent entry %d "+ - "in remote update log: %v\nUpdatelog: %v", - entry.ParentIndex, newLogClosure(func() string { - return spew.Sdump(entry) - }), newLogClosure(func() string { - return spew.Sdump(lc.remoteUpdateLog) - }), - )) + addEntry, err := fetchParentEntry(entry, true) + if err != nil { + return nil, err } skipThem[addEntry.HtlcIndex] = struct{}{} @@ -2619,21 +2604,9 @@ func (lc *LightningChannel) evaluateHTLCView(view *htlcView, ourBalance, lc.channelState.TotalMSatSent += entry.Amount } - addEntry := lc.localUpdateLog.lookupHtlc(entry.ParentIndex) - - // We check if the parent entry is not found at this point. We - // have seen this happening a few times and panic with some - // addtitional info to figure out why. - // TODO(halseth): remove when bug is fixed. 
- if addEntry == nil { - panic(fmt.Sprintf("unable to find parent entry %d "+ - "in local update log: %v\nUpdatelog: %v", - entry.ParentIndex, newLogClosure(func() string { - return spew.Sdump(entry) - }), newLogClosure(func() string { - return spew.Sdump(lc.localUpdateLog) - }), - )) + addEntry, err := fetchParentEntry(entry, false) + if err != nil { + return nil, err } skipUs[addEntry.HtlcIndex] = struct{}{} @@ -2665,7 +2638,7 @@ func (lc *LightningChannel) evaluateHTLCView(view *htlcView, ourBalance, newView.theirUpdates = append(newView.theirUpdates, entry) } - return newView + return newView, nil } // processAddEntry evaluates the effect of an add entry within the HTLC log. @@ -2780,7 +2753,7 @@ func processFeeUpdate(feeUpdate *PaymentDescriptor, nextHeight uint64, // If the update wasn't already locked in, update the current fee rate // to reflect this update. - view.feePerKw = SatPerKWeight(feeUpdate.Amount.ToSatoshis()) + view.feePerKw = chainfee.SatPerKWeight(feeUpdate.Amount.ToSatoshis()) if mutateState { *addHeight = nextHeight @@ -2795,12 +2768,14 @@ func processFeeUpdate(feeUpdate *PaymentDescriptor, nextHeight uint64, // signature can be submitted to the sigPool to generate all the signatures // asynchronously and in parallel. func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing, + chanType channeldb.ChannelType, localChanCfg, remoteChanCfg *channeldb.ChannelConfig, remoteCommitView *commitment) ([]SignJob, chan struct{}, error) { txHash := remoteCommitView.txn.TxHash() dustLimit := remoteChanCfg.DustLimit feePerKw := remoteCommitView.feePerKw + sigHashType := HtlcSigHashType(chanType) // With the keys generated, we'll make a slice with enough capacity to // hold potentially all the HTLCs. The actual slice may be a bit @@ -2816,8 +2791,10 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing, // dust output after taking into account second-level HTLC fees, then a // sigJob will be generated and appended to the current batch. 
for _, htlc := range remoteCommitView.incomingHTLCs { - if htlcIsDust(true, false, feePerKw, htlc.Amount.ToSatoshis(), - dustLimit) { + if htlcIsDust( + chanType, true, false, feePerKw, + htlc.Amount.ToSatoshis(), dustLimit, + ) { continue } @@ -2832,7 +2809,7 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing, // HTLC timeout transaction for them. The output of the timeout // transaction needs to account for fees, so we'll compute the // required fee and output now. - htlcFee := htlcTimeoutFee(feePerKw) + htlcFee := HtlcTimeoutFee(chanType, feePerKw) outputAmt := htlc.Amount.ToSatoshis() - htlcFee // With the fee calculate, we can properly create the HTLC @@ -2842,9 +2819,9 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing, Index: uint32(htlc.remoteOutputIndex), } sigJob.Tx, err = createHtlcTimeoutTx( - op, outputAmt, htlc.Timeout, + chanType, op, outputAmt, htlc.Timeout, uint32(remoteChanCfg.CsvDelay), - keyRing.RevocationKey, keyRing.DelayKey, + keyRing.RevocationKey, keyRing.ToLocalKey, ) if err != nil { return nil, nil, err @@ -2860,7 +2837,7 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing, Output: &wire.TxOut{ Value: int64(htlc.Amount.ToSatoshis()), }, - HashType: txscript.SigHashAll, + HashType: sigHashType, SigHashes: txscript.NewTxSigHashes(sigJob.Tx), InputIndex: 0, } @@ -2869,8 +2846,10 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing, sigBatch = append(sigBatch, sigJob) } for _, htlc := range remoteCommitView.outgoingHTLCs { - if htlcIsDust(false, false, feePerKw, htlc.Amount.ToSatoshis(), - dustLimit) { + if htlcIsDust( + chanType, false, false, feePerKw, + htlc.Amount.ToSatoshis(), dustLimit, + ) { continue } @@ -2883,7 +2862,7 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing, // HTLC success transaction for them. The output of the timeout // transaction needs to account for fees, so we'll compute the // required fee and output now. 
- htlcFee := htlcSuccessFee(feePerKw) + htlcFee := HtlcSuccessFee(chanType, feePerKw) outputAmt := htlc.Amount.ToSatoshis() - htlcFee // With the proper output amount calculated, we can now @@ -2894,8 +2873,8 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing, Index: uint32(htlc.remoteOutputIndex), } sigJob.Tx, err = createHtlcSuccessTx( - op, outputAmt, uint32(remoteChanCfg.CsvDelay), - keyRing.RevocationKey, keyRing.DelayKey, + chanType, op, outputAmt, uint32(remoteChanCfg.CsvDelay), + keyRing.RevocationKey, keyRing.ToLocalKey, ) if err != nil { return nil, nil, err @@ -2911,7 +2890,7 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing, Output: &wire.TxOut{ Value: int64(htlc.Amount.ToSatoshis()), }, - HashType: txscript.SigHashAll, + HashType: sigHashType, SigHashes: txscript.NewTxSigHashes(sigJob.Tx), InputIndex: 0, } @@ -3069,25 +3048,117 @@ func (lc *LightningChannel) createCommitDiff( }, nil } +// getUnsignedAckedUpdates returns all remote log updates that we haven't +// signed for yet ourselves. +func (lc *LightningChannel) getUnsignedAckedUpdates() []channeldb.LogUpdate { + // First, we need to convert the funding outpoint into the ID that's + // used on the wire to identify this channel. + chanID := lnwire.NewChanIDFromOutPoint(&lc.channelState.FundingOutpoint) + + // Fetch the last remote update that we have signed for. + lastRemoteCommitted := lc.remoteCommitChain.tip().theirMessageIndex + + // Fetch the last remote update that we have acked. + lastLocalCommitted := lc.localCommitChain.tail().theirMessageIndex + + // We'll now run through the remote update log to locate the items that + // we haven't signed for yet. This will be the set of items we need to + // restore if we reconnect in order to produce the signature that the + // remote party expects. 
+ var logUpdates []channeldb.LogUpdate + for e := lc.remoteUpdateLog.Front(); e != nil; e = e.Next() { + pd := e.Value.(*PaymentDescriptor) + + // Skip all remote updates that we have already included in our + // commit chain. + if pd.LogIndex < lastRemoteCommitted { + continue + } + + // Skip all remote updates that we haven't acked yet. At the + // moment this function is called, there shouldn't be any, but + // we check it anyway to make this function more generally + // usable. + if pd.LogIndex >= lastLocalCommitted { + continue + } + + logUpdate := channeldb.LogUpdate{ + LogIndex: pd.LogIndex, + } + + // We'll map the type of the PaymentDescriptor to one of the + // four messages that it corresponds to. + switch pd.EntryType { + case Add: + htlc := &lnwire.UpdateAddHTLC{ + ChanID: chanID, + ID: pd.HtlcIndex, + Amount: pd.Amount, + Expiry: pd.Timeout, + PaymentHash: pd.RHash, + } + copy(htlc.OnionBlob[:], pd.OnionBlob) + logUpdate.UpdateMsg = htlc + + case Settle: + logUpdate.UpdateMsg = &lnwire.UpdateFulfillHTLC{ + ChanID: chanID, + ID: pd.ParentIndex, + PaymentPreimage: pd.RPreimage, + } + + case Fail: + logUpdate.UpdateMsg = &lnwire.UpdateFailHTLC{ + ChanID: chanID, + ID: pd.ParentIndex, + Reason: pd.FailReason, + } + + case MalformedFail: + logUpdate.UpdateMsg = &lnwire.UpdateFailMalformedHTLC{ + ChanID: chanID, + ID: pd.ParentIndex, + ShaOnionBlob: pd.ShaOnionBlob, + FailureCode: pd.FailCode, + } + + case FeeUpdate: + // The Amount field holds the feerate denominated in + // msat. Since feerates are only denominated in sat/kw, + // we can convert it without loss of precision. 
+ logUpdate.UpdateMsg = &lnwire.UpdateFee{ + ChanID: chanID, + FeePerKw: uint32(pd.Amount.ToSatoshis()), + } + } + + logUpdates = append(logUpdates, logUpdate) + } + return logUpdates +} + // validateCommitmentSanity is used to validate the current state of the // commitment transaction in terms of the ChannelConstraints that we and our -// remote peer agreed upon during the funding workflow. The predictAdded -// parameter should be set to a valid PaymentDescriptor if we are validating -// in the state when adding a new HTLC, or nil otherwise. +// remote peer agreed upon during the funding workflow. The +// predict[Our|Their]Add should parameters should be set to a valid +// PaymentDescriptor if we are validating in the state when adding a new HTLC, +// or nil otherwise. func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter, ourLogCounter uint64, remoteChain bool, - predictAdded *PaymentDescriptor) error { + predictOurAdd, predictTheirAdd *PaymentDescriptor) error { // Fetch all updates not committed. view := lc.fetchHTLCView(theirLogCounter, ourLogCounter) // If we are checking if we can add a new HTLC, we add this to the - // update log, in order to validate the sanity of the commitment - // resulting from _actually adding_ this HTLC to the state. - if predictAdded != nil { - // If we are adding an HTLC, this will be an Add to the local - // update log. - view.ourUpdates = append(view.ourUpdates, predictAdded) + // appropriate update log, in order to validate the sanity of the + // commitment resulting from _actually adding_ this HTLC to the state. 
+ if predictOurAdd != nil { + view.ourUpdates = append(view.ourUpdates, predictOurAdd) + } + if predictTheirAdd != nil { + view.theirUpdates = append(view.theirUpdates, predictTheirAdd) } commitChain := lc.localCommitChain @@ -3097,9 +3168,12 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter, ourInitialBalance := commitChain.tip().ourBalance theirInitialBalance := commitChain.tip().theirBalance - ourBalance, theirBalance, commitWeight, filteredView := lc.computeView( + ourBalance, theirBalance, commitWeight, filteredView, err := lc.computeView( view, remoteChain, false, ) + if err != nil { + return err + } feePerKw := filteredView.feePerKw // Calculate the commitment fee, and subtract it from the initiator's @@ -3125,9 +3199,9 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter, // Ensure that the fee being applied is enough to be relayed across the // network in a reasonable time frame. - if feePerKw < FeePerKwFloor { + if feePerKw < chainfee.FeePerKwFloor { return fmt.Errorf("commitment fee per kw %v below fee floor %v", - feePerKw, FeePerKwFloor) + feePerKw, chainfee.FeePerKwFloor) } // If the added HTLCs will decrease the balance, make sure they won't @@ -3135,12 +3209,12 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter, switch { case ourBalance < ourInitialBalance && ourBalance < lnwire.NewMSatFromSatoshis( - lc.localChanCfg.ChanReserve): + lc.channelState.LocalChanCfg.ChanReserve): return ErrBelowChanReserve case theirBalance < theirInitialBalance && theirBalance < lnwire.NewMSatFromSatoshis( - lc.remoteChanCfg.ChanReserve): + lc.channelState.RemoteChanCfg.ChanReserve): return ErrBelowChanReserve } @@ -3164,6 +3238,11 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter, amtInFlight += entry.Amount numInFlight++ + // Check that the HTLC amount is positive. 
+ if entry.Amount == 0 { + return ErrInvalidHTLCAmt + } + // Check that the value of the HTLC they added // is above our minimum. if entry.Amount < constraints.MinHTLC { @@ -3190,8 +3269,8 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter, // First check that the remote updates won't violate it's channel // constraints. - err := validateUpdates( - filteredView.theirUpdates, lc.remoteChanCfg, + err = validateUpdates( + filteredView.theirUpdates, &lc.channelState.RemoteChanCfg, ) if err != nil { return err @@ -3200,7 +3279,7 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter, // Secondly check that our updates won't violate our channel // constraints. err = validateUpdates( - filteredView.ourUpdates, lc.localChanCfg, + filteredView.ourUpdates, &lc.channelState.LocalChanCfg, ) if err != nil { return err @@ -3225,6 +3304,14 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, []ch lc.Lock() defer lc.Unlock() + // Check for empty commit sig. This should never happen, but we don't + // dare to fail hard here. We assume peers can deal with the empty sig + // and continue channel operation. We log an error so that the bug + // causing this can be tracked down. + if !lc.oweCommitment(true) { + lc.log.Errorf("sending empty commit sig") + } + var ( sig lnwire.Sig htlcSigs []lnwire.Sig @@ -3249,7 +3336,7 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, []ch // party set up when we initially set up the channel. If we are, then // we'll abort this state transition. err := lc.validateCommitmentSanity( - remoteACKedIndex, lc.localUpdateLog.logIndex, true, nil, + remoteACKedIndex, lc.localUpdateLog.logIndex, true, nil, nil, ) if err != nil { return sig, htlcSigs, nil, err @@ -3259,8 +3346,8 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, []ch // used within fetchCommitmentView to derive all the keys necessary to // construct the commitment state. 
keyRing := DeriveCommitmentKeys( - commitPoint, false, lc.channelState.ChanType.IsTweakless(), - lc.localChanCfg, lc.remoteChanCfg, + commitPoint, false, lc.channelState.ChanType, + &lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg, ) // Create a new commitment view which will calculate the evaluated @@ -3278,14 +3365,14 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, []ch return sig, htlcSigs, nil, err } - walletLog.Tracef("ChannelPoint(%v): extending remote chain to height %v, "+ + lc.log.Tracef("extending remote chain to height %v, "+ "local_log=%v, remote_log=%v", - lc.channelState.FundingOutpoint, newCommitView.height, + newCommitView.height, lc.localUpdateLog.logIndex, remoteACKedIndex) - walletLog.Tracef("ChannelPoint(%v): remote chain: our_balance=%v, "+ + lc.log.Tracef("remote chain: our_balance=%v, "+ "their_balance=%v, commit_tx: %v", - lc.channelState.FundingOutpoint, newCommitView.ourBalance, + newCommitView.ourBalance, newCommitView.theirBalance, newLogClosure(func() string { return spew.Sdump(newCommitView.txn) @@ -3296,8 +3383,10 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, []ch // need to generate signatures of each of them for the remote party's // commitment state. We do so in two phases: first we generate and // submit the set of signature jobs to the worker pool. 
- sigBatch, cancelChan, err := genRemoteHtlcSigJobs(keyRing, - lc.localChanCfg, lc.remoteChanCfg, newCommitView, + sigBatch, cancelChan, err := genRemoteHtlcSigJobs( + keyRing, lc.channelState.ChanType, + &lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg, + newCommitView, ) if err != nil { return sig, htlcSigs, nil, err @@ -3313,7 +3402,7 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, []ch close(cancelChan) return sig, htlcSigs, nil, err } - sig, err = lnwire.NewSigFromRawSignature(rawSig) + sig, err = lnwire.NewSigFromSignature(rawSig) if err != nil { close(cancelChan) return sig, htlcSigs, nil, err @@ -3423,9 +3512,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // In this case, we'll return an error to indicate the // remote node sent us the wrong values. This will let // the caller act accordingly. - walletLog.Errorf("ChannelPoint(%v), sync failed: "+ - "remote provided invalid commit secret!", - lc.channelState.FundingOutpoint) + lc.log.Errorf("sync failed: remote provided invalid " + + "commit secret!") return nil, nil, nil, ErrInvalidLastCommitSecret } } @@ -3455,15 +3543,12 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // If their reported height for our local chain tail is ahead of our // view, then we're behind! 
case msg.RemoteCommitTailHeight > localTailHeight || isRestoredChan: - walletLog.Errorf("ChannelPoint(%v), sync failed with local "+ - "data loss: remote believes our tail height is %v, "+ - "while we have %v!", lc.channelState.FundingOutpoint, + lc.log.Errorf("sync failed with local data loss: remote "+ + "believes our tail height is %v, while we have %v!", msg.RemoteCommitTailHeight, localTailHeight) if isRestoredChan { - walletLog.Warnf("ChannelPoint(%v): detected restored "+ - "triggering DLP", - lc.channelState.FundingOutpoint) + lc.log.Warnf("detected restored triggering DLP") } // We must check that we had recovery options to ensure the @@ -3475,9 +3560,9 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // doesn't support data loss protection. In either case // it is not safe for us to keep using the channel, so // we mark it borked and fail the channel. - walletLog.Errorf("ChannelPoint(%v), sync failed: "+ - "local data loss, but no recovery option.", - lc.channelState.FundingOutpoint) + lc.log.Errorf("sync failed: local data loss, but no " + + "recovery option.") + return nil, nil, nil, ErrCannotSyncCommitChains } @@ -3492,9 +3577,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // is behind our view of the chain, then they probably lost some state, // and we'll force close the channel. case msg.RemoteCommitTailHeight+1 < localTailHeight: - walletLog.Errorf("ChannelPoint(%v), sync failed: remote "+ - "believes our tail height is %v, while we have %v!", - lc.channelState.FundingOutpoint, + lc.log.Errorf("sync failed: remote believes our tail height is "+ + "%v, while we have %v!", msg.RemoteCommitTailHeight, localTailHeight) return nil, nil, nil, ErrCommitSyncRemoteDataLoss @@ -3507,9 +3591,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // this case we'll re-send the last revocation message that we sent. // This will be the revocation message for our prior chain tail. 
case msg.RemoteCommitTailHeight+1 == localTailHeight: - walletLog.Debugf("ChannelPoint(%v), sync: remote believes "+ - "our tail height is %v, while we have %v, we owe "+ - "them a revocation", lc.channelState.FundingOutpoint, + lc.log.Debugf("sync: remote believes our tail height is %v, "+ + "while we have %v, we owe them a revocation", msg.RemoteCommitTailHeight, localTailHeight) revocationMsg, err := lc.generateRevocation( @@ -3525,7 +3608,7 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // but died before the signature was sent. We re-transmit our // revocation, but also initiate a state transition to re-sync // them. - if !lc.FullySynced() { + if lc.OweCommitment(true) { commitSig, htlcSigs, _, err := lc.SignNextCommitment() switch { @@ -3555,9 +3638,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // There should be no other possible states. default: - walletLog.Errorf("ChannelPoint(%v), sync failed: remote "+ - "believes our tail height is %v, while we have %v!", - lc.channelState.FundingOutpoint, + lc.log.Errorf("sync failed: remote believes our tail height is "+ + "%v, while we have %v!", msg.RemoteCommitTailHeight, localTailHeight) return nil, nil, nil, ErrCannotSyncCommitChains } @@ -3572,18 +3654,16 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // or not, we will fail the channel, but should not force close it // automatically. case msg.NextLocalCommitHeight > remoteTipHeight+1: - walletLog.Errorf("ChannelPoint(%v), sync failed: remote's "+ - "next commit height is %v, while we believe it is %v!", - lc.channelState.FundingOutpoint, + lc.log.Errorf("sync failed: remote's next commit height is %v, "+ + "while we believe it is %v!", msg.NextLocalCommitHeight, remoteTipHeight) return nil, nil, nil, ErrCannotSyncCommitChains // They are waiting for a state they have already ACKed. 
case msg.NextLocalCommitHeight <= remoteTailHeight: - walletLog.Errorf("ChannelPoint(%v), sync failed: remote's "+ - "next commit height is %v, while we believe it is %v!", - lc.channelState.FundingOutpoint, + lc.log.Errorf("sync failed: remote's next commit height is %v, "+ + "while we believe it is %v!", msg.NextLocalCommitHeight, remoteTipHeight) // They previously ACKed our current tail, and now they are @@ -3598,9 +3678,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // re-send all the updates necessary to recreate this state, along // with the commit sig. case msg.NextLocalCommitHeight == remoteTipHeight: - walletLog.Debugf("ChannelPoint(%v), sync: remote's next "+ - "commit height is %v, while we believe it is %v, we "+ - "owe them a commitment", lc.channelState.FundingOutpoint, + lc.log.Debugf("sync: remote's next commit height is %v, while "+ + "we believe it is %v, we owe them a commitment", msg.NextLocalCommitHeight, remoteTipHeight) // Grab the current remote chain tip from the database. This @@ -3629,9 +3708,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // can have at most two elements. If that's the case, something is // wrong. 
default: - walletLog.Errorf("ChannelPoint(%v), sync failed: remote's "+ - "next commit height is %v, while we believe it is %v!", - lc.channelState.FundingOutpoint, + lc.log.Errorf("sync failed: remote's next commit height is %v, "+ + "while we believe it is %v!", msg.NextLocalCommitHeight, remoteTipHeight) return nil, nil, nil, ErrCannotSyncCommitChains } @@ -3669,9 +3747,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg( if !tweakless && commitPoint != nil && !commitPoint.IsEqual(msg.LocalUnrevokedCommitPoint) { - walletLog.Errorf("ChannelPoint(%v), sync failed: remote "+ - "sent invalid commit point for height %v!", - lc.channelState.FundingOutpoint, + lc.log.Errorf("sync failed: remote sent invalid commit point "+ + "for height %v!", msg.NextLocalCommitHeight) return nil, nil, nil, ErrInvalidLocalUnrevokedCommitPoint } @@ -3689,13 +3766,13 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // HTLCs will be set to the next commitment height. func (lc *LightningChannel) computeView(view *htlcView, remoteChain bool, updateState bool) (lnwire.MilliSatoshi, lnwire.MilliSatoshi, int64, - *htlcView) { + *htlcView, error) { commitChain := lc.localCommitChain - dustLimit := lc.localChanCfg.DustLimit + dustLimit := lc.channelState.LocalChanCfg.DustLimit if remoteChain { commitChain = lc.remoteCommitChain - dustLimit = lc.remoteChanCfg.DustLimit + dustLimit = lc.channelState.RemoteChanCfg.DustLimit } // Since the fetched htlc view will include all updates added after the @@ -3728,32 +3805,40 @@ func (lc *LightningChannel) computeView(view *htlcView, remoteChain bool, // channel constraints to the final commitment state. If any fee // updates are found in the logs, the commitment fee rate should be // changed, so we'll also set the feePerKw to this new value. 
- filteredHTLCView := lc.evaluateHTLCView(view, &ourBalance, + filteredHTLCView, err := lc.evaluateHTLCView(view, &ourBalance, &theirBalance, nextHeight, remoteChain, updateState) + if err != nil { + return 0, 0, 0, nil, err + } feePerKw := filteredHTLCView.feePerKw // Now go through all HTLCs at this stage, to calculate the total // weight, needed to calculate the transaction fee. var totalHtlcWeight int64 for _, htlc := range filteredHTLCView.ourUpdates { - if htlcIsDust(remoteChain, !remoteChain, feePerKw, - htlc.Amount.ToSatoshis(), dustLimit) { + if htlcIsDust( + lc.channelState.ChanType, remoteChain, !remoteChain, + feePerKw, htlc.Amount.ToSatoshis(), dustLimit, + ) { continue } - totalHtlcWeight += input.HtlcWeight + totalHtlcWeight += input.HTLCWeight } for _, htlc := range filteredHTLCView.theirUpdates { - if htlcIsDust(!remoteChain, !remoteChain, feePerKw, - htlc.Amount.ToSatoshis(), dustLimit) { + if htlcIsDust( + lc.channelState.ChanType, !remoteChain, !remoteChain, + feePerKw, htlc.Amount.ToSatoshis(), dustLimit, + ) { continue } - totalHtlcWeight += input.HtlcWeight + totalHtlcWeight += input.HTLCWeight } - totalCommitWeight := input.CommitWeight + totalHtlcWeight - return ourBalance, theirBalance, totalCommitWeight, filteredHTLCView + totalCommitWeight := CommitWeight(lc.channelState.ChanType) + + totalHtlcWeight + return ourBalance, theirBalance, totalCommitWeight, filteredHTLCView, nil } // genHtlcSigValidationJobs generates a series of signatures verification jobs @@ -3762,10 +3847,12 @@ func (lc *LightningChannel) computeView(view *htlcView, remoteChain bool, // directly into the pool of workers. 
func genHtlcSigValidationJobs(localCommitmentView *commitment, keyRing *CommitmentKeyRing, htlcSigs []lnwire.Sig, + chanType channeldb.ChannelType, localChanCfg, remoteChanCfg *channeldb.ChannelConfig) ([]VerifyJob, error) { txHash := localCommitmentView.txn.TxHash() feePerKw := localCommitmentView.feePerKw + sigHashType := HtlcSigHashType(chanType) // With the required state generated, we'll create a slice with large // enough capacity to hold verification jobs for all HTLC's in this @@ -3805,12 +3892,14 @@ func genHtlcSigValidationJobs(localCommitmentView *commitment, Index: uint32(htlc.localOutputIndex), } - htlcFee := htlcSuccessFee(feePerKw) + htlcFee := HtlcSuccessFee(chanType, feePerKw) outputAmt := htlc.Amount.ToSatoshis() - htlcFee - successTx, err := createHtlcSuccessTx(op, - outputAmt, uint32(localChanCfg.CsvDelay), - keyRing.RevocationKey, keyRing.DelayKey) + successTx, err := createHtlcSuccessTx( + chanType, op, outputAmt, + uint32(localChanCfg.CsvDelay), + keyRing.RevocationKey, keyRing.ToLocalKey, + ) if err != nil { return nil, err } @@ -3818,7 +3907,7 @@ func genHtlcSigValidationJobs(localCommitmentView *commitment, hashCache := txscript.NewTxSigHashes(successTx) sigHash, err := txscript.CalcWitnessSigHash( htlc.ourWitnessScript, hashCache, - txscript.SigHashAll, successTx, 0, + sigHashType, successTx, 0, int64(htlc.Amount.ToSatoshis()), ) if err != nil { @@ -3857,13 +3946,13 @@ func genHtlcSigValidationJobs(localCommitmentView *commitment, Index: uint32(htlc.localOutputIndex), } - htlcFee := htlcTimeoutFee(feePerKw) + htlcFee := HtlcTimeoutFee(chanType, feePerKw) outputAmt := htlc.Amount.ToSatoshis() - htlcFee - timeoutTx, err := createHtlcTimeoutTx(op, - outputAmt, htlc.Timeout, + timeoutTx, err := createHtlcTimeoutTx( + chanType, op, outputAmt, htlc.Timeout, uint32(localChanCfg.CsvDelay), - keyRing.RevocationKey, keyRing.DelayKey, + keyRing.RevocationKey, keyRing.ToLocalKey, ) if err != nil { return nil, err @@ -3872,7 +3961,7 @@ func 
genHtlcSigValidationJobs(localCommitmentView *commitment, hashCache := txscript.NewTxSigHashes(timeoutTx) sigHash, err := txscript.CalcWitnessSigHash( htlc.ourWitnessScript, hashCache, - txscript.SigHashAll, timeoutTx, 0, + sigHashType, timeoutTx, 0, int64(htlc.Amount.ToSatoshis()), ) if err != nil { @@ -3991,6 +4080,18 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig, lc.Lock() defer lc.Unlock() + // Check for empty commit sig. Because of a previously existing bug, it + // is possible that we receive an empty commit sig from nodes running an + // older version. This is a relaxation of the spec, but it is still + // possible to handle it. To not break any channels with those older + // nodes, we just log the event. This check is also not totally + // reliable, because it could be that we've sent out a new sig, but the + // remote hasn't received it yet. We could then falsely assume that they + // should add our updates to their remote commitment tx. + if !lc.oweCommitment(false) { + lc.log.Warnf("empty commit sig message received") + } + // Determine the last update on the local log that has been locked in. localACKedIndex := lc.remoteCommitChain.tail().ourMessageIndex localHtlcIndex := lc.remoteCommitChain.tail().ourHtlcIndex @@ -3999,7 +4100,7 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig, // the constraints we specified during initial channel setup. If not, // then we'll abort the channel as they've violated our constraints. 
err := lc.validateCommitmentSanity( - lc.remoteUpdateLog.logIndex, localACKedIndex, false, nil, + lc.remoteUpdateLog.logIndex, localACKedIndex, false, nil, nil, ) if err != nil { return err @@ -4016,8 +4117,8 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig, } commitPoint := input.ComputeCommitmentPoint(commitSecret[:]) keyRing := DeriveCommitmentKeys( - commitPoint, true, lc.channelState.ChanType.IsTweakless(), - lc.localChanCfg, lc.remoteChanCfg, + commitPoint, true, lc.channelState.ChanType, + &lc.channelState.LocalChanCfg, &lc.channelState.RemoteChanCfg, ) // With the current commitment point re-calculated, construct the new @@ -4033,13 +4134,13 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig, return err } - walletLog.Tracef("ChannelPoint(%v): extending local chain to height %v, "+ + lc.log.Tracef("extending local chain to height %v, "+ "local_log=%v, remote_log=%v", - lc.channelState.FundingOutpoint, localCommitmentView.height, + localCommitmentView.height, localACKedIndex, lc.remoteUpdateLog.logIndex) - walletLog.Tracef("ChannelPoint(%v): local chain: our_balance=%v, "+ - "their_balance=%v, commit_tx: %v", lc.channelState.FundingOutpoint, + lc.log.Tracef("local chain: our_balance=%v, "+ + "their_balance=%v, commit_tx: %v", localCommitmentView.ourBalance, localCommitmentView.theirBalance, newLogClosure(func() string { return spew.Sdump(localCommitmentView.txn) @@ -4065,8 +4166,9 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig, // pool to verify each of the HTLc signatures presented. Once // generated, we'll submit these jobs to the worker pool. 
verifyJobs, err := genHtlcSigValidationJobs( - localCommitmentView, keyRing, htlcSigs, lc.localChanCfg, - lc.remoteChanCfg, + localCommitmentView, keyRing, htlcSigs, + lc.channelState.ChanType, &lc.channelState.LocalChanCfg, + &lc.channelState.RemoteChanCfg, ) if err != nil { return err @@ -4079,8 +4181,8 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig, // we'll ensure that the newly constructed commitment state has a valid // signature. verifyKey := btcec.PublicKey{ - X: lc.remoteChanCfg.MultiSigKey.PubKey.X, - Y: lc.remoteChanCfg.MultiSigKey.PubKey.Y, + X: lc.channelState.RemoteChanCfg.MultiSigKey.PubKey.X, + Y: lc.channelState.RemoteChanCfg.MultiSigKey.PubKey.Y, Curve: btcec.S256(), } cSig, err := commitSig.ToSignature() @@ -4144,24 +4246,80 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig, return nil } -// FullySynced returns a boolean value reflecting if both commitment chains -// (remote+local) are fully in sync. Both commitment chains are fully in sync -// if the tip of each chain includes the latest committed changes from both -// sides. -func (lc *LightningChannel) FullySynced() bool { +// OweCommitment returns a boolean value reflecting whether we need to send +// out a commitment signature because there are outstanding local updates and/or +// updates in the local commit tx that aren't reflected in the remote commit tx +// yet. +func (lc *LightningChannel) OweCommitment(local bool) bool { lc.RLock() defer lc.RUnlock() - lastLocalCommit := lc.localCommitChain.tip() - lastRemoteCommit := lc.remoteCommitChain.tip() + return lc.oweCommitment(local) +} + +// oweCommitment is the internal version of OweCommitment. This function expects +// to be executed with a lock held. 
+func (lc *LightningChannel) oweCommitment(local bool) bool { + var ( + remoteUpdatesPending, localUpdatesPending bool + + lastLocalCommit = lc.localCommitChain.tip() + lastRemoteCommit = lc.remoteCommitChain.tip() + + perspective string + ) + + if local { + perspective = "local" + + // There are local updates pending if our local update log is + // not in sync with our remote commitment tx. + localUpdatesPending = lc.localUpdateLog.logIndex != + lastRemoteCommit.ourMessageIndex - localUpdatesSynced := (lastLocalCommit.ourMessageIndex == - lastRemoteCommit.ourMessageIndex) + // There are remote updates pending if their remote commitment + // tx (our local commitment tx) contains updates that we don't + // have added to our remote commitment tx yet. + remoteUpdatesPending = lastLocalCommit.theirMessageIndex != + lastRemoteCommit.theirMessageIndex + + } else { + perspective = "remote" + + // There are local updates pending (local updates from the + // perspective of the remote party) if the remote party has + // updates to their remote tx pending for which they haven't + // signed yet. + localUpdatesPending = lc.remoteUpdateLog.logIndex != + lastLocalCommit.theirMessageIndex + + // There are remote updates pending (remote updates from the + // perspective of the remote party) if we have updates on our + // remote commitment tx that they haven't added to theirs yet. + remoteUpdatesPending = lastRemoteCommit.ourMessageIndex != + lastLocalCommit.ourMessageIndex + } + + // If any of the conditions above is true, we owe a commitment + // signature. 
+ oweCommitment := localUpdatesPending || remoteUpdatesPending - remoteUpdatesSynced := (lastLocalCommit.theirMessageIndex == - lastRemoteCommit.theirMessageIndex) + lc.log.Tracef("%v owes commit: %v (local updates: %v, "+ + "remote updates %v)", perspective, oweCommitment, + localUpdatesPending, remoteUpdatesPending) - return localUpdatesSynced && remoteUpdatesSynced + return oweCommitment +} + +// PendingLocalUpdateCount returns the number of local updates that still need +// to be applied to the remote commitment tx. +func (lc *LightningChannel) PendingLocalUpdateCount() uint64 { + lc.RLock() + defer lc.RUnlock() + + lastRemoteCommit := lc.remoteCommitChain.tip() + + return lc.localUpdateLog.logIndex - lastRemoteCommit.ourMessageIndex } // RevokeCurrentCommitment revokes the next lowest unrevoked commitment @@ -4181,8 +4339,8 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, []c return nil, nil, err } - walletLog.Tracef("ChannelPoint(%v): revoking height=%v, now at height=%v", - lc.channelState.FundingOutpoint, lc.localCommitChain.tail().height, + lc.log.Tracef("revoking height=%v, now at height=%v", + lc.localCommitChain.tail().height, lc.currentHeight+1) // Advance our tail, as we've revoked our previous state. @@ -4193,15 +4351,24 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, []c // persistent storage. chainTail := lc.localCommitChain.tail() newCommitment := chainTail.toDiskCommit(true) - err = lc.channelState.UpdateCommitment(newCommitment) + + // Get the unsigned acked remotes updates that are currently in memory. + // We need them after a restart to sync our remote commitment with what + // is committed locally. 
+ unsignedAckedUpdates := lc.getUnsignedAckedUpdates() + + err = lc.channelState.UpdateCommitment( + newCommitment, unsignedAckedUpdates, + ) if err != nil { return nil, nil, err } - walletLog.Tracef("ChannelPoint(%v): state transition accepted: "+ - "our_balance=%v, their_balance=%v", - lc.channelState.FundingOutpoint, chainTail.ourBalance, - chainTail.theirBalance) + lc.log.Tracef("state transition accepted: "+ + "our_balance=%v, their_balance=%v, unsigned_acked_updates=%v", + chainTail.ourBalance, + chainTail.theirBalance, + len(unsignedAckedUpdates)) revocationMsg.ChanID = lnwire.NewChanIDFromOutPoint( &lc.channelState.FundingOutpoint, @@ -4260,8 +4427,8 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) ( lc.channelState.RemoteCurrentRevocation = lc.channelState.RemoteNextRevocation lc.channelState.RemoteNextRevocation = revMsg.NextRevocationKey - walletLog.Tracef("ChannelPoint(%v): remote party accepted state transition, "+ - "revoked height %v, now at %v", lc.channelState.FundingOutpoint, + lc.log.Tracef("remote party accepted state transition, revoked height "+ + "%v, now at %v", lc.remoteCommitChain.tail().height, lc.remoteCommitChain.tail().height+1) @@ -4509,6 +4676,12 @@ func (lc *LightningChannel) InitNextRevocation(revKey *btcec.PublicKey) error { // The additional openKey argument corresponds to the incoming CircuitKey of the // committed circuit for this HTLC. This value should never be nil. // +// Note that AddHTLC doesn't reserve the HTLC fee for future payment (like +// AvailableBalance does), so one could get into the "stuck channel" state by +// sending dust HTLCs. +// TODO(halseth): fix this either by using additional reserve, or better commit +// format. See https://github.com/lightningnetwork/lightning-rfc/issues/728 +// // NOTE: It is okay for sourceRef to be nil when unit testing the wallet. 
func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC, openKey *channeldb.CircuitKey) (uint64, error) { @@ -4528,10 +4701,26 @@ func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC, } // Make sure adding this HTLC won't violate any of the constraints we - // must keep on our commitment transaction. + // must keep on the commitment transactions. remoteACKedIndex := lc.localCommitChain.tail().theirMessageIndex + + // First we'll check whether this HTLC can be added to the remote + // commitment transaction without violation any of the constraints. err := lc.validateCommitmentSanity( - remoteACKedIndex, lc.localUpdateLog.logIndex, true, pd, + remoteACKedIndex, lc.localUpdateLog.logIndex, true, pd, nil, + ) + if err != nil { + return 0, err + } + + // We must also check whether it can be added to our own commitment + // transaction, or the remote node will refuse to sign. This is not + // totally bullet proof, as the remote might be adding updates + // concurrently, but if we fail this check there is for sure not + // possible for us to add the HTLC. + err = lc.validateCommitmentSanity( + lc.remoteUpdateLog.logIndex, lc.localUpdateLog.logIndex, + false, pd, nil, ) if err != nil { return 0, err @@ -4564,6 +4753,17 @@ func (lc *LightningChannel) ReceiveHTLC(htlc *lnwire.UpdateAddHTLC) (uint64, err OnionBlob: htlc.OnionBlob[:], } + localACKedIndex := lc.remoteCommitChain.tail().ourMessageIndex + + // Clamp down on the number of HTLC's we can receive by checking the + // commitment sanity. 
+ err := lc.validateCommitmentSanity( + lc.remoteUpdateLog.logIndex, localACKedIndex, false, nil, pd, + ) + if err != nil { + return 0, err + } + lc.remoteUpdateLog.appendHtlc(pd) return pd.HtlcIndex, nil @@ -4846,99 +5046,16 @@ func (lc *LightningChannel) ShortChanID() lnwire.ShortChannelID { return lc.channelState.ShortChanID() } -// genHtlcScript generates the proper P2WSH public key scripts for the HTLC -// output modified by two-bits denoting if this is an incoming HTLC, and if the -// HTLC is being applied to their commitment transaction or ours. -func genHtlcScript(isIncoming, ourCommit bool, timeout uint32, rHash [32]byte, - keyRing *CommitmentKeyRing) ([]byte, []byte, error) { - - var ( - witnessScript []byte - err error - ) - - // Generate the proper redeem scripts for the HTLC output modified by - // two-bits denoting if this is an incoming HTLC, and if the HTLC is - // being applied to their commitment transaction or ours. - switch { - // The HTLC is paying to us, and being applied to our commitment - // transaction. So we need to use the receiver's version of HTLC the - // script. - case isIncoming && ourCommit: - witnessScript, err = input.ReceiverHTLCScript(timeout, - keyRing.RemoteHtlcKey, keyRing.LocalHtlcKey, - keyRing.RevocationKey, rHash[:]) - - // We're being paid via an HTLC by the remote party, and the HTLC is - // being added to their commitment transaction, so we use the sender's - // version of the HTLC script. - case isIncoming && !ourCommit: - witnessScript, err = input.SenderHTLCScript(keyRing.RemoteHtlcKey, - keyRing.LocalHtlcKey, keyRing.RevocationKey, rHash[:]) - - // We're sending an HTLC which is being added to our commitment - // transaction. Therefore, we need to use the sender's version of the - // HTLC script. 
- case !isIncoming && ourCommit: - witnessScript, err = input.SenderHTLCScript(keyRing.LocalHtlcKey, - keyRing.RemoteHtlcKey, keyRing.RevocationKey, rHash[:]) - - // Finally, we're paying the remote party via an HTLC, which is being - // added to their commitment transaction. Therefore, we use the - // receiver's version of the HTLC script. - case !isIncoming && !ourCommit: - witnessScript, err = input.ReceiverHTLCScript(timeout, keyRing.LocalHtlcKey, - keyRing.RemoteHtlcKey, keyRing.RevocationKey, rHash[:]) - } - if err != nil { - return nil, nil, err - } - - // Now that we have the redeem scripts, create the P2WSH public key - // script for the output itself. - htlcP2WSH, err := input.WitnessScriptHash(witnessScript) - if err != nil { - return nil, nil, err - } - - return htlcP2WSH, witnessScript, nil +// LocalUpfrontShutdownScript returns the local upfront shutdown script for the +// channel. If it was not set, an empty byte array is returned. +func (lc *LightningChannel) LocalUpfrontShutdownScript() lnwire.DeliveryAddress { + return lc.channelState.LocalShutdownScript } -// addHTLC adds a new HTLC to the passed commitment transaction. One of four -// full scripts will be generated for the HTLC output depending on if the HTLC -// is incoming and if it's being applied to our commitment transaction or that -// of the remote node's. Additionally, in order to be able to efficiently -// locate the added HTLC on the commitment transaction from the -// PaymentDescriptor that generated it, the generated script is stored within -// the descriptor itself. 
-func (lc *LightningChannel) addHTLC(commitTx *wire.MsgTx, ourCommit bool, - isIncoming bool, paymentDesc *PaymentDescriptor, - keyRing *CommitmentKeyRing) error { - - timeout := paymentDesc.Timeout - rHash := paymentDesc.RHash - - p2wsh, witnessScript, err := genHtlcScript(isIncoming, ourCommit, - timeout, rHash, keyRing) - if err != nil { - return err - } - - // Add the new HTLC outputs to the respective commitment transactions. - amountPending := int64(paymentDesc.Amount.ToSatoshis()) - commitTx.AddTxOut(wire.NewTxOut(amountPending, p2wsh)) - - // Store the pkScript of this particular PaymentDescriptor so we can - // quickly locate it within the commitment transaction later. - if ourCommit { - paymentDesc.ourPkScript = p2wsh - paymentDesc.ourWitnessScript = witnessScript - } else { - paymentDesc.theirPkScript = p2wsh - paymentDesc.theirWitnessScript = witnessScript - } - - return nil +// RemoteUpfrontShutdownScript returns the remote upfront shutdown script for the +// channel. If it was not set, an empty byte array is returned. +func (lc *LightningChannel) RemoteUpfrontShutdownScript() lnwire.DeliveryAddress { + return lc.channelState.RemoteShutdownScript } // getSignedCommitTx function take the latest commitment transaction and @@ -4948,22 +5065,28 @@ func (lc *LightningChannel) getSignedCommitTx() (*wire.MsgTx, error) { // for the transaction. localCommit := lc.channelState.LocalCommitment commitTx := localCommit.CommitTx.Copy() - theirSig := append(localCommit.CommitSig, byte(txscript.SigHashAll)) + + theirSig, err := btcec.ParseDERSignature( + localCommit.CommitSig, btcec.S256(), + ) + if err != nil { + return nil, err + } // With this, we then generate the full witness so the caller can // broadcast a fully signed transaction. 
lc.signDesc.SigHashes = txscript.NewTxSigHashes(commitTx) - ourSigRaw, err := lc.Signer.SignOutputRaw(commitTx, lc.signDesc) + ourSig, err := lc.Signer.SignOutputRaw(commitTx, lc.signDesc) if err != nil { return nil, err } - ourSig := append(ourSigRaw, byte(txscript.SigHashAll)) - // With the final signature generated, create the witness stack // required to spend from the multi-sig output. - ourKey := lc.localChanCfg.MultiSigKey.PubKey.SerializeCompressed() - theirKey := lc.remoteChanCfg.MultiSigKey.PubKey.SerializeCompressed() + ourKey := lc.channelState.LocalChanCfg.MultiSigKey.PubKey. + SerializeCompressed() + theirKey := lc.channelState.RemoteChanCfg.MultiSigKey.PubKey. + SerializeCompressed() commitTx.TxIn[0].Witness = input.SpendMultiSig( lc.signDesc.WitnessScript, ourKey, @@ -4974,8 +5097,8 @@ func (lc *LightningChannel) getSignedCommitTx() (*wire.MsgTx, error) { } // CommitOutputResolution carries the necessary information required to allow -// us to sweep our direct commitment output in the case that either party goes -// to chain. +// us to sweep our commitment output in the case that either party goes to +// chain. type CommitOutputResolution struct { // SelfOutPoint is the full outpoint that points to out pay-to-self // output within the closing commitment transaction. @@ -4987,8 +5110,7 @@ type CommitOutputResolution struct { // MaturityDelay is the relative time-lock, in blocks for all outputs // that pay to the local party within the broadcast commitment - // transaction. This value will be non-zero iff, this output was on our - // commitment transaction. + // transaction. MaturityDelay uint32 } @@ -5026,6 +5148,11 @@ type UnilateralCloseSummary struct { // RemoteCommit is the exact commitment state that the remote party // broadcast. RemoteCommit channeldb.ChannelCommitment + + // AnchorResolution contains the data required to sweep our anchor + // output. If the channel type doesn't include anchors, the value of + // this field will be nil. 
+ AnchorResolution *AnchorResolution } // NewUnilateralCloseSummary creates a new summary that provides the caller @@ -5046,18 +5173,18 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si // First, we'll generate the commitment point and the revocation point // so we can re-construct the HTLC state and also our payment key. - tweaklessCommit := chanState.ChanType.IsTweakless() keyRing := DeriveCommitmentKeys( - commitPoint, false, tweaklessCommit, &chanState.LocalChanCfg, - &chanState.RemoteChanCfg, + commitPoint, false, chanState.ChanType, + &chanState.LocalChanCfg, &chanState.RemoteChanCfg, ) // Next, we'll obtain HTLC resolutions for all the outgoing HTLC's we // had on their commitment transaction. htlcResolutions, err := extractHtlcResolutions( - SatPerKWeight(remoteCommit.FeePerKw), false, signer, + chainfee.SatPerKWeight(remoteCommit.FeePerKw), false, signer, remoteCommit.Htlcs, keyRing, &chanState.LocalChanCfg, &chanState.RemoteChanCfg, *commitSpend.SpenderTxHash, + chanState.ChanType, ) if err != nil { return nil, fmt.Errorf("unable to create htlc "+ @@ -5069,7 +5196,9 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si // Before we can generate the proper sign descriptor, we'll need to // locate the output index of our non-delayed output on the commitment // transaction. 
- selfP2WKH, err := input.CommitScriptUnencumbered(keyRing.NoDelayKey) + selfScript, maturityDelay, err := CommitScriptToRemote( + chanState.ChanType, keyRing.ToRemoteKey, + ) if err != nil { return nil, fmt.Errorf("unable to create self commit "+ "script: %v", err) @@ -5081,7 +5210,7 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si ) for outputIndex, txOut := range commitTxBroadcast.TxOut { - if bytes.Equal(txOut.PkScript, selfP2WKH) { + if bytes.Equal(txOut.PkScript, selfScript.PkScript) { selfPoint = &wire.OutPoint{ Hash: *commitSpend.SpenderTxHash, Index: uint32(outputIndex), @@ -5102,20 +5231,14 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si SelfOutputSignDesc: input.SignDescriptor{ KeyDesc: localPayBase, SingleTweak: keyRing.LocalCommitKeyTweak, - WitnessScript: selfP2WKH, + WitnessScript: selfScript.WitnessScript, Output: &wire.TxOut{ Value: localBalance, - PkScript: selfP2WKH, + PkScript: selfScript.PkScript, }, HashType: txscript.SigHashAll, }, - MaturityDelay: 0, - } - - // If this is a tweakless commitment, then we can safely blank - // out the SingleTweak value as it isn't needed. - if tweaklessCommit { - commitResolution.SelfOutputSignDesc.SingleTweak = nil + MaturityDelay: maturityDelay, } } @@ -5144,12 +5267,20 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si closeSummary.LastChanSyncMsg = chanSync } + anchorResolution, err := NewAnchorResolution( + chanState, commitTxBroadcast, + ) + if err != nil { + return nil, err + } + return &UnilateralCloseSummary{ SpendDetail: commitSpend, ChannelCloseSummary: closeSummary, CommitResolution: commitResolution, HtlcResolutions: htlcResolutions, RemoteCommit: remoteCommit, + AnchorResolution: anchorResolution, }, nil } @@ -5179,7 +5310,8 @@ type IncomingHtlcResolution struct { // pass after the SignedSuccessTx is confirmed in the chain before the // output can be swept. 
// - // NOTE: If SignedSuccessTx is nil, then this field isn't needed. + // NOTE: If SignedTimeoutTx is nil, then this field denotes the CSV + // delay needed to spend from the commitment transaction. CsvDelay uint32 // ClaimOutpoint is the final outpoint that needs to be spent in order @@ -5219,7 +5351,8 @@ type OutgoingHtlcResolution struct { // pass after the SignedTimeoutTx is confirmed in the chain before the // output can be swept. // - // NOTE: If SignedTimeoutTx is nil, then this field isn't needed. + // NOTE: If SignedTimeoutTx is nil, then this field denotes the CSV + // delay needed to spend from the commitment transaction. CsvDelay uint32 // ClaimOutpoint is the final outpoint that needs to be spent in order @@ -5252,34 +5385,31 @@ type HtlcResolutions struct { // newOutgoingHtlcResolution generates a new HTLC resolution capable of // allowing the caller to sweep an outgoing HTLC present on either their, or // the remote party's commitment transaction. -func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.ChannelConfig, - commitHash chainhash.Hash, htlc *channeldb.HTLC, keyRing *CommitmentKeyRing, - feePerKw SatPerKWeight, dustLimit btcutil.Amount, csvDelay uint32, localCommit bool, -) (*OutgoingHtlcResolution, error) { +func newOutgoingHtlcResolution(signer input.Signer, + localChanCfg *channeldb.ChannelConfig, commitHash chainhash.Hash, + htlc *channeldb.HTLC, keyRing *CommitmentKeyRing, + feePerKw chainfee.SatPerKWeight, csvDelay uint32, + localCommit bool, chanType channeldb.ChannelType) (*OutgoingHtlcResolution, error) { op := wire.OutPoint{ Hash: commitHash, Index: uint32(htlc.OutputIndex), } + // First, we'll re-generate the script used to send the HTLC to + // the remote party within their commitment transaction. 
+ htlcScriptHash, htlcScript, err := genHtlcScript( + chanType, false, localCommit, htlc.RefundTimeout, htlc.RHash, + keyRing, + ) + if err != nil { + return nil, err + } + // If we're spending this HTLC output from the remote node's // commitment, then we won't need to go to the second level as our // outputs don't have a CSV delay. if !localCommit { - // First, we'll re-generate the script used to send the HTLC to - // the remote party within their commitment transaction. - htlcReceiverScript, err := input.ReceiverHTLCScript(htlc.RefundTimeout, - keyRing.LocalHtlcKey, keyRing.RemoteHtlcKey, - keyRing.RevocationKey, htlc.RHash[:], - ) - if err != nil { - return nil, err - } - htlcScriptHash, err := input.WitnessScriptHash(htlcReceiverScript) - if err != nil { - return nil, err - } - // With the script generated, we can completely populated the // SignDescriptor needed to sweep the output. return &OutgoingHtlcResolution{ @@ -5288,13 +5418,14 @@ func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan SweepSignDesc: input.SignDescriptor{ KeyDesc: localChanCfg.HtlcBasePoint, SingleTweak: keyRing.LocalHtlcKeyTweak, - WitnessScript: htlcReceiverScript, + WitnessScript: htlcScript, Output: &wire.TxOut{ PkScript: htlcScriptHash, Value: int64(htlc.Amt.ToSatoshis()), }, HashType: txscript.SigHashAll, }, + CsvDelay: HtlcSecondLevelInputSequence(chanType), }, nil } @@ -5304,14 +5435,14 @@ func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan // In order to properly reconstruct the HTLC transaction, we'll need to // re-calculate the fee required at this state, so we can add the // correct output value amount to the transaction. - htlcFee := htlcTimeoutFee(feePerKw) + htlcFee := HtlcTimeoutFee(chanType, feePerKw) secondLevelOutputAmt := htlc.Amt.ToSatoshis() - htlcFee // With the fee calculated, re-construct the second level timeout // transaction. 
timeoutTx, err := createHtlcTimeoutTx( - op, secondLevelOutputAmt, htlc.RefundTimeout, csvDelay, - keyRing.RevocationKey, keyRing.DelayKey, + chanType, op, secondLevelOutputAmt, htlc.RefundTimeout, + csvDelay, keyRing.RevocationKey, keyRing.ToLocalKey, ) if err != nil { return nil, err @@ -5320,15 +5451,10 @@ func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan // With the transaction created, we can generate a sign descriptor // that's capable of generating the signature required to spend the // HTLC output using the timeout transaction. - htlcCreationScript, err := input.SenderHTLCScript(keyRing.LocalHtlcKey, - keyRing.RemoteHtlcKey, keyRing.RevocationKey, htlc.RHash[:]) - if err != nil { - return nil, err - } timeoutSignDesc := input.SignDescriptor{ KeyDesc: localChanCfg.HtlcBasePoint, SingleTweak: keyRing.LocalHtlcKeyTweak, - WitnessScript: htlcCreationScript, + WitnessScript: htlcScript, Output: &wire.TxOut{ Value: int64(htlc.Amt.ToSatoshis()), }, @@ -5337,10 +5463,16 @@ func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan InputIndex: 0, } + htlcSig, err := btcec.ParseDERSignature(htlc.Signature, btcec.S256()) + if err != nil { + return nil, err + } + // With the sign desc created, we can now construct the full witness // for the timeout transaction, and populate it as well. + sigHashType := HtlcSigHashType(chanType) timeoutWitness, err := input.SenderHtlcSpendTimeout( - htlc.Signature, signer, &timeoutSignDesc, timeoutTx, + htlcSig, sigHashType, signer, &timeoutSignDesc, timeoutTx, ) if err != nil { return nil, err @@ -5351,12 +5483,12 @@ func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan // transaction creates so we can generate the signDesc required to // complete the claim process after a delay period. 
htlcSweepScript, err := input.SecondLevelHtlcScript( - keyRing.RevocationKey, keyRing.DelayKey, csvDelay, + keyRing.RevocationKey, keyRing.ToLocalKey, csvDelay, ) if err != nil { return nil, err } - htlcScriptHash, err := input.WitnessScriptHash(htlcSweepScript) + htlcSweepScriptHash, err := input.WitnessScriptHash(htlcSweepScript) if err != nil { return nil, err } @@ -5377,7 +5509,7 @@ func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan SingleTweak: localDelayTweak, WitnessScript: htlcSweepScript, Output: &wire.TxOut{ - PkScript: htlcScriptHash, + PkScript: htlcSweepScriptHash, Value: int64(secondLevelOutputAmt), }, HashType: txscript.SigHashAll, @@ -5392,48 +5524,45 @@ func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan // they can just sweep the output immediately with knowledge of the pre-image. // // TODO(roasbeef) consolidate code with above func -func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.ChannelConfig, - commitHash chainhash.Hash, htlc *channeldb.HTLC, keyRing *CommitmentKeyRing, - feePerKw SatPerKWeight, dustLimit btcutil.Amount, csvDelay uint32, - localCommit bool) (*IncomingHtlcResolution, error) { +func newIncomingHtlcResolution(signer input.Signer, + localChanCfg *channeldb.ChannelConfig, commitHash chainhash.Hash, + htlc *channeldb.HTLC, keyRing *CommitmentKeyRing, + feePerKw chainfee.SatPerKWeight, csvDelay uint32, localCommit bool, + chanType channeldb.ChannelType) (*IncomingHtlcResolution, error) { op := wire.OutPoint{ Hash: commitHash, Index: uint32(htlc.OutputIndex), } + // First, we'll re-generate the script the remote party used to + // send the HTLC to us in their commitment transaction. 
+ htlcScriptHash, htlcScript, err := genHtlcScript( + chanType, true, localCommit, htlc.RefundTimeout, htlc.RHash, + keyRing, + ) + if err != nil { + return nil, err + } + // If we're spending this output from the remote node's commitment, // then we can skip the second layer and spend the output directly. if !localCommit { - // First, we'll re-generate the script the remote party used to - // send the HTLC to us in their commitment transaction. - htlcSenderScript, err := input.SenderHTLCScript( - keyRing.RemoteHtlcKey, keyRing.LocalHtlcKey, - keyRing.RevocationKey, htlc.RHash[:], - ) - if err != nil { - return nil, err - } - htlcScriptHash, err := input.WitnessScriptHash(htlcSenderScript) - if err != nil { - return nil, err - } - // With the script generated, we can completely populated the // SignDescriptor needed to sweep the output. return &IncomingHtlcResolution{ ClaimOutpoint: op, - CsvDelay: csvDelay, SweepSignDesc: input.SignDescriptor{ KeyDesc: localChanCfg.HtlcBasePoint, SingleTweak: keyRing.LocalHtlcKeyTweak, - WitnessScript: htlcSenderScript, + WitnessScript: htlcScript, Output: &wire.TxOut{ PkScript: htlcScriptHash, Value: int64(htlc.Amt.ToSatoshis()), }, HashType: txscript.SigHashAll, }, + CsvDelay: HtlcSecondLevelInputSequence(chanType), }, nil } @@ -5441,11 +5570,11 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan // First, we'll reconstruct the original HTLC success transaction, // taking into account the fee rate used. 
- htlcFee := htlcSuccessFee(feePerKw) + htlcFee := HtlcSuccessFee(chanType, feePerKw) secondLevelOutputAmt := htlc.Amt.ToSatoshis() - htlcFee successTx, err := createHtlcSuccessTx( - op, secondLevelOutputAmt, csvDelay, - keyRing.RevocationKey, keyRing.DelayKey, + chanType, op, secondLevelOutputAmt, csvDelay, + keyRing.RevocationKey, keyRing.ToLocalKey, ) if err != nil { return nil, err @@ -5453,17 +5582,10 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan // Once we've created the second-level transaction, we'll generate the // SignDesc needed spend the HTLC output using the success transaction. - htlcCreationScript, err := input.ReceiverHTLCScript(htlc.RefundTimeout, - keyRing.RemoteHtlcKey, keyRing.LocalHtlcKey, - keyRing.RevocationKey, htlc.RHash[:], - ) - if err != nil { - return nil, err - } successSignDesc := input.SignDescriptor{ KeyDesc: localChanCfg.HtlcBasePoint, SingleTweak: keyRing.LocalHtlcKeyTweak, - WitnessScript: htlcCreationScript, + WitnessScript: htlcScript, Output: &wire.TxOut{ Value: int64(htlc.Amt.ToSatoshis()), }, @@ -5472,12 +5594,18 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan InputIndex: 0, } + htlcSig, err := btcec.ParseDERSignature(htlc.Signature, btcec.S256()) + if err != nil { + return nil, err + } + // Next, we'll construct the full witness needed to satisfy the input of // the success transaction. Don't specify the preimage yet. The preimage // will be supplied by the contract resolver, either directly or when it // becomes known. 
+ sigHashType := HtlcSigHashType(chanType) successWitness, err := input.ReceiverHtlcSpendRedeem( - htlc.Signature, nil, signer, &successSignDesc, successTx, + htlcSig, sigHashType, nil, signer, &successSignDesc, successTx, ) if err != nil { return nil, err @@ -5488,12 +5616,12 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan // creates so we can generate the proper signDesc to sweep it after the // CSV delay has passed. htlcSweepScript, err := input.SecondLevelHtlcScript( - keyRing.RevocationKey, keyRing.DelayKey, csvDelay, + keyRing.RevocationKey, keyRing.ToLocalKey, csvDelay, ) if err != nil { return nil, err } - htlcScriptHash, err := input.WitnessScriptHash(htlcSweepScript) + htlcSweepScriptHash, err := input.WitnessScriptHash(htlcSweepScript) if err != nil { return nil, err } @@ -5513,7 +5641,7 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan SingleTweak: localDelayTweak, WitnessScript: htlcSweepScript, Output: &wire.TxOut{ - PkScript: htlcScriptHash, + PkScript: htlcSweepScriptHash, Value: int64(secondLevelOutputAmt), }, HashType: txscript.SigHashAll, @@ -5548,10 +5676,11 @@ func (r *OutgoingHtlcResolution) HtlcPoint() wire.OutPoint { // extractHtlcResolutions creates a series of outgoing HTLC resolutions, and // the local key used when generating the HTLC scrips. This function is to be // used in two cases: force close, or a unilateral close. -func extractHtlcResolutions(feePerKw SatPerKWeight, ourCommit bool, +func extractHtlcResolutions(feePerKw chainfee.SatPerKWeight, ourCommit bool, signer input.Signer, htlcs []channeldb.HTLC, keyRing *CommitmentKeyRing, localChanCfg, remoteChanCfg *channeldb.ChannelConfig, - commitHash chainhash.Hash) (*HtlcResolutions, error) { + commitHash chainhash.Hash, chanType channeldb.ChannelType) ( + *HtlcResolutions, error) { // TODO(roasbeef): don't need to swap csv delay? 
dustLimit := remoteChanCfg.DustLimit @@ -5564,11 +5693,15 @@ func extractHtlcResolutions(feePerKw SatPerKWeight, ourCommit bool, incomingResolutions := make([]IncomingHtlcResolution, 0, len(htlcs)) outgoingResolutions := make([]OutgoingHtlcResolution, 0, len(htlcs)) for _, htlc := range htlcs { + htlc := htlc + // We'll skip any HTLC's which were dust on the commitment // transaction, as these don't have a corresponding output // within the commitment transaction. - if htlcIsDust(htlc.Incoming, ourCommit, feePerKw, - htlc.Amt.ToSatoshis(), dustLimit) { + if htlcIsDust( + chanType, htlc.Incoming, ourCommit, feePerKw, + htlc.Amt.ToSatoshis(), dustLimit, + ) { continue } @@ -5578,8 +5711,9 @@ func extractHtlcResolutions(feePerKw SatPerKWeight, ourCommit bool, // Otherwise, we'll create an incoming HTLC resolution // as we can satisfy the contract. ihr, err := newIncomingHtlcResolution( - signer, localChanCfg, commitHash, &htlc, keyRing, - feePerKw, dustLimit, uint32(csvDelay), ourCommit, + signer, localChanCfg, commitHash, &htlc, + keyRing, feePerKw, uint32(csvDelay), ourCommit, + chanType, ) if err != nil { return nil, err @@ -5591,7 +5725,7 @@ func extractHtlcResolutions(feePerKw SatPerKWeight, ourCommit bool, ohr, err := newOutgoingHtlcResolution( signer, localChanCfg, commitHash, &htlc, keyRing, - feePerKw, dustLimit, uint32(csvDelay), ourCommit, + feePerKw, uint32(csvDelay), ourCommit, chanType, ) if err != nil { return nil, err @@ -5606,6 +5740,16 @@ func extractHtlcResolutions(feePerKw SatPerKWeight, ourCommit bool, }, nil } +// AnchorResolution holds the information necessary to spend our commitment tx +// anchor. +type AnchorResolution struct { + // AnchorSignDescriptor is the sign descriptor for our anchor. + AnchorSignDescriptor input.SignDescriptor + + // CommitAnchor is the anchor outpoint on the commit tx. 
+ CommitAnchor wire.OutPoint +} + // LocalForceCloseSummary describes the final commitment state before the // channel is locked-down to initiate a force closure by broadcasting the // latest state on-chain. If we intend to broadcast this this state, the @@ -5638,6 +5782,11 @@ type LocalForceCloseSummary struct { // ChanSnapshot is a snapshot of the final state of the channel at the // time the summary was created. ChanSnapshot channeldb.ChannelSnapshot + + // AnchorResolution contains the data required to sweep the anchor + // output. If the channel type doesn't include anchors, the value of + // this field will be nil. + AnchorResolution *AnchorResolution } // ForceClose executes a unilateral closure of the transaction at the current @@ -5703,11 +5852,13 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si } commitPoint := input.ComputeCommitmentPoint(revocation[:]) keyRing := DeriveCommitmentKeys( - commitPoint, true, chanState.ChanType.IsTweakless(), + commitPoint, true, chanState.ChanType, &chanState.LocalChanCfg, &chanState.RemoteChanCfg, ) - selfScript, err := input.CommitScriptToSelf(csvTimeout, keyRing.DelayKey, - keyRing.RevocationKey) + + selfScript, err := input.CommitScriptToSelf( + csvTimeout, keyRing.ToLocalKey, keyRing.RevocationKey, + ) if err != nil { return nil, err } @@ -5741,9 +5892,6 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si // nil. 
var commitResolution *CommitOutputResolution if len(delayScript) != 0 { - singleTweak := input.SingleTweakBytes( - commitPoint, chanState.LocalChanCfg.DelayBasePoint.PubKey, - ) localBalance := localCommit.LocalBalance commitResolution = &CommitOutputResolution{ SelfOutPoint: wire.OutPoint{ @@ -5752,7 +5900,7 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si }, SelfOutputSignDesc: input.SignDescriptor{ KeyDesc: chanState.LocalChanCfg.DelayBasePoint, - SingleTweak: singleTweak, + SingleTweak: keyRing.LocalCommitKeyTweak, WitnessScript: selfScript, Output: &wire.TxOut{ PkScript: delayScript, @@ -5769,9 +5917,16 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si // outgoing HTLC's that we'll need to claim as well. txHash := commitTx.TxHash() htlcResolutions, err := extractHtlcResolutions( - SatPerKWeight(localCommit.FeePerKw), true, signer, + chainfee.SatPerKWeight(localCommit.FeePerKw), true, signer, localCommit.Htlcs, keyRing, &chanState.LocalChanCfg, - &chanState.RemoteChanCfg, txHash, + &chanState.RemoteChanCfg, txHash, chanState.ChanType, + ) + if err != nil { + return nil, err + } + + anchorResolution, err := NewAnchorResolution( + chanState, commitTx, ) if err != nil { return nil, err @@ -5783,6 +5938,7 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si CommitResolution: commitResolution, HtlcResolutions: htlcResolutions, ChanSnapshot: *chanState.Snapshot(), + AnchorResolution: anchorResolution, }, nil } @@ -5798,7 +5954,8 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si // settle any in flight. 
func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount, localDeliveryScript []byte, - remoteDeliveryScript []byte) ([]byte, *chainhash.Hash, btcutil.Amount, error) { + remoteDeliveryScript []byte) (input.Signature, *chainhash.Hash, + btcutil.Amount, error) { lc.Lock() defer lc.Unlock() @@ -5825,10 +5982,11 @@ func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount, theirBalance = theirBalance - proposedFee + commitFee } - closeTx := CreateCooperativeCloseTx(lc.fundingTxIn(), - lc.localChanCfg.DustLimit, lc.remoteChanCfg.DustLimit, - ourBalance, theirBalance, localDeliveryScript, - remoteDeliveryScript, lc.channelState.IsInitiator) + closeTx := CreateCooperativeCloseTx( + fundingTxIn(lc.channelState), lc.channelState.LocalChanCfg.DustLimit, + lc.channelState.RemoteChanCfg.DustLimit, ourBalance, theirBalance, + localDeliveryScript, remoteDeliveryScript, + ) // Ensure that the transaction doesn't explicitly violate any // consensus rules such as being too big, or having any value with a @@ -5863,7 +6021,8 @@ func (lc *LightningChannel) CreateCloseProposal(proposedFee btcutil.Amount, // // NOTE: The passed local and remote sigs are expected to be fully complete // signatures including the proper sighash byte. -func (lc *LightningChannel) CompleteCooperativeClose(localSig, remoteSig []byte, +func (lc *LightningChannel) CompleteCooperativeClose( + localSig, remoteSig input.Signature, localDeliveryScript, remoteDeliveryScript []byte, proposedFee btcutil.Amount) (*wire.MsgTx, btcutil.Amount, error) { @@ -5895,10 +6054,11 @@ func (lc *LightningChannel) CompleteCooperativeClose(localSig, remoteSig []byte, // Create the transaction used to return the current settled balance // on this active channel back to both parties. In this current model, // the initiator pays full fees for the cooperative close transaction. 
- closeTx := CreateCooperativeCloseTx(lc.fundingTxIn(), - lc.localChanCfg.DustLimit, lc.remoteChanCfg.DustLimit, - ourBalance, theirBalance, localDeliveryScript, - remoteDeliveryScript, lc.channelState.IsInitiator) + closeTx := CreateCooperativeCloseTx( + fundingTxIn(lc.channelState), lc.channelState.LocalChanCfg.DustLimit, + lc.channelState.RemoteChanCfg.DustLimit, ourBalance, theirBalance, + localDeliveryScript, remoteDeliveryScript, + ) // Ensure that the transaction doesn't explicitly validate any // consensus rules such as being too big, or having any value with a @@ -5911,10 +6071,14 @@ func (lc *LightningChannel) CompleteCooperativeClose(localSig, remoteSig []byte, // Finally, construct the witness stack minding the order of the // pubkeys+sigs on the stack. - ourKey := lc.localChanCfg.MultiSigKey.PubKey.SerializeCompressed() - theirKey := lc.remoteChanCfg.MultiSigKey.PubKey.SerializeCompressed() - witness := input.SpendMultiSig(lc.signDesc.WitnessScript, ourKey, - localSig, theirKey, remoteSig) + ourKey := lc.channelState.LocalChanCfg.MultiSigKey.PubKey. + SerializeCompressed() + theirKey := lc.channelState.RemoteChanCfg.MultiSigKey.PubKey. + SerializeCompressed() + witness := input.SpendMultiSig( + lc.signDesc.WitnessScript, ourKey, localSig, theirKey, + remoteSig, + ) closeTx.TxIn[0].Witness = witness // Validate the finalized transaction to ensure the output script is @@ -5937,11 +6101,117 @@ func (lc *LightningChannel) CompleteCooperativeClose(localSig, remoteSig []byte, return closeTx, ourBalance, nil } -// AvailableBalance returns the current available balance within the channel. -// By available balance, we mean that if at this very instance s new commitment -// were to be created which evals all the log entries, what would our available -// balance me. This method is useful when deciding if a given channel can -// accept an HTLC in the multi-hop forwarding scenario. 
+// NewAnchorResolutions returns the anchor resolutions for all currently valid +// commitment transactions. Because we have no view on the mempool, we can only +// blindly anchor all of these txes down. +func (lc *LightningChannel) NewAnchorResolutions() ([]*AnchorResolution, + error) { + + lc.Lock() + defer lc.Unlock() + + var resolutions []*AnchorResolution + + // Add anchor for local commitment tx, if any. + localRes, err := NewAnchorResolution( + lc.channelState, lc.channelState.LocalCommitment.CommitTx, + ) + if err != nil { + return nil, err + } + if localRes != nil { + resolutions = append(resolutions, localRes) + } + + // Add anchor for remote commitment tx, if any. + remoteRes, err := NewAnchorResolution( + lc.channelState, lc.channelState.RemoteCommitment.CommitTx, + ) + if err != nil { + return nil, err + } + if remoteRes != nil { + resolutions = append(resolutions, remoteRes) + } + + // Add anchor for remote pending commitment tx, if any. + remotePendingCommit, err := lc.channelState.RemoteCommitChainTip() + if err != nil && err != channeldb.ErrNoPendingCommit { + return nil, err + } + + if remotePendingCommit != nil { + remotePendingRes, err := NewAnchorResolution( + lc.channelState, + remotePendingCommit.Commitment.CommitTx, + ) + if err != nil { + return nil, err + } + + if remotePendingRes != nil { + resolutions = append(resolutions, remotePendingRes) + } + } + + return resolutions, nil +} + +// NewAnchorResolution returns the information that is required to sweep the +// local anchor. +func NewAnchorResolution(chanState *channeldb.OpenChannel, + commitTx *wire.MsgTx) (*AnchorResolution, error) { + + // Return nil resolution if the channel has no anchors. + if !chanState.ChanType.HasAnchors() { + return nil, nil + } + + // Derive our local anchor script. 
+ localAnchor, _, err := CommitScriptAnchors( + &chanState.LocalChanCfg, &chanState.RemoteChanCfg, + ) + if err != nil { + return nil, err + } + + // Look up the script on the commitment transaction. It may not be + // present if there is no output paying to us. + found, index := input.FindScriptOutputIndex(commitTx, localAnchor.PkScript) + if !found { + return nil, nil + } + + outPoint := &wire.OutPoint{ + Hash: commitTx.TxHash(), + Index: index, + } + + // Instantiate the sign descriptor that allows sweeping of the anchor. + signDesc := &input.SignDescriptor{ + KeyDesc: chanState.LocalChanCfg.MultiSigKey, + WitnessScript: localAnchor.WitnessScript, + Output: &wire.TxOut{ + PkScript: localAnchor.PkScript, + Value: int64(anchorSize), + }, + HashType: txscript.SigHashAll, + } + + return &AnchorResolution{ + CommitAnchor: *outPoint, + AnchorSignDescriptor: *signDesc, + }, nil +} + +// AvailableBalance returns the current balance available for sending within +// the channel. By available balance, we mean that if at this very instance a +// new commitment were to be created which evals all the log entries, what +// would our available balance for adding an additional HTLC be. It takes into +// account the fee that must be paid for adding this HTLC (if we're the +// initiator), and that we cannot spend from the channel reserve. This method +// is useful when deciding if a given channel can accept an HTLC in the +// multi-hop forwarding scenario. func (lc *LightningChannel) AvailableBalance() lnwire.MilliSatoshi { lc.RLock() defer lc.RUnlock() @@ -5961,15 +6231,132 @@ func (lc *LightningChannel) availableBalance() (lnwire.MilliSatoshi, int64) { htlcView := lc.fetchHTLCView(remoteACKedIndex, lc.localUpdateLog.logIndex) - // Then compute our current balance for that view. - ourBalance, _, commitWeight, filteredView := - lc.computeView(htlcView, false, false) + // Calculate our available balance from our local commitment. 
+ // TODO(halseth): could reuse parts validateCommitmentSanity to do this + // balance calculation, as most of the logic is the same. + // + // NOTE: This is not always accurate, since the remote node can always + // add updates concurrently, causing our balance to go down if we're + // the initiator, but this is a problem on the protocol level. + ourLocalCommitBalance, commitWeight := lc.availableCommitmentBalance( + htlcView, false, + ) + + // Do the same calculation from the remote commitment point of view. + ourRemoteCommitBalance, _ := lc.availableCommitmentBalance( + htlcView, true, + ) + + // Return which ever balance is lowest. + if ourRemoteCommitBalance < ourLocalCommitBalance { + return ourRemoteCommitBalance, commitWeight + } + + return ourLocalCommitBalance, commitWeight +} + +// availableCommitmentBalance attempts to calculate the balance we have +// available for HTLCs on the local/remote commitment given the htlcView. To +// account for sending HTLCs of different sizes, it will report the balance +// available for sending non-dust HTLCs, which will be manifested on the +// commitment, increasing the commitment fee we must pay as an initiator, +// eating into our balance. It will make sure we won't violate the channel +// reserve constraints for this amount. +func (lc *LightningChannel) availableCommitmentBalance(view *htlcView, + remoteChain bool) (lnwire.MilliSatoshi, int64) { + + // Compute the current balances for this commitment. This will take + // into account HTLCs to determine the commit weight, which the + // initiator must pay the fee for. + ourBalance, theirBalance, commitWeight, filteredView, err := lc.computeView( + view, remoteChain, false, + ) + if err != nil { + lc.log.Errorf("Unable to fetch available balance: %v", err) + return 0, 0 + } + + // We can never spend from the channel reserve, so we'll subtract it + // from our available balance. 
+ ourReserve := lnwire.NewMSatFromSatoshis( + lc.channelState.LocalChanCfg.ChanReserve, + ) + if ourReserve <= ourBalance { + ourBalance -= ourReserve + } else { + ourBalance = 0 + } + + // Calculate the commitment fee in the case where we would add another + // HTLC to the commitment, as only the balance remaining after this fee + // has been paid is actually available for sending. + feePerKw := filteredView.feePerKw + htlcCommitFee := lnwire.NewMSatFromSatoshis( + feePerKw.FeeForWeight(commitWeight + input.HTLCWeight), + ) - // If we are the channel initiator, we must remember to subtract the - // commitment fee from our available balance. - commitFee := filteredView.feePerKw.FeeForWeight(commitWeight) + // If we are the channel initiator, we must to subtract this commitment + // fee from our available balance in order to ensure we can afford both + // the value of the HTLC and the additional commitment fee from adding + // the HTLC. if lc.channelState.IsInitiator { - ourBalance -= lnwire.NewMSatFromSatoshis(commitFee) + // There is an edge case where our non-zero balance is lower + // than the htlcCommitFee, where we could still be sending dust + // HTLCs, but we return 0 in this case. This is to avoid + // lowering our balance even further, as this takes us into a + // bad state wehere neither we nor our channel counterparty can + // add HTLCs. + if ourBalance < htlcCommitFee { + return 0, commitWeight + } + + return ourBalance - htlcCommitFee, commitWeight + } + + // If we're not the initiator, we must check whether the remote has + // enough balance to pay for the fee of our HTLC. We'll start by also + // subtracting our counterparty's reserve from their balance. 
+ theirReserve := lnwire.NewMSatFromSatoshis( + lc.channelState.RemoteChanCfg.ChanReserve, + ) + if theirReserve <= theirBalance { + theirBalance -= theirReserve + } else { + theirBalance = 0 + } + + // We'll use the dustlimit and htlcFee to find the largest HTLC value + // that will be considered dust on the commitment. + dustlimit := lnwire.NewMSatFromSatoshis( + lc.channelState.LocalChanCfg.DustLimit, + ) + + // For an extra HTLC fee to be paid on our commitment, the HTLC must be + // large enough to make a non-dust HTLC timeout transaction. + htlcFee := lnwire.NewMSatFromSatoshis( + HtlcTimeoutFee(lc.channelState.ChanType, feePerKw), + ) + + // If we are looking at the remote commitment, we must use the remote + // dust limit and the fee for adding an HTLC success transaction. + if remoteChain { + dustlimit = lnwire.NewMSatFromSatoshis( + lc.channelState.RemoteChanCfg.DustLimit, + ) + htlcFee = lnwire.NewMSatFromSatoshis( + HtlcSuccessFee(lc.channelState.ChanType, feePerKw), + ) + } + + // The HTLC output will be manifested on the commitment if it + // is non-dust after paying the HTLC fee. + nonDustHtlcAmt := dustlimit + htlcFee + + // If they cannot pay the fee if we add another non-dust HTLC, we'll + // report our available balance just below the non-dust amount, to + // avoid attempting HTLCs larger than this size. + if theirBalance < htlcCommitFee && ourBalance >= nonDustHtlcAmt { + ourBalance = nonDustHtlcAmt - 1 } return ourBalance, commitWeight @@ -5987,7 +6374,7 @@ func (lc *LightningChannel) StateSnapshot() *channeldb.ChannelSnapshot { // validateFeeRate ensures that if the passed fee is applied to the channel, // and a new commitment is created (which evaluates this fee), then the // initiator of the channel does not dip below their reserve. 
-func (lc *LightningChannel) validateFeeRate(feePerKw SatPerKWeight) error { +func (lc *LightningChannel) validateFeeRate(feePerKw chainfee.SatPerKWeight) error { // We'll ensure that we can accommodate this new fee change, yet still // be above our reserve balance. Otherwise, we'll reject the fee // update. @@ -6035,7 +6422,7 @@ func (lc *LightningChannel) validateFeeRate(feePerKw SatPerKWeight) error { // UpdateFee initiates a fee update for this channel. Must only be called by // the channel initiator, and must be called before sending update_fee to // the remote. -func (lc *LightningChannel) UpdateFee(feePerKw SatPerKWeight) error { +func (lc *LightningChannel) UpdateFee(feePerKw chainfee.SatPerKWeight) error { lc.Lock() defer lc.Unlock() @@ -6063,7 +6450,7 @@ func (lc *LightningChannel) UpdateFee(feePerKw SatPerKWeight) error { // ReceiveUpdateFee handles an updated fee sent from remote. This method will // return an error if called as channel initiator. -func (lc *LightningChannel) ReceiveUpdateFee(feePerKw SatPerKWeight) error { +func (lc *LightningChannel) ReceiveUpdateFee(feePerKw chainfee.SatPerKWeight) error { lc.Lock() defer lc.Unlock() @@ -6125,60 +6512,6 @@ func (lc *LightningChannel) generateRevocation(height uint64) (*lnwire.RevokeAnd return revocationMsg, nil } -// CreateCommitTx creates a commitment transaction, spending from specified -// funding output. The commitment transaction contains two outputs: one paying -// to the "owner" of the commitment transaction which can be spent after a -// relative block delay or revocation event, and the other paying the -// counterparty within the channel, which can be spent immediately. -func CreateCommitTx(fundingOutput wire.TxIn, - keyRing *CommitmentKeyRing, csvTimeout uint32, - amountToSelf, amountToThem, dustLimit btcutil.Amount) (*wire.MsgTx, error) { - - // First, we create the script for the delayed "pay-to-self" output. 
- // This output has 2 main redemption clauses: either we can redeem the - // output after a relative block delay, or the remote node can claim - // the funds with the revocation key if we broadcast a revoked - // commitment transaction. - ourRedeemScript, err := input.CommitScriptToSelf(csvTimeout, keyRing.DelayKey, - keyRing.RevocationKey) - if err != nil { - return nil, err - } - payToUsScriptHash, err := input.WitnessScriptHash(ourRedeemScript) - if err != nil { - return nil, err - } - - // Next, we create the script paying to them. This is just a regular - // P2WPKH output, without any added CSV delay. - theirWitnessKeyHash, err := input.CommitScriptUnencumbered(keyRing.NoDelayKey) - if err != nil { - return nil, err - } - - // Now that both output scripts have been created, we can finally create - // the transaction itself. We use a transaction version of 2 since CSV - // will fail unless the tx version is >= 2. - commitTx := wire.NewMsgTx(2) - commitTx.AddTxIn(&fundingOutput) - - // Avoid creating dust outputs within the commitment transaction. - if amountToSelf >= dustLimit { - commitTx.AddTxOut(&wire.TxOut{ - PkScript: payToUsScriptHash, - Value: int64(amountToSelf), - }) - } - if amountToThem >= dustLimit { - commitTx.AddTxOut(&wire.TxOut{ - PkScript: theirWitnessKeyHash, - Value: int64(amountToThem), - }) - } - - return commitTx, nil -} - // CreateCooperativeCloseTx creates a transaction which if signed by both // parties, then broadcast cooperatively closes an active channel. The creation // of the closure transaction is modified by a boolean indicating if the party @@ -6187,8 +6520,7 @@ func CreateCommitTx(fundingOutput wire.TxIn, // transaction in full. 
func CreateCooperativeCloseTx(fundingTxIn wire.TxIn, localDust, remoteDust, ourBalance, theirBalance btcutil.Amount, - ourDeliveryScript, theirDeliveryScript []byte, - initiator bool) *wire.MsgTx { + ourDeliveryScript, theirDeliveryScript []byte) *wire.MsgTx { // Construct the transaction to perform a cooperative closure of the // channel. In the event that one side doesn't have any settled funds @@ -6219,8 +6551,8 @@ func CreateCooperativeCloseTx(fundingTxIn wire.TxIn, // CalcFee returns the commitment fee to use for the given // fee rate (fee-per-kw). -func (lc *LightningChannel) CalcFee(feeRate SatPerKWeight) btcutil.Amount { - return feeRate.FeeForWeight(input.CommitWeight) +func (lc *LightningChannel) CalcFee(feeRate chainfee.SatPerKWeight) btcutil.Amount { + return feeRate.FeeForWeight(CommitWeight(lc.channelState.ChanType)) } // MaxFeeRate returns the maximum fee rate given an allocation of the channel @@ -6229,21 +6561,24 @@ func (lc *LightningChannel) CalcFee(feeRate SatPerKWeight) btcutil.Amount { // // NOTE: This should only be used for channels in which the local commitment is // the initiator. -func (lc *LightningChannel) MaxFeeRate(maxAllocation float64) SatPerKWeight { +func (lc *LightningChannel) MaxFeeRate(maxAllocation float64) chainfee.SatPerKWeight { lc.RLock() defer lc.RUnlock() // The maximum fee depends of the available balance that can be // committed towards fees. - balance, weight := lc.availableBalance() + commit := lc.channelState.LocalCommitment feeBalance := float64( - balance.ToSatoshis() + lc.channelState.LocalCommitment.CommitFee, + commit.LocalBalance.ToSatoshis() + commit.CommitFee, ) maxFee := feeBalance * maxAllocation // Ensure the fee rate doesn't dip below the fee floor. 
+ _, weight := lc.availableBalance() maxFeeRate := maxFee / (float64(weight) / 1000) - return SatPerKWeight(math.Max(maxFeeRate, float64(FeePerKwFloor))) + return chainfee.SatPerKWeight( + math.Max(maxFeeRate, float64(chainfee.FeePerKwFloor)), + ) } // RemoteNextRevocation returns the channelState's RemoteNextRevocation. @@ -6266,11 +6601,11 @@ func (lc *LightningChannel) IsInitiator() bool { // CommitFeeRate returns the current fee rate of the commitment transaction in // units of sat-per-kw. -func (lc *LightningChannel) CommitFeeRate() SatPerKWeight { +func (lc *LightningChannel) CommitFeeRate() chainfee.SatPerKWeight { lc.RLock() defer lc.RUnlock() - return SatPerKWeight(lc.channelState.LocalCommitment.FeePerKw) + return chainfee.SatPerKWeight(lc.channelState.LocalCommitment.FeePerKw) } // IsPending returns true if the channel's funding transaction has been fully @@ -6299,12 +6634,28 @@ func (lc *LightningChannel) MarkBorked() error { // MarkCommitmentBroadcasted marks the channel as a commitment transaction has // been broadcast, either our own or the remote, and we should watch the chain -// for it to confirm before taking any further action. -func (lc *LightningChannel) MarkCommitmentBroadcasted(tx *wire.MsgTx) error { +// for it to confirm before taking any further action. It takes a boolean which +// indicates whether we initiated the close. +func (lc *LightningChannel) MarkCommitmentBroadcasted(tx *wire.MsgTx, + locallyInitiated bool) error { + + lc.Lock() + defer lc.Unlock() + + return lc.channelState.MarkCommitmentBroadcasted(tx, locallyInitiated) +} + +// MarkCoopBroadcasted marks the channel as a cooperative close transaction has +// been broadcast, and that we should watch the chain for it to confirm before +// taking any further action. It takes a locally initiated bool which is true +// if we initiated the cooperative close. 
+func (lc *LightningChannel) MarkCoopBroadcasted(tx *wire.MsgTx, + localInitiated bool) error { + lc.Lock() defer lc.Unlock() - return lc.channelState.MarkCommitmentBroadcasted(tx) + return lc.channelState.MarkCoopBroadcasted(tx, localInitiated) } // MarkDataLoss marks sets the channel status to LocalDataLoss and stores the @@ -6323,32 +6674,12 @@ func (lc *LightningChannel) ActiveHtlcs() []channeldb.HTLC { lc.RLock() defer lc.RUnlock() - // We'll only return HTLC's that are locked into *both* commitment - // transactions. So we'll iterate through their set of HTLC's to note - // which ones are present on their commitment. - remoteHtlcs := make(map[[32]byte]struct{}) - for _, htlc := range lc.channelState.RemoteCommitment.Htlcs { - onionHash := sha256.Sum256(htlc.OnionBlob[:]) - remoteHtlcs[onionHash] = struct{}{} - } - - // Now that we know which HTLC's they have, we'll only mark the HTLC's - // as active if *we* know them as well. - activeHtlcs := make([]channeldb.HTLC, 0, len(remoteHtlcs)) - for _, htlc := range lc.channelState.LocalCommitment.Htlcs { - if _, ok := remoteHtlcs[sha256.Sum256(htlc.OnionBlob[:])]; !ok { - continue - } - - activeHtlcs = append(activeHtlcs, htlc) - } - - return activeHtlcs + return lc.channelState.ActiveHtlcs() } // LocalChanReserve returns our local ChanReserve requirement for the remote party. func (lc *LightningChannel) LocalChanReserve() btcutil.Amount { - return lc.localChanCfg.ChanReserve + return lc.channelState.LocalChanCfg.ChanReserve } // NextLocalHtlcIndex returns the next unallocated local htlc index. To ensure @@ -6362,16 +6693,8 @@ func (lc *LightningChannel) NextLocalHtlcIndex() (uint64, error) { return lc.channelState.NextLocalHtlcIndex() } -// RemoteCommitHeight returns the commitment height of the remote chain. 
-func (lc *LightningChannel) RemoteCommitHeight() uint64 { - lc.RLock() - defer lc.RUnlock() - - return lc.channelState.RemoteCommitment.CommitHeight -} - // FwdMinHtlc returns the minimum HTLC value required by the remote node, i.e. // the minimum value HTLC we can forward on this channel. func (lc *LightningChannel) FwdMinHtlc() lnwire.MilliSatoshi { - return lc.localChanCfg.MinHTLC + return lc.channelState.LocalChanCfg.MinHTLC } diff --git a/lnwallet/channel_test.go b/lnwallet/channel_test.go index ef0b5234de..9f10be1603 100644 --- a/lnwallet/channel_test.go +++ b/lnwallet/channel_test.go @@ -19,6 +19,8 @@ import ( "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" ) @@ -58,7 +60,12 @@ func testAddSettleWorkflow(t *testing.T, tweakless bool) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(tweakless) + chanType := channeldb.SingleFunderTweaklessBit + if !tweakless { + chanType = channeldb.SingleFunderBit + } + + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(chanType) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -388,7 +395,9 @@ func TestCheckCommitTxSize(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. 
- aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -441,13 +450,148 @@ func TestCheckCommitTxSize(t *testing.T) { } } +// TestCommitHTLCSigTieBreak asserts that HTLC signatures are sent the proper +// BIP69+CLTV sorting expected by BOLT 3 when multiple HTLCs have identical +// payment hashes and amounts, but differing CLTVs. This is exercised by adding +// the HTLCs in the descending order of their CLTVs, and asserting that their +// order is reversed when signing. +func TestCommitHTLCSigTieBreak(t *testing.T) { + t.Run("no restart", func(t *testing.T) { + testCommitHTLCSigTieBreak(t, false) + }) + t.Run("restart", func(t *testing.T) { + testCommitHTLCSigTieBreak(t, true) + }) +} + +func testCommitHTLCSigTieBreak(t *testing.T, restart bool) { + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + t.Fatalf("unable to create test channels; %v", err) + } + defer cleanUp() + + const ( + htlcAmt = lnwire.MilliSatoshi(20000000) + numHtlcs = 2 + ) + + // Add HTLCs with identical payment hashes and amounts, but descending + // CLTV values. We will expect the signatures to appear in the reverse + // order that the HTLCs are added due to the commitment sorting. + for i := 0; i < numHtlcs; i++ { + var ( + preimage lntypes.Preimage + hash = preimage.Hash() + ) + + htlc := &lnwire.UpdateAddHTLC{ + ID: uint64(i), + PaymentHash: hash, + Amount: htlcAmt, + Expiry: uint32(numHtlcs - i), + } + + if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil { + t.Fatalf("alice unable to add htlc: %v", err) + } + if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { + t.Fatalf("bob unable to receive htlc: %v", err) + } + } + + // Have Alice initiate the first half of the commitment dance. 
The + // tie-breaking for commitment sorting won't affect the commitment + // signed by Alice because received HTLC scripts commit to the CLTV + // directly, so the outputs will have different scriptPubkeys. + aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() + if err != nil { + t.Fatalf("unable to sign alice's commitment: %v", err) + } + + err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) + if err != nil { + t.Fatalf("unable to receive alice's commitment: %v", err) + } + + bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() + if err != nil { + t.Fatalf("unable to revoke bob's commitment: %v", err) + } + _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) + if err != nil { + t.Fatalf("unable to receive bob's revocation: %v", err) + } + + // Now have Bob initiate the second half of the commitment dance. Here + // the offered HTLC scripts he adds for Alice will need to have the + // tie-breaking applied because the CLTV is not committed, but instead + // implicit via the construction of the second-level transactions. + bobSig, bobHtlcSigs, bobHtlcs, err := bobChannel.SignNextCommitment() + if err != nil { + t.Fatalf("unable to sign bob's commitment: %v", err) + } + + if len(bobHtlcs) != numHtlcs { + t.Fatalf("expected %d htlcs, got: %v", numHtlcs, len(bobHtlcs)) + } + + // Ensure that our HTLCs appear in the reverse order from which they + // were added by inspecting each's outpoint index. We expect the output + // indexes to be in descending order, i.e. the first HTLC added had the + // highest CLTV and should end up last. + lastIndex := bobHtlcs[0].OutputIndex + for i, htlc := range bobHtlcs[1:] { + if htlc.OutputIndex >= lastIndex { + t.Fatalf("htlc %d output index %d is not descending", + i, htlc.OutputIndex) + } + + lastIndex = htlc.OutputIndex + } + + // If requested, restart Alice so that we can test that the necessary + // indexes can be reconstructed before needing to validate the + // signatures from Bob.
+ if restart { + aliceState := aliceChannel.channelState + aliceChannels, err := aliceState.Db.FetchOpenChannels( + aliceState.IdentityPub, + ) + if err != nil { + t.Fatalf("unable to fetch channel: %v", err) + } + + aliceChannelNew, err := NewLightningChannel( + aliceChannel.Signer, aliceChannels[0], + aliceChannel.sigPool, + ) + if err != nil { + t.Fatalf("unable to create new channel: %v", err) + } + + aliceChannel = aliceChannelNew + } + + // Finally, have Alice validate the signatures to ensure that she is + // expecting the signatures in the proper order. + err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) + if err != nil { + t.Fatalf("unable to receive bob's commitment: %v", err) + } +} + func TestCooperativeChannelClosure(t *testing.T) { t.Parallel() // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -456,8 +600,12 @@ func TestCooperativeChannelClosure(t *testing.T) { aliceDeliveryScript := bobsPrivKey[:] bobDeliveryScript := testHdSeed[:] - aliceFeeRate := SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) - bobFeeRate := SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) + aliceFeeRate := chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) + bobFeeRate := chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) // We'll store with both Alice and Bob creating a new close proposal // with the same fee. 
@@ -468,7 +616,6 @@ func TestCooperativeChannelClosure(t *testing.T) { if err != nil { t.Fatalf("unable to create alice coop close proposal: %v", err) } - aliceCloseSig := append(aliceSig, byte(txscript.SigHashAll)) bobFee := bobChannel.CalcFee(bobFeeRate) bobSig, _, _, err := bobChannel.CreateCloseProposal( @@ -477,14 +624,13 @@ func TestCooperativeChannelClosure(t *testing.T) { if err != nil { t.Fatalf("unable to create bob coop close proposal: %v", err) } - bobCloseSig := append(bobSig, byte(txscript.SigHashAll)) // With the proposals created, both sides should be able to properly // process the other party's signature. This indicates that the // transaction is well formed, and the signatures verify. aliceCloseTx, _, err := bobChannel.CompleteCooperativeClose( - bobCloseSig, aliceCloseSig, bobDeliveryScript, - aliceDeliveryScript, bobFee, + bobSig, aliceSig, bobDeliveryScript, aliceDeliveryScript, + bobFee, ) if err != nil { t.Fatalf("unable to complete alice cooperative close: %v", err) @@ -492,8 +638,8 @@ func TestCooperativeChannelClosure(t *testing.T) { bobCloseSha := aliceCloseTx.TxHash() bobCloseTx, _, err := aliceChannel.CompleteCooperativeClose( - aliceCloseSig, bobCloseSig, aliceDeliveryScript, - bobDeliveryScript, aliceFee, + aliceSig, bobSig, aliceDeliveryScript, bobDeliveryScript, + aliceFee, ) if err != nil { t.Fatalf("unable to complete bob cooperative close: %v", err) @@ -511,12 +657,37 @@ func TestCooperativeChannelClosure(t *testing.T) { // force close generates HTLC resolutions that are capable of sweeping both // incoming and outgoing HTLC's. 
func TestForceClose(t *testing.T) { + t.Run("tweakless", func(t *testing.T) { + testForceClose(t, &forceCloseTestCase{ + chanType: channeldb.SingleFunderTweaklessBit, + expectedCommitWeight: input.CommitWeight, + }) + }) + t.Run("anchors", func(t *testing.T) { + testForceClose(t, &forceCloseTestCase{ + chanType: channeldb.SingleFunderTweaklessBit | + channeldb.AnchorOutputsBit, + expectedCommitWeight: input.AnchorCommitWeight, + anchorAmt: anchorSize * 2, + }) + }) +} + +type forceCloseTestCase struct { + chanType channeldb.ChannelType + expectedCommitWeight int64 + anchorAmt btcutil.Amount +} + +func testForceClose(t *testing.T, testCase *forceCloseTestCase) { t.Parallel() // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + testCase.chanType, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -577,6 +748,36 @@ func TestForceClose(t *testing.T) { 1, len(closeSummary.HtlcResolutions.IncomingHTLCs)) } + // Verify the anchor resolutions for the anchor commitment format. + if testCase.chanType.HasAnchors() { + // Check the close summary resolution. + anchorRes := closeSummary.AnchorResolution + if anchorRes == nil { + t.Fatal("expected anchor resolution") + } + if anchorRes.CommitAnchor.Hash != closeSummary.CloseTx.TxHash() { + t.Fatal("commit tx not referenced by anchor res") + } + if anchorRes.AnchorSignDescriptor.Output.Value != + int64(anchorSize) { + + t.Fatal("unexpected anchor size") + } + if anchorRes.AnchorSignDescriptor.WitnessScript == nil { + t.Fatal("expected anchor witness script") + } + + // Check the pre-confirmation resolutions. 
+ resList, err := aliceChannel.NewAnchorResolutions() + if err != nil { + t.Fatalf("pre-confirmation resolution error: %v", err) + } + + if len(resList) != 2 { + t.Fatal("expected two resolutions") + } + } + // The SelfOutputSignDesc should be non-nil since the output to-self is // non-dust. aliceCommitResolution := closeSummary.CommitResolution @@ -595,10 +796,16 @@ func TestForceClose(t *testing.T) { // Factoring in the fee rate, Alice's amount should properly reflect // that we've added two additional HTLC to the commitment transaction. - totalCommitWeight := input.CommitWeight + (input.HtlcWeight * 2) - feePerKw := SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) + totalCommitWeight := testCase.expectedCommitWeight + + (input.HTLCWeight * 2) + feePerKw := chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) commitFee := feePerKw.FeeForWeight(totalCommitWeight) - expectedAmount := (aliceChannel.Capacity / 2) - htlcAmount.ToSatoshis() - commitFee + + expectedAmount := (aliceChannel.Capacity / 2) - + htlcAmount.ToSatoshis() - commitFee - testCase.anchorAmt + if aliceCommitResolution.SelfOutputSignDesc.Output.Value != int64(expectedAmount) { t.Fatalf("alice incorrect output value in SelfOutputSignDesc, "+ "expected %v, got %v", int64(expectedAmount), @@ -608,7 +815,7 @@ func TestForceClose(t *testing.T) { // Alice's listed CSV delay should also match the delay that was // pre-committed to at channel opening. if aliceCommitResolution.MaturityDelay != - uint32(aliceChannel.localChanCfg.CsvDelay) { + uint32(aliceChannel.channelState.LocalChanCfg.CsvDelay) { t.Fatalf("alice: incorrect local CSV delay in ForceCloseSummary, "+ "expected %v, got %v", @@ -800,7 +1007,9 @@ func TestForceCloseDustOutput(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. 
- aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -809,10 +1018,10 @@ func TestForceCloseDustOutput(t *testing.T) { // We set both node's channel reserves to 0, to make sure // they can create small dust ouputs without going under // their channel reserves. - aliceChannel.localChanCfg.ChanReserve = 0 - bobChannel.localChanCfg.ChanReserve = 0 - aliceChannel.remoteChanCfg.ChanReserve = 0 - bobChannel.remoteChanCfg.ChanReserve = 0 + aliceChannel.channelState.LocalChanCfg.ChanReserve = 0 + bobChannel.channelState.LocalChanCfg.ChanReserve = 0 + aliceChannel.channelState.RemoteChanCfg.ChanReserve = 0 + bobChannel.channelState.RemoteChanCfg.ChanReserve = 0 htlcAmount := lnwire.NewMSatFromSatoshis(500) @@ -918,7 +1127,9 @@ func TestDustHTLCFees(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -962,7 +1173,7 @@ func TestDustHTLCFees(t *testing.T) { // The commitment fee paid should be the same, as there have been no // new material outputs added. - defaultFee := calcStaticFee(0) + defaultFee := calcStaticFee(channeldb.SingleFunderTweaklessBit, 0) if aliceChannel.channelState.LocalCommitment.CommitFee != defaultFee { t.Fatalf("dust htlc amounts not subtracted from commitment fee "+ "expected %v, got %v", defaultFee, @@ -995,7 +1206,9 @@ func TestHTLCDustLimit(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. 
The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -1003,8 +1216,12 @@ func TestHTLCDustLimit(t *testing.T) { // The amount of the HTLC should be above Alice's dust limit and below // Bob's dust limit. - htlcSat := (btcutil.Amount(500) + htlcTimeoutFee( - SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw))) + htlcSat := (btcutil.Amount(500) + HtlcTimeoutFee( + aliceChannel.channelState.ChanType, + chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ), + )) htlcAmount := lnwire.NewMSatFromSatoshis(htlcSat) htlc, preimage := createHTLC(0, htlcAmount) @@ -1034,7 +1251,7 @@ func TestHTLCDustLimit(t *testing.T) { t.Fatalf("incorrect # of outputs: expected %v, got %v", 2, len(bobCommitment.txn.TxOut)) } - defaultFee := calcStaticFee(0) + defaultFee := calcStaticFee(channeldb.SingleFunderTweaklessBit, 0) if bobChannel.channelState.LocalCommitment.CommitFee != defaultFee { t.Fatalf("dust htlc amount was subtracted from commitment fee "+ "expected %v, got %v", defaultFee, @@ -1080,7 +1297,9 @@ func TestHTLCSigNumber(t *testing.T) { // Create a test channel funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. Alice's dustlimit is 200 sat, while // Bob has 1300 sat. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -1102,14 +1321,18 @@ func TestHTLCSigNumber(t *testing.T) { } // Calculate two values that will be below and above Bob's dust limit. 
- estimator := NewStaticFeeEstimator(6000, 0) + estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { t.Fatalf("unable to get fee: %v", err) } - belowDust := btcutil.Amount(500) + htlcTimeoutFee(feePerKw) - aboveDust := btcutil.Amount(1400) + htlcSuccessFee(feePerKw) + belowDust := btcutil.Amount(500) + HtlcTimeoutFee( + channeldb.SingleFunderTweaklessBit, feePerKw, + ) + aboveDust := btcutil.Amount(1400) + HtlcSuccessFee( + channeldb.SingleFunderTweaklessBit, feePerKw, + ) // =================================================================== // Test that Bob will reject a commitment if Alice doesn't send enough @@ -1250,7 +1473,9 @@ func TestChannelBalanceDustLimit(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -1258,17 +1483,20 @@ func TestChannelBalanceDustLimit(t *testing.T) { // To allow Alice's balance to get beneath her dust limit, set the // channel reserve to be 0. - aliceChannel.localChanCfg.ChanReserve = 0 - bobChannel.remoteChanCfg.ChanReserve = 0 + aliceChannel.channelState.LocalChanCfg.ChanReserve = 0 + bobChannel.channelState.RemoteChanCfg.ChanReserve = 0 // This amount should leave an amount larger than Alice's dust limit // once fees have been subtracted, but smaller than Bob's dust limit. // We account in fees for the HTLC we will be adding. 
- defaultFee := calcStaticFee(1) + defaultFee := calcStaticFee(channeldb.SingleFunderTweaklessBit, 1) aliceBalance := aliceChannel.channelState.LocalCommitment.LocalBalance.ToSatoshis() htlcSat := aliceBalance - defaultFee - htlcSat += htlcSuccessFee( - SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw), + htlcSat += HtlcSuccessFee( + aliceChannel.channelState.ChanType, + chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ), ) htlcAmount := lnwire.NewMSatFromSatoshis(htlcSat) @@ -1318,7 +1546,9 @@ func TestStateUpdatePersistence(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -1366,7 +1596,7 @@ func TestStateUpdatePersistence(t *testing.T) { } // Also add a fee update to the update logs. - fee := SatPerKWeight(333) + fee := chainfee.SatPerKWeight(333) if err := aliceChannel.UpdateFee(fee); err != nil { t.Fatalf("unable to send fee update") } @@ -1659,7 +1889,9 @@ func TestCancelHTLC(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -1692,7 +1924,7 @@ func TestCancelHTLC(t *testing.T) { // With the HTLC committed, Alice's balance should reflect the clearing // of the new HTLC. 
aliceExpectedBalance := btcutil.Amount(btcutil.SatoshiPerBitcoin*4) - - calcStaticFee(1) + calcStaticFee(channeldb.SingleFunderTweaklessBit, 1) if aliceChannel.channelState.LocalCommitment.LocalBalance.ToSatoshis() != aliceExpectedBalance { t.Fatalf("Alice's balance is wrong: expected %v, got %v", @@ -1737,12 +1969,13 @@ func TestCancelHTLC(t *testing.T) { } expectedBalance := btcutil.Amount(btcutil.SatoshiPerBitcoin * 5) + staticFee := calcStaticFee(channeldb.SingleFunderTweaklessBit, 0) if aliceChannel.channelState.LocalCommitment.LocalBalance.ToSatoshis() != - expectedBalance-calcStaticFee(0) { + expectedBalance-staticFee { t.Fatalf("balance is wrong: expected %v, got %v", aliceChannel.channelState.LocalCommitment.LocalBalance.ToSatoshis(), - expectedBalance-calcStaticFee(0)) + expectedBalance-staticFee) } if aliceChannel.channelState.LocalCommitment.RemoteBalance.ToSatoshis() != expectedBalance { @@ -1759,11 +1992,11 @@ func TestCancelHTLC(t *testing.T) { expectedBalance) } if bobChannel.channelState.LocalCommitment.RemoteBalance.ToSatoshis() != - expectedBalance-calcStaticFee(0) { + expectedBalance-staticFee { t.Fatalf("balance is wrong: expected %v, got %v", bobChannel.channelState.LocalCommitment.RemoteBalance.ToSatoshis(), - expectedBalance-calcStaticFee(0)) + expectedBalance-staticFee) } } @@ -1773,14 +2006,20 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. 
- aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } defer cleanUp() - aliceFeeRate := SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) - bobFeeRate := SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) + aliceFeeRate := chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) + bobFeeRate := chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) setDustLimit := func(dustVal btcutil.Amount) { aliceChannel.channelState.LocalChanCfg.DustLimit = dustVal @@ -1813,24 +2052,25 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { // balances. As a result, performing a cooperative closure now result // in both sides having an output within the closure transaction. aliceFee := btcutil.Amount(aliceChannel.CalcFee(aliceFeeRate)) + 1000 - aliceSig, _, _, err := aliceChannel.CreateCloseProposal(aliceFee, - aliceDeliveryScript, bobDeliveryScript) + aliceSig, _, _, err := aliceChannel.CreateCloseProposal( + aliceFee, aliceDeliveryScript, bobDeliveryScript, + ) if err != nil { t.Fatalf("unable to close channel: %v", err) } - aliceCloseSig := append(aliceSig, byte(txscript.SigHashAll)) bobFee := btcutil.Amount(bobChannel.CalcFee(bobFeeRate)) + 1000 - bobSig, _, _, err := bobChannel.CreateCloseProposal(bobFee, - bobDeliveryScript, aliceDeliveryScript) + bobSig, _, _, err := bobChannel.CreateCloseProposal( + bobFee, bobDeliveryScript, aliceDeliveryScript, + ) if err != nil { t.Fatalf("unable to close channel: %v", err) } - bobCloseSig := append(bobSig, byte(txscript.SigHashAll)) closeTx, _, err := bobChannel.CompleteCooperativeClose( - bobCloseSig, aliceCloseSig, - bobDeliveryScript, aliceDeliveryScript, bobFee) + bobSig, aliceSig, bobDeliveryScript, aliceDeliveryScript, + bobFee, + ) if err != nil { 
t.Fatalf("unable to accept channel close: %v", err) } @@ -1852,23 +2092,24 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { // Attempt another cooperative channel closure. It should succeed // without any issues. - aliceSig, _, _, err = aliceChannel.CreateCloseProposal(aliceFee, - aliceDeliveryScript, bobDeliveryScript) + aliceSig, _, _, err = aliceChannel.CreateCloseProposal( + aliceFee, aliceDeliveryScript, bobDeliveryScript, + ) if err != nil { t.Fatalf("unable to close channel: %v", err) } - aliceCloseSig = append(aliceSig, byte(txscript.SigHashAll)) - bobSig, _, _, err = bobChannel.CreateCloseProposal(bobFee, - bobDeliveryScript, aliceDeliveryScript) + bobSig, _, _, err = bobChannel.CreateCloseProposal( + bobFee, bobDeliveryScript, aliceDeliveryScript, + ) if err != nil { t.Fatalf("unable to close channel: %v", err) } - bobCloseSig = append(bobSig, byte(txscript.SigHashAll)) closeTx, _, err = bobChannel.CompleteCooperativeClose( - bobCloseSig, aliceCloseSig, - bobDeliveryScript, aliceDeliveryScript, bobFee) + bobSig, aliceSig, bobDeliveryScript, aliceDeliveryScript, + bobFee, + ) if err != nil { t.Fatalf("unable to accept channel close: %v", err) } @@ -1900,7 +2141,6 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { if err != nil { t.Fatalf("unable to close channel: %v", err) } - aliceCloseSig = append(aliceSig, byte(txscript.SigHashAll)) bobSig, _, _, err = bobChannel.CreateCloseProposal( bobFee, bobDeliveryScript, aliceDeliveryScript, @@ -1908,11 +2148,11 @@ func TestCooperativeCloseDustAdherence(t *testing.T) { if err != nil { t.Fatalf("unable to close channel: %v", err) } - bobCloseSig = append(bobSig, byte(txscript.SigHashAll)) closeTx, _, err = bobChannel.CompleteCooperativeClose( - bobCloseSig, aliceCloseSig, - bobDeliveryScript, aliceDeliveryScript, bobFee) + bobSig, aliceSig, bobDeliveryScript, aliceDeliveryScript, + bobFee, + ) if err != nil { t.Fatalf("unable to accept channel close: %v", err) } @@ -1934,7 +2174,9 @@ func 
TestCooperativeCloseDustAdherence(t *testing.T) { func TestUpdateFeeAdjustments(t *testing.T) { t.Parallel() - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -1946,7 +2188,7 @@ func TestUpdateFeeAdjustments(t *testing.T) { // We'll first try to increase the fee rate 5x, this should be able to // be committed without any issue. - newFee := SatPerKWeight(baseFeeRate * 5) + newFee := chainfee.SatPerKWeight(baseFeeRate * 5) if err := aliceChannel.UpdateFee(newFee); err != nil { t.Fatalf("unable to alice update fee: %v", err) @@ -1964,7 +2206,7 @@ func TestUpdateFeeAdjustments(t *testing.T) { // We'll now attempt to increase the fee rate 1,000,000x of the base // fee. This should result in an error as Alice won't be able to pay // this new fee rate. - newFee = SatPerKWeight(baseFeeRate * 1000000) + newFee = chainfee.SatPerKWeight(baseFeeRate * 1000000) if err := aliceChannel.UpdateFee(newFee); err == nil { t.Fatalf("alice should reject the fee rate") } @@ -1972,7 +2214,7 @@ func TestUpdateFeeAdjustments(t *testing.T) { // Finally, we'll attempt to adjust the fee down and use a fee which is // smaller than the initial base fee rate. The fee application and // state transition should proceed without issue. 
- newFee = SatPerKWeight(baseFeeRate / 10) + newFee = chainfee.SatPerKWeight(baseFeeRate / 10) if err := aliceChannel.UpdateFee(newFee); err != nil { t.Fatalf("unable to alice update fee: %v", err) } @@ -1989,7 +2231,9 @@ func TestUpdateFeeAdjustments(t *testing.T) { func TestUpdateFeeFail(t *testing.T) { t.Parallel() - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -2023,7 +2267,9 @@ func TestUpdateFeeFail(t *testing.T) { func TestUpdateFeeConcurrentSig(t *testing.T) { t.Parallel() - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -2048,7 +2294,7 @@ func TestUpdateFeeConcurrentSig(t *testing.T) { } // Simulate Alice sending update fee message to bob. 
- fee := SatPerKWeight(333) + fee := chainfee.SatPerKWeight(333) if err := aliceChannel.UpdateFee(fee); err != nil { t.Fatalf("unable to send fee update") } @@ -2084,7 +2330,7 @@ func TestUpdateFeeConcurrentSig(t *testing.T) { t.Fatalf("bob unable to process alice's new commitment: %v", err) } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) == fee { + if chainfee.SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) == fee { t.Fatalf("bob's feePerKw was unexpectedly locked in") } @@ -2095,7 +2341,7 @@ func TestUpdateFeeConcurrentSig(t *testing.T) { t.Fatalf("unable to generate bob revocation: %v", err) } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != fee { + if chainfee.SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != fee { t.Fatalf("bob's feePerKw was not locked in") } } @@ -2109,7 +2355,9 @@ func TestUpdateFeeSenderCommits(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -2134,7 +2382,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) { } // Simulate Alice sending update fee message to bob. 
- fee := SatPerKWeight(333) + fee := chainfee.SatPerKWeight(333) aliceChannel.UpdateFee(fee) bobChannel.ReceiveUpdateFee(fee) @@ -2154,7 +2402,9 @@ func TestUpdateFeeSenderCommits(t *testing.T) { t.Fatalf("bob unable to process alice's new commitment: %v", err) } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) == fee { + if chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) == fee { t.Fatalf("bob's feePerKw was unexpectedly locked in") } @@ -2165,7 +2415,9 @@ func TestUpdateFeeSenderCommits(t *testing.T) { t.Fatalf("unable to generate bob revocation: %v", err) } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != fee { + if chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) != fee { t.Fatalf("bob's feePerKw was not locked in") } @@ -2191,7 +2443,9 @@ func TestUpdateFeeSenderCommits(t *testing.T) { t.Fatalf("alice unable to process bob's new commitment: %v", err) } - if SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) == fee { + if chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) == fee { t.Fatalf("alice's feePerKw was unexpectedly locked in") } @@ -2202,7 +2456,9 @@ func TestUpdateFeeSenderCommits(t *testing.T) { t.Fatalf("unable to revoke alice channel: %v", err) } - if SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) != fee { + if chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) != fee { t.Fatalf("alice's feePerKw was not locked in") } @@ -2223,7 +2479,9 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. 
- aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -2248,7 +2506,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { } // Simulate Alice sending update fee message to bob - fee := SatPerKWeight(333) + fee := chainfee.SatPerKWeight(333) aliceChannel.UpdateFee(fee) bobChannel.ReceiveUpdateFee(fee) @@ -2296,7 +2554,9 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { t.Fatalf("alice unable to process bob's new commitment: %v", err) } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) == fee { + if chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) == fee { t.Fatalf("bob's feePerKw was unexpectedly locked in") } @@ -2308,7 +2568,9 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { t.Fatalf("unable to revoke alice channel: %v", err) } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != fee { + if chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) != fee { t.Fatalf("bob's feePerKw was not locked in") } @@ -2333,7 +2595,9 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { t.Fatalf("alice unable to process bob's new commitment: %v", err) } - if SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) == fee { + if chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) == fee { t.Fatalf("alice's feePerKw was unexpectedly locked in") } @@ -2344,7 +2608,9 @@ func TestUpdateFeeReceiverCommits(t *testing.T) { t.Fatalf("unable to generate bob revocation: %v", err) } - if SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) != fee { + if chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) != fee { t.Fatalf("Alice's feePerKw was not locked in") } @@ -2364,7 +2630,9 @@ func TestUpdateFeeReceiverSendsUpdate(t 
*testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -2372,7 +2640,7 @@ func TestUpdateFeeReceiverSendsUpdate(t *testing.T) { // Since Alice is the channel initiator, she should fail when receiving // fee update - fee := SatPerKWeight(333) + fee := chainfee.SatPerKWeight(333) err = aliceChannel.ReceiveUpdateFee(fee) if err == nil { t.Fatalf("expected alice to fail receiving fee update") @@ -2393,16 +2661,18 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } defer cleanUp() // Simulate Alice sending update fee message to bob. 
- fee1 := SatPerKWeight(333) - fee2 := SatPerKWeight(333) - fee := SatPerKWeight(333) + fee1 := chainfee.SatPerKWeight(333) + fee2 := chainfee.SatPerKWeight(333) + fee := chainfee.SatPerKWeight(333) aliceChannel.UpdateFee(fee1) aliceChannel.UpdateFee(fee2) aliceChannel.UpdateFee(fee) @@ -2427,15 +2697,17 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { t.Fatalf("bob unable to process alice's new commitment: %v", err) } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) == fee { + if chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) == fee { t.Fatalf("bob's feePerKw was unexpectedly locked in") } // Alice sending more fee updates now should not mess up the old fee // they both committed to. - fee3 := SatPerKWeight(444) - fee4 := SatPerKWeight(555) - fee5 := SatPerKWeight(666) + fee3 := chainfee.SatPerKWeight(444) + fee4 := chainfee.SatPerKWeight(555) + fee5 := chainfee.SatPerKWeight(666) aliceChannel.UpdateFee(fee3) aliceChannel.UpdateFee(fee4) aliceChannel.UpdateFee(fee5) @@ -2450,7 +2722,9 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { t.Fatalf("unable to generate bob revocation: %v", err) } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != fee { + if chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) != fee { t.Fatalf("bob's feePerKw was not locked in") } @@ -2475,7 +2749,9 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { t.Fatalf("alice unable to process bob's new commitment: %v", err) } - if SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) == fee { + if chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) == fee { t.Fatalf("alice's feePerKw was unexpectedly locked in") } @@ -2486,7 +2762,9 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) { t.Fatalf("unable to revoke alice channel: %v", err) } - if SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) != fee { + if chainfee.SatPerKWeight( + 
aliceChannel.channelState.LocalCommitment.FeePerKw, + ) != fee { t.Fatalf("alice's feePerKw was not locked in") } @@ -2505,7 +2783,9 @@ func TestAddHTLCNegativeBalance(t *testing.T) { // We'll kick off the test by creating our channels which both are // loaded with 5 BTC each. - aliceChannel, _, cleanUp, err := CreateTestChannels(true) + aliceChannel, _, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -2513,7 +2793,7 @@ func TestAddHTLCNegativeBalance(t *testing.T) { // We set the channel reserve to 0, such that we can add HTLCs all the // way to a negative balance. - aliceChannel.localChanCfg.ChanReserve = 0 + aliceChannel.channelState.LocalChanCfg.ChanReserve = 0 // First, we'll add 3 HTLCs of 1 BTC each to Alice's commitment. const numHTLCs = 3 @@ -2586,7 +2866,9 @@ func TestChanSyncFullySynced(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -2706,7 +2988,9 @@ func TestChanSyncOweCommitment(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. 
- aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -3010,6 +3294,128 @@ func TestChanSyncOweCommitment(t *testing.T) { } } +// TestChanSyncOweCommitmentPendingRemote asserts that local updates are applied +// to the remote commit across restarts. +func TestChanSyncOweCommitmentPendingRemote(t *testing.T) { + t.Parallel() + + // Create a test channel which will be used for the duration of this + // unittest. + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + t.Fatalf("unable to create test channels: %v", err) + } + defer cleanUp() + + var fakeOnionBlob [lnwire.OnionPacketSize]byte + copy(fakeOnionBlob[:], bytes.Repeat([]byte{0x05}, lnwire.OnionPacketSize)) + + // We'll start off the scenario where Bob send two htlcs to Alice in a + // single state update. + var preimages []lntypes.Preimage + const numHtlcs = 2 + for id := byte(0); id < numHtlcs; id++ { + htlcAmt := lnwire.NewMSatFromSatoshis(20000) + var bobPreimage [32]byte + copy(bobPreimage[:], bytes.Repeat([]byte{id}, 32)) + rHash := sha256.Sum256(bobPreimage[:]) + h := &lnwire.UpdateAddHTLC{ + PaymentHash: rHash, + Amount: htlcAmt, + Expiry: uint32(10), + OnionBlob: fakeOnionBlob, + } + + htlcIndex, err := bobChannel.AddHTLC(h, nil) + if err != nil { + t.Fatalf("unable to add bob's htlc: %v", err) + } + + h.ID = htlcIndex + if _, err := aliceChannel.ReceiveHTLC(h); err != nil { + t.Fatalf("unable to recv bob's htlc: %v", err) + } + + preimages = append(preimages, bobPreimage) + } + + // With the HTLCs applied to both update logs, we'll initiate a state + // transition from Bob. 
+ if err := ForceStateTransition(bobChannel, aliceChannel); err != nil { + t.Fatalf("unable to complete bob's state transition: %v", err) + } + + // Next, Alice settles the HTLCs from Bob in distinct state updates. + for i := 0; i < numHtlcs; i++ { + err = aliceChannel.SettleHTLC(preimages[i], uint64(i), nil, nil, nil) + if err != nil { + t.Fatalf("unable to settle htlc: %v", err) + } + err = bobChannel.ReceiveHTLCSettle(preimages[i], uint64(i)) + if err != nil { + t.Fatalf("unable to settle htlc: %v", err) + } + + aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() + if err != nil { + t.Fatalf("unable to sign commitment: %v", err) + } + + err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) + if err != nil { + t.Fatalf("unable to receive commitment: %v", err) + } + + // Bob revokes his current commitment. After this call + // completes, the htlc is settled on the local commitment + // transaction. Bob still owes Alice a signature to also settle + // the htlc on her local commitment transaction. + bobRevoke, _, err := bobChannel.RevokeCurrentCommitment() + if err != nil { + t.Fatalf("unable to revoke commitment: %v", err) + } + + _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevoke) + if err != nil { + t.Fatalf("unable to revoke commitment: %v", err) + } + } + + // We restart Bob. This should have no impact on further message that + // are generated. + bobChannel, err = restartChannel(bobChannel) + if err != nil { + t.Fatalf("unable to restart bob: %v", err) + } + + // Bob signs the commitment he owes. + bobCommit, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() + if err != nil { + t.Fatalf("unable to sign commitment: %v", err) + } + + // This commitment is expected to contain no htlcs anymore. + if len(bobHtlcSigs) != 0 { + t.Fatalf("no htlcs expected, but got %v", len(bobHtlcSigs)) + } + + // Get Alice to revoke and trigger Bob to compact his logs. 
+ err = aliceChannel.ReceiveNewCommitment(bobCommit, bobHtlcSigs) + if err != nil { + t.Fatal(err) + } + aliceRevoke, _, err := aliceChannel.RevokeCurrentCommitment() + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevoke) + if err != nil { + t.Fatal(err) + } +} + // TestChanSyncOweRevocation tests that if Bob restarts (and then Alice) before // he receiver's Alice's RevokeAndAck message, then Alice concludes that she // needs to re-send the RevokeAndAck. After the revocation has been sent, both @@ -3020,7 +3426,9 @@ func TestChanSyncOweRevocation(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -3210,7 +3618,9 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -3379,8 +3789,10 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. 
- aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) - if err != nil { + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { t.Fatalf("unable to create test channels: %v", err) } defer cleanUp() @@ -3608,7 +4020,9 @@ func TestChanSyncFailure(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(false) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -3861,7 +4275,9 @@ func TestFeeUpdateRejectInsaneFee(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, _, cleanUp, err := CreateTestChannels(true) + aliceChannel, _, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -3869,7 +4285,9 @@ func TestFeeUpdateRejectInsaneFee(t *testing.T) { // Next, we'll try to add a fee rate to Alice which is 1,000,000x her // starting fee rate. - startingFeeRate := SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) + startingFeeRate := chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) newFeeRate := startingFeeRate * 1000000 // Both Alice and Bob should reject this new fee rate as it is far too @@ -3887,7 +4305,9 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. 
- aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -3895,7 +4315,9 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) { // First, we'll fetch the current fee rate present within the // commitment transactions. - startingFeeRate := SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) + startingFeeRate := chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) // Next, we'll start a commitment update, with Alice sending a new // update to double the fee rate of the commitment. @@ -4034,10 +4456,14 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) { } // Both parties should now have the latest fee rate locked-in. - if SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) != newFeeRate { + if chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) != newFeeRate { t.Fatalf("alice's feePerKw was not locked in") } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != newFeeRate { + if chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) != newFeeRate { t.Fatalf("bob's feePerKw was not locked in") } @@ -4070,7 +4496,9 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -4118,7 +4546,7 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) { // First, we'll fetch the current fee rate present within the // commitment transactions. 
- startingFeeRate := SatPerKWeight( + startingFeeRate := chainfee.SatPerKWeight( aliceChannel.channelState.LocalCommitment.FeePerKw, ) newFeeRate := startingFeeRate @@ -4247,10 +4675,14 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) { } // Both parties should now have the latest fee rate locked-in. - if SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) != newFeeRate { + if chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) != newFeeRate { t.Fatalf("alice's feePerKw was not locked in") } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != newFeeRate { + if chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) != newFeeRate { t.Fatalf("bob's feePerKw was not locked in") } @@ -4272,10 +4704,14 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) { assertLogItems(0, numHTLCs+1) // ...and the final fee rate locked in. - if SatPerKWeight(aliceChannel.channelState.LocalCommitment.FeePerKw) != newFeeRate { + if chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) != newFeeRate { t.Fatalf("alice's feePerKw was not locked in") } - if SatPerKWeight(bobChannel.channelState.LocalCommitment.FeePerKw) != newFeeRate { + if chainfee.SatPerKWeight( + bobChannel.channelState.LocalCommitment.FeePerKw, + ) != newFeeRate { t.Fatalf("bob's feePerKw was not locked in") } } @@ -4289,7 +4725,9 @@ func TestChanSyncUnableToSync(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. 
- aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(false) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -4326,7 +4764,9 @@ func TestChanSyncInvalidLastSecret(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(false) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -4416,12 +4856,24 @@ func TestChanAvailableBandwidth(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } defer cleanUp() + aliceReserve := lnwire.NewMSatFromSatoshis( + aliceChannel.channelState.LocalChanCfg.ChanReserve, + ) + feeRate := chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) + htlcFee := lnwire.NewMSatFromSatoshis( + feeRate.FeeForWeight(input.HTLCWeight), + ) + assertBandwidthEstimateCorrect := func(aliceInitiate bool) { // With the HTLC's added, we'll now query the AvailableBalance // method for the current available channel bandwidth from @@ -4448,11 +4900,15 @@ func TestChanAvailableBandwidth(t *testing.T) { // Now, we'll obtain the current available bandwidth in Alice's // latest commitment and compare that to the prior estimate. 
aliceBalance := aliceChannel.channelState.LocalCommitment.LocalBalance - if aliceBalance != aliceAvailableBalance { + + // The balance we have available for new HTLCs should be the + // current local commitment balance, minus the channel reserve + // and the fee for adding an HTLC. + expBalance := aliceBalance - aliceReserve - htlcFee + if expBalance != aliceAvailableBalance { _, _, line, _ := runtime.Caller(1) t.Fatalf("line: %v, incorrect balance: expected %v, "+ - "got %v", line, aliceBalance, - aliceAvailableBalance) + "got %v", line, expBalance, aliceAvailableBalance) } } @@ -4530,6 +4986,187 @@ func TestChanAvailableBandwidth(t *testing.T) { // TODO(roasbeef): additional tests from diff starting conditions } +// TestChanAvailableBalanceNearHtlcFee checks that we get the expected reported +// balance when it is close to the htlc fee. +func TestChanAvailableBalanceNearHtlcFee(t *testing.T) { + t.Parallel() + + // Create a test channel which will be used for the duration of this + // unittest. The channel will be funded evenly with Alice having 5 BTC, + // and Bob having 5 BTC. + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + t.Fatalf("unable to create test channels: %v", err) + } + defer cleanUp() + + // Alice and Bob start with half the channel capacity. 
+ aliceBalance := lnwire.NewMSatFromSatoshis(5 * btcutil.SatoshiPerBitcoin) + bobBalance := lnwire.NewMSatFromSatoshis(5 * btcutil.SatoshiPerBitcoin) + + aliceReserve := lnwire.NewMSatFromSatoshis( + aliceChannel.channelState.LocalChanCfg.ChanReserve, + ) + bobReserve := lnwire.NewMSatFromSatoshis( + bobChannel.channelState.LocalChanCfg.ChanReserve, + ) + + aliceDustlimit := lnwire.NewMSatFromSatoshis( + aliceChannel.channelState.LocalChanCfg.DustLimit, + ) + feeRate := chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ) + htlcFee := lnwire.NewMSatFromSatoshis( + feeRate.FeeForWeight(input.HTLCWeight), + ) + commitFee := lnwire.NewMSatFromSatoshis( + aliceChannel.channelState.LocalCommitment.CommitFee, + ) + htlcTimeoutFee := lnwire.NewMSatFromSatoshis( + HtlcTimeoutFee(aliceChannel.channelState.ChanType, feeRate), + ) + htlcSuccessFee := lnwire.NewMSatFromSatoshis( + HtlcSuccessFee(aliceChannel.channelState.ChanType, feeRate), + ) + + // Helper method to check the current reported balance. + checkBalance := func(t *testing.T, expBalanceAlice, + expBalanceBob lnwire.MilliSatoshi) { + + t.Helper() + aliceBalance := aliceChannel.AvailableBalance() + if aliceBalance != expBalanceAlice { + t.Fatalf("Expected alice balance %v, got %v", + expBalanceAlice, aliceBalance) + } + + bobBalance := bobChannel.AvailableBalance() + if bobBalance != expBalanceBob { + t.Fatalf("Expected bob balance %v, got %v", + expBalanceBob, bobBalance) + } + } + + // Helper method to send an HTLC from Alice to Bob, decreasing Alice's + // balance. 
+ htlcIndex := uint64(0) + sendHtlc := func(htlcAmt lnwire.MilliSatoshi) { + t.Helper() + + htlc, preImage := createHTLC(int(htlcIndex), htlcAmt) + if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil { + t.Fatalf("unable to add htlc: %v", err) + } + if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { + t.Fatalf("unable to recv htlc: %v", err) + } + + if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { + t.Fatalf("unable to complete alice's state "+ + "transition: %v", err) + } + + err = bobChannel.SettleHTLC(preImage, htlcIndex, nil, nil, nil) + if err != nil { + t.Fatalf("unable to settle htlc: %v", err) + } + err = aliceChannel.ReceiveHTLCSettle(preImage, htlcIndex) + if err != nil { + t.Fatalf("unable to settle htlc: %v", err) + } + + if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { + t.Fatalf("unable to complete alice's state "+ + "transition: %v", err) + } + + htlcIndex++ + aliceBalance -= htlcAmt + bobBalance += htlcAmt + } + + // Balance should start out equal to half the channel capacity minus + // the commitment fee Alice must pay and the channel reserve. In + // addition the HTLC fee will be subtracted fromt the balance to + // reflect that this value must be reserved for any payment above the + // dust limit. + expAliceBalance := aliceBalance - commitFee - aliceReserve - htlcFee + + // Bob is not the initiator, so he will have all his balance available, + // since Alice pays for fees. Bob only need to keep his balance above + // the reserve. + expBobBalance := bobBalance - bobReserve + checkBalance(t, expAliceBalance, expBobBalance) + + // Find the minumim size of a non-dust HTLC. + aliceNonDustHtlc := aliceDustlimit + htlcTimeoutFee + + // Send a HTLC leaving Alice's remaining balance just enough to have + // nonDustHtlc left after paying the commit fee and htlc fee. 
+ htlcAmt := aliceBalance - (commitFee + aliceReserve + htlcFee + aliceNonDustHtlc) + sendHtlc(htlcAmt) + + // Now the real balance left will be + // nonDustHtlc+commitfee+aliceReserve+htlcfee. The available balance + // reported will just be nonDustHtlc, since the rest of the balance is + // reserved. + expAliceBalance = aliceNonDustHtlc + expBobBalance = bobBalance - bobReserve + checkBalance(t, expAliceBalance, expBobBalance) + + // Send an HTLC using all but one msat of the reported balance. + htlcAmt = aliceNonDustHtlc - 1 + sendHtlc(htlcAmt) + + // 1 msat should be left. + expAliceBalance = 1 + + // Bob should still have all his balance available, since even though + // Alice cannot afford to add a non-dust HTLC, she can afford to add a + // non-dust HTLC from Bob. + expBobBalance = bobBalance - bobReserve + checkBalance(t, expAliceBalance, expBobBalance) + + // Sendng the last msat. + htlcAmt = 1 + sendHtlc(htlcAmt) + + // No balance left. + expAliceBalance = 0 + + // We try to always reserve enough for the non-iniitator to be able to + // add an HTLC, hence Bob should still have all his non-reserved + // balance available. + expBobBalance = bobBalance - bobReserve + checkBalance(t, expAliceBalance, expBobBalance) + + // Even though Alice has a reported balance of 0, this is because we + // try to avoid getting into the position where she cannot pay the fee + // for Bob adding another HTLC. This means she actually _has_ some + // balance left, and we now force the channel into this situation by + // sending yet another HTLC. In practice this can also happen if a fee + // update eats into Alice's balance. + htlcAmt = 1 + sendHtlc(htlcAmt) + + // Now Alice balance is so low that she cannot even afford to add a new + // HTLC from Bob to the commitment transaction. Bob's balance should + // reflect this, by only reporting dust amount being available. Alice + // should still report a zero balance. 
+ + // Since the dustlimit is different for the two commitments, the + // largest HTLC Bob can send that Alice can afford on both commitments + // (remember she cannot afford to pay the HTLC fee) is the largest dust + // HTLC on Alice's commitemnt, since her dust limit is lower. + bobNonDustHtlc := aliceDustlimit + htlcSuccessFee + expBobBalance = bobNonDustHtlc - 1 + expAliceBalance = 0 + checkBalance(t, expAliceBalance, expBobBalance) +} + // TestSignCommitmentFailNotLockedIn tests that a channel will not attempt to // create a new state if it doesn't yet know of the next revocation point for // the remote party. @@ -4539,7 +5176,9 @@ func TestSignCommitmentFailNotLockedIn(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, _, cleanUp, err := CreateTestChannels(true) + aliceChannel, _, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -4564,7 +5203,9 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) { t.Parallel() // First, we'll make a channel between Alice and Bob. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -4878,7 +5519,9 @@ func TestInvalidCommitSigError(t *testing.T) { t.Parallel() // First, we'll make a channel between Alice and Bob. 
- aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -4925,7 +5568,9 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) { // Create a test channel which will be used for the duration of this // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -5084,7 +5729,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) { // unittest. The channel will be funded evenly with Alice having 5 BTC, // and Bob having 5 BTC. aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( - false, + channeldb.SingleFunderBit, ) if err != nil { t.Fatalf("unable to create test channels: %v", err) @@ -5211,7 +5856,9 @@ func TestDesyncHTLCs(t *testing.T) { // We'll kick off the test by creating our channels which both are // loaded with 5 BTC each. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -5278,30 +5925,35 @@ func TestMaxAcceptedHTLCs(t *testing.T) { // We'll kick off the test by creating our channels which both are // loaded with 5 BTC each. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } defer cleanUp() // One over the maximum number of HTLCs that either can accept. 
- const numHTLCs = 20 - const numHTLCsReceived = 12 + const numHTLCs = 12 - // Set the remote's required MaxAcceptedHtlcs. This means that alice + // Set the remote's required MaxAcceptedHtlcs. This means that Alice // can only offer the remote up to numHTLCs HTLCs. - aliceChannel.localChanCfg.MaxAcceptedHtlcs = numHTLCs - bobChannel.remoteChanCfg.MaxAcceptedHtlcs = numHTLCs + aliceChannel.channelState.LocalChanCfg.MaxAcceptedHtlcs = numHTLCs + bobChannel.channelState.RemoteChanCfg.MaxAcceptedHtlcs = numHTLCs // Similarly, set the remote config's MaxAcceptedHtlcs. This means - // that the remote will be aware that Alice will only accept up to - // numHTLCsRecevied at a time. - aliceChannel.remoteChanCfg.MaxAcceptedHtlcs = numHTLCsReceived - bobChannel.localChanCfg.MaxAcceptedHtlcs = numHTLCsReceived + // that the remote will be aware that Bob will only accept up to + // numHTLCs at a time. + aliceChannel.channelState.RemoteChanCfg.MaxAcceptedHtlcs = numHTLCs + bobChannel.channelState.LocalChanCfg.MaxAcceptedHtlcs = numHTLCs // Each HTLC amount is 0.1 BTC. htlcAmt := lnwire.NewMSatFromSatoshis(0.1 * btcutil.SatoshiPerBitcoin) + // htlcID is used to keep track of the HTLC that Bob will fail back to + // Alice. + var htlcID uint64 + // Send the maximum allowed number of HTLCs. for i := 0; i < numHTLCs; i++ { htlc, _ := createHTLC(i, htlcAmt) @@ -5311,6 +5963,13 @@ func TestMaxAcceptedHTLCs(t *testing.T) { if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { t.Fatalf("unable to recv htlc: %v", err) } + + // Just assign htlcID to the last received HTLC. + htlcID = htlc.ID + } + + if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { + t.Fatalf("unable to transition state: %v", err) } // The next HTLC should fail with ErrMaxHTLCNumber. 
@@ -5320,17 +5979,215 @@ func TestMaxAcceptedHTLCs(t *testing.T) { t.Fatalf("expected ErrMaxHTLCNumber, instead received: %v", err) } - // After receiving the next HTLC, next state transition should fail - // with ErrMaxHTLCNumber. + // Receiving the next HTLC should fail. + if _, err := bobChannel.ReceiveHTLC(htlc); err != ErrMaxHTLCNumber { + t.Fatalf("expected ErrMaxHTLCNumber, instead received: %v", err) + } + + // Bob will fail the htlc specified by htlcID and then force a state + // transition. + err = bobChannel.FailHTLC(htlcID, []byte{}, nil, nil, nil) + if err != nil { + t.Fatalf("unable to fail htlc: %v", err) + } + + if err := aliceChannel.ReceiveFailHTLC(htlcID, []byte{}); err != nil { + t.Fatalf("unable to receive fail htlc: %v", err) + } + + if err := ForceStateTransition(bobChannel, aliceChannel); err != nil { + t.Fatalf("unable to transition state: %v", err) + } + + // Bob should succeed in adding a new HTLC since a previous HTLC was just + // failed. We use numHTLCs here since the previous AddHTLC with this index + // failed. + htlc, _ = createHTLC(numHTLCs, htlcAmt) + if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil { + t.Fatalf("unable to add htlc: %v", err) + } if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { t.Fatalf("unable to recv htlc: %v", err) } - err = ForceStateTransition(aliceChannel, bobChannel) - if err != ErrMaxHTLCNumber { + + // Add a commitment to Bob's commitment chain. + aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() + if err != nil { + t.Fatalf("unable to sign next commitment: %v", err) + } + err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) + if err != nil { + t.Fatalf("unable to recv new commitment: %v", err) + } + + // The next HTLC should fail with ErrMaxHTLCNumber. The index is incremented + // by one. 
+ htlc, _ = createHTLC(numHTLCs+1, htlcAmt) + if _, err = aliceChannel.AddHTLC(htlc, nil); err != ErrMaxHTLCNumber { + t.Fatalf("expected ErrMaxHTLCNumber, instead received: %v", err) + } + + // Likewise, Bob should not be able to receive this HTLC if Alice can't + // add it. + if _, err := bobChannel.ReceiveHTLC(htlc); err != ErrMaxHTLCNumber { t.Fatalf("expected ErrMaxHTLCNumber, instead received: %v", err) } } +// TestMaxAsynchronousHtlcs tests that Bob correctly receives (and does not +// fail) an HTLC from Alice when exchanging asynchronous payments. We want to +// mimic the following case where Bob's commitment transaction is full before +// starting: +// Alice Bob +// 1. <---settle/fail--- +// 2. <-------sig------- +// 3. --------sig------> (covers an add sent before step 1) +// 4. <-------rev------- +// 5. --------rev------> +// 6. --------add------> +// 7. - - - - sig - - -> +// This represents an asynchronous commitment dance in which both sides are +// sending signatures at the same time. In step 3, the signature does not +// cover the recent settle/fail that Bob sent in step 1. However, the add that +// Alice sends to Bob in step 6 does not overflow Bob's commitment transaction. +// This is because validateCommitmentSanity counts the HTLC's by ignoring +// HTLC's which will be removed in the next signature that Alice sends. Thus, +// the add won't overflow. This is because the signature received in step 7 +// covers the settle/fail in step 1 and makes space for the add in step 6. +func TestMaxAsynchronousHtlcs(t *testing.T) { + t.Parallel() + + // We'll kick off the test by creating our channels which both are + // loaded with 5 BTC each. + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + t.Fatalf("unable to create test channels: %v", err) + } + defer cleanUp() + + // One over the maximum number of HTLCs that either can accept. 
+ const numHTLCs = 12 + + // Set the remote's required MaxAcceptedHtlcs. This means that Alice + // can only offer the remote up to numHTLCs HTLCs. + aliceChannel.channelState.LocalChanCfg.MaxAcceptedHtlcs = numHTLCs + bobChannel.channelState.RemoteChanCfg.MaxAcceptedHtlcs = numHTLCs + + // Similarly, set the remote config's MaxAcceptedHtlcs. This means + // that the remote will be aware that Bob will only accept up to + // numHTLCs at a time. + aliceChannel.channelState.RemoteChanCfg.MaxAcceptedHtlcs = numHTLCs + bobChannel.channelState.LocalChanCfg.MaxAcceptedHtlcs = numHTLCs + + // Each HTLC amount is 0.1 BTC. + htlcAmt := lnwire.NewMSatFromSatoshis(0.1 * btcutil.SatoshiPerBitcoin) + + var htlcID uint64 + + // Send the maximum allowed number of HTLCs minus one. + for i := 0; i < numHTLCs-1; i++ { + htlc, _ := createHTLC(i, htlcAmt) + if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil { + t.Fatalf("unable to add htlc: %v", err) + } + if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { + t.Fatalf("unable to recv htlc: %v", err) + } + + // Just assign htlcID to the last received HTLC. + htlcID = htlc.ID + } + + if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { + t.Fatalf("unable to transition state: %v", err) + } + + // Send an HTLC to Bob so that Bob's commitment transaction is full. + htlc, _ := createHTLC(numHTLCs-1, htlcAmt) + if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil { + t.Fatalf("unable to add htlc: %v", err) + } + if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { + t.Fatalf("unable to recv htlc: %v", err) + } + + // Fail back an HTLC and sign a commitment as in steps 1 & 2. 
+ err = bobChannel.FailHTLC(htlcID, []byte{}, nil, nil, nil) + if err != nil { + t.Fatalf("unable to fail htlc: %v", err) + } + + if err := aliceChannel.ReceiveFailHTLC(htlcID, []byte{}); err != nil { + t.Fatalf("unable to receive fail htlc: %v", err) + } + + bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment() + if err != nil { + t.Fatalf("unable to sign next commitment: %v", err) + } + + err = aliceChannel.ReceiveNewCommitment(bobSig, bobHtlcSigs) + if err != nil { + t.Fatalf("unable to receive new commitment: %v", err) + } + + // Cover the HTLC referenced with id equal to numHTLCs-1 with a new + // signature (step 3). + aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment() + if err != nil { + t.Fatalf("unable to sign next commitment: %v", err) + } + + err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) + if err != nil { + t.Fatalf("unable to receive new commitment: %v", err) + } + + // Both sides exchange revocations as in step 4 & 5. + bobRevocation, _, err := bobChannel.RevokeCurrentCommitment() + if err != nil { + t.Fatalf("unable to revoke revocation: %v", err) + } + + _, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation) + if err != nil { + t.Fatalf("unable to receive revocation: %v", err) + } + + aliceRevocation, _, err := aliceChannel.RevokeCurrentCommitment() + if err != nil { + t.Fatalf("unable to revoke revocation: %v", err) + } + + _, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation) + if err != nil { + t.Fatalf("unable to receive revocation: %v", err) + } + + // Send the final Add which should succeed as in step 6. + htlc, _ = createHTLC(numHTLCs, htlcAmt) + if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil { + t.Fatalf("unable to add htlc: %v", err) + } + if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { + t.Fatalf("unable to recv htlc: %v", err) + } + + // Receiving the commitment should succeed as in step 7 since space was + // made. 
+ aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment() + if err != nil { + t.Fatalf("unable to sign next commitment: %v", err) + } + + err = bobChannel.ReceiveNewCommitment(aliceSig, aliceHtlcSigs) + if err != nil { + t.Fatalf("unable to receive new commitment: %v", err) + } +} + // TestMaxPendingAmount tests that the maximum overall pending HTLC value is met // given several HTLCs that, combined, exceed this value. An ErrMaxPendingAmount // error should be returned. @@ -5339,7 +6196,9 @@ func TestMaxPendingAmount(t *testing.T) { // We'll kick off the test by creating our channels which both are // loaded with 5 BTC each. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -5353,8 +6212,8 @@ func TestMaxPendingAmount(t *testing.T) { // We set the max pending amount of Alice's config. This mean that she // cannot offer Bob HTLCs with a total value above this limit at a given // time. - aliceChannel.localChanCfg.MaxPendingAmount = maxPending - bobChannel.remoteChanCfg.MaxPendingAmount = maxPending + aliceChannel.channelState.LocalChanCfg.MaxPendingAmount = maxPending + bobChannel.channelState.RemoteChanCfg.MaxPendingAmount = maxPending // First, we'll add 2 HTLCs of 1.5 BTC each to Alice's commitment. // This won't trigger Alice's ErrMaxPendingAmount error. @@ -5379,13 +6238,8 @@ func TestMaxPendingAmount(t *testing.T) { t.Fatalf("expected ErrMaxPendingAmount, instead received: %v", err) } - // And also Bob shouldn't be accepting this HTLC in the next state - // transition. - if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { - t.Fatalf("unable to recv htlc: %v", err) - } - err = ForceStateTransition(aliceChannel, bobChannel) - if err != ErrMaxPendingAmount { + // And also Bob shouldn't be accepting this HTLC upon calling ReceiveHTLC. 
+ if _, err := bobChannel.ReceiveHTLC(htlc); err != ErrMaxPendingAmount { t.Fatalf("expected ErrMaxPendingAmount, instead received: %v", err) } } @@ -5428,7 +6282,7 @@ func TestChanReserve(t *testing.T) { // We'll kick off the test by creating our channels which both // are loaded with 5 BTC each. aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( - true, + channeldb.SingleFunderTweaklessBit, ) if err != nil { t.Fatalf("unable to create test channels: %v", err) @@ -5442,20 +6296,20 @@ func TestChanReserve(t *testing.T) { // Alice will need to keep her reserve above aliceMinReserve, // so set this limit to here local config. - aliceChannel.localChanCfg.ChanReserve = aliceMinReserve + aliceChannel.channelState.LocalChanCfg.ChanReserve = aliceMinReserve // During channel opening Bob will also get to know Alice's // minimum reserve, and this will be found in his remote // config. - bobChannel.remoteChanCfg.ChanReserve = aliceMinReserve + bobChannel.channelState.RemoteChanCfg.ChanReserve = aliceMinReserve // We set Bob's channel reserve to a value that is larger than // his current balance in the channel. This will ensure that // after a channel is first opened, Bob can still receive HTLCs // even though his balance is less than his channel reserve. bobMinReserve := btcutil.Amount(6 * btcutil.SatoshiPerBitcoin) - bobChannel.localChanCfg.ChanReserve = bobMinReserve - aliceChannel.remoteChanCfg.ChanReserve = bobMinReserve + bobChannel.channelState.LocalChanCfg.ChanReserve = bobMinReserve + aliceChannel.channelState.RemoteChanCfg.ChanReserve = bobMinReserve return aliceChannel, bobChannel, cleanUp } @@ -5507,12 +6361,8 @@ func TestChanReserve(t *testing.T) { t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err) } - // Alice will reject this htlc when a state transition is attempted. 
- if _, err := aliceChannel.ReceiveHTLC(htlc); err != nil { - t.Fatalf("unable to recv htlc: %v", err) - } - err = ForceStateTransition(aliceChannel, bobChannel) - if err != ErrBelowChanReserve { + // Alice will reject this htlc upon receiving the htlc. + if _, err := aliceChannel.ReceiveHTLC(htlc); err != ErrBelowChanReserve { t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err) } @@ -5554,13 +6404,8 @@ func TestChanReserve(t *testing.T) { t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err) } - // Likewise, Bob will reject a state transition after this htlc is - // received, of the same reason. - if _, err := bobChannel.ReceiveHTLC(htlc); err != nil { - t.Fatalf("unable to recv htlc: %v", err) - } - err = ForceStateTransition(aliceChannel, bobChannel) - if err != ErrBelowChanReserve { + // Likewise, Bob will reject receiving the htlc because of the same reason. + if _, err := bobChannel.ReceiveHTLC(htlc); err != ErrBelowChanReserve { t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err) } @@ -5638,6 +6483,106 @@ func TestChanReserve(t *testing.T) { ) } +// TestChanReserveRemoteInitiator tests that the channel reserve of the +// initiator is accounted for when adding HTLCs, whether the initiator is the +// local or remote node. +func TestChanReserveRemoteInitiator(t *testing.T) { + t.Parallel() + + // We start out with a channel where both parties have 5 BTC. + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + t.Fatal(err) + } + defer cleanUp() + + // Set Alice's channel reserve to be 5 BTC-commitfee. This means she + // has just enough balance to cover the commitment fee, but not enough + // to add any more HTLCs to the commitment. Although a reserve this + // high is unrealistic, a channel can easily get into a situation + // where the initiator cannot pay for the fee of any more HTLCs. 
+ commitFee := aliceChannel.channelState.LocalCommitment.CommitFee + aliceMinReserve := 5*btcutil.SatoshiPerBitcoin - commitFee + + aliceChannel.channelState.LocalChanCfg.ChanReserve = aliceMinReserve + bobChannel.channelState.RemoteChanCfg.ChanReserve = aliceMinReserve + + // Now let Bob attempt to add an HTLC of 0.1 BTC. He has plenty of + // money available to spend, but Alice, who is the initiator, cannot + // afford any more HTLCs on the commitment transaction because that + // would take her below her channel reserve. + htlcAmt := lnwire.NewMSatFromSatoshis(0.1 * btcutil.SatoshiPerBitcoin) + htlc, _ := createHTLC(0, htlcAmt) + + // Bob should refuse to add this HTLC, since he realizes it will create + // an invalid commitment. + _, err = bobChannel.AddHTLC(htlc, nil) + if err != ErrBelowChanReserve { + t.Fatalf("expected ErrBelowChanReserve, instead received: %v", + err) + } + + // Of course Alice will also not have enough balance to add it herself. + _, err = aliceChannel.AddHTLC(htlc, nil) + if err != ErrBelowChanReserve { + t.Fatalf("expected ErrBelowChanReserve, instead received: %v", + err) + } + + // Same for Alice, she should refuse to accept this HTLC. + if _, err := aliceChannel.ReceiveHTLC(htlc); err != ErrBelowChanReserve { + t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err) + } +} + +// TestChanReserveLocalInitiatorDustHtlc tests that the fee the initiator must pay +// when adding HTLCs is accounted for, even though the HTLC is considered dust +// by the remote node. +func TestChanReserveLocalInitiatorDustHtlc(t *testing.T) { + t.Parallel() + + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + t.Fatal(err) + } + defer cleanUp() + + // The amount of the HTLC should not be considered dust according to + // Alice's dust limit (200 sat), but be dust according to Bob's dust + // limit (1300 sat). 
It is considered dust if the amount remaining + // after paying the HTLC fee is below the dust limit, so we choose a + // size of 500+htlcFee. + htlcSat := btcutil.Amount(500) + HtlcTimeoutFee( + aliceChannel.channelState.ChanType, + chainfee.SatPerKWeight( + aliceChannel.channelState.LocalCommitment.FeePerKw, + ), + ) + + // Set Alice's channel reserve to be low enough to carry the value of + // the HTLC, but not low enough to allow the extra fee from adding the + // HTLC to the commitment. + commitFee := aliceChannel.channelState.LocalCommitment.CommitFee + aliceMinReserve := 5*btcutil.SatoshiPerBitcoin - commitFee - htlcSat + + aliceChannel.channelState.LocalChanCfg.ChanReserve = aliceMinReserve + bobChannel.channelState.RemoteChanCfg.ChanReserve = aliceMinReserve + + htlcDustAmt := lnwire.NewMSatFromSatoshis(htlcSat) + htlc, _ := createHTLC(0, htlcDustAmt) + + // Alice should realize that the fee she must pay to add this HTLC to + // the local commitment would take her below the channel reserve. + _, err = aliceChannel.AddHTLC(htlc, nil) + if err != ErrBelowChanReserve { + t.Fatalf("expected ErrBelowChanReserve, instead received: %v", err) + } +} + // TestMinHTLC tests that the ErrBelowMinHTLC error is thrown if an HTLC is added // that is below the minimum allowed value for HTLCs. func TestMinHTLC(t *testing.T) { @@ -5645,7 +6590,9 @@ func TestMinHTLC(t *testing.T) { // We'll kick off the test by creating our channels which both are // loaded with 5 BTC each. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -5657,8 +6604,8 @@ func TestMinHTLC(t *testing.T) { // Setting the min value in Alice's local config means that the // remote will not accept any HTLCs of value less than specified. 
- aliceChannel.localChanCfg.MinHTLC = minValue - bobChannel.remoteChanCfg.MinHTLC = minValue + aliceChannel.channelState.LocalChanCfg.MinHTLC = minValue + bobChannel.channelState.RemoteChanCfg.MinHTLC = minValue // First, we will add an HTLC of 0.5 BTC. This will not trigger // ErrBelowMinHTLC. @@ -5680,18 +6627,49 @@ func TestMinHTLC(t *testing.T) { t.Fatalf("expected ErrBelowMinHTLC, instead received: %v", err) } - // Bob will receive this HTLC, but reject the next state update, since + // Bob will receive this HTLC, but reject the next received htlc, since // the htlc is too small. _, err = bobChannel.ReceiveHTLC(htlc) - if err != nil { - t.Fatalf("error receiving htlc: %v", err) - } - err = ForceStateTransition(aliceChannel, bobChannel) if err != ErrBelowMinHTLC { t.Fatalf("expected ErrBelowMinHTLC, instead received: %v", err) } } +// TestInvalidHTLCAmt tests that ErrInvalidHTLCAmt is returned when trying to +// add HTLCs that don't carry a positive value. +func TestInvalidHTLCAmt(t *testing.T) { + t.Parallel() + + // We'll kick off the test by creating our channels which both are + // loaded with 5 BTC each. + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + t.Fatalf("unable to create test channels: %v", err) + } + defer cleanUp() + + // We'll set the min HTLC values for each party to zero, which + // technically would permit zero-value HTLCs. + aliceChannel.channelState.LocalChanCfg.MinHTLC = 0 + bobChannel.channelState.RemoteChanCfg.MinHTLC = 0 + + // Create a zero-value HTLC. + htlcAmt := lnwire.MilliSatoshi(0) + htlc, _ := createHTLC(0, htlcAmt) + + // Sending or receiving the HTLC should fail with ErrInvalidHTLCAmt. 
+ _, err = aliceChannel.AddHTLC(htlc, nil) + if err != ErrInvalidHTLCAmt { + t.Fatalf("expected ErrInvalidHTLCAmt, got: %v", err) + } + _, err = bobChannel.ReceiveHTLC(htlc) + if err != ErrInvalidHTLCAmt { + t.Fatalf("expected ErrInvalidHTLCAmt, got: %v", err) + } +} + // TestNewBreachRetributionSkipsDustHtlcs ensures that in the case of a // contract breach, all dust HTLCs are ignored and not reflected in the // produced BreachRetribution struct. We ignore these HTLCs as they aren't @@ -5702,7 +6680,9 @@ func TestNewBreachRetributionSkipsDustHtlcs(t *testing.T) { // We'll kick off the test by creating our channels which both are // loaded with 5 BTC each. - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -5745,8 +6725,8 @@ func TestNewBreachRetributionSkipsDustHtlcs(t *testing.T) { // With the HTLC's applied to both update logs, we'll initiate a state // transition from Alice. 
- if err := ForceStateTransition(bobChannel, aliceChannel); err != nil { - t.Fatalf("unable to complete bob's state transition: %v", err) + if err := ForceStateTransition(aliceChannel, bobChannel); err != nil { + t.Fatalf("unable to complete alice's state transition: %v", err) } // At this point, we'll capture the current state number, as well as @@ -5874,7 +6854,9 @@ func compareLogs(a, b *updateLog) error { func TestChannelRestoreUpdateLogs(t *testing.T) { t.Parallel() - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -6043,7 +7025,9 @@ func restoreAndAssert(t *testing.T, channel *LightningChannel, numAddsLocal, func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) { t.Parallel() - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -6122,9 +7106,11 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) { // At this point Alice has advanced her local commitment chain to a // commitment with no HTLCs left. The current state on her remote // commitment chain, however, still has the HTLC active, as she hasn't - // sent a new signature yet. + // sent a new signature yet. If we'd now restart and restore, the htlc + // failure update should still be waiting for inclusion in Alice's next + // signature. Otherwise the produced signature would be invalid. assertInLogs(t, aliceChannel, 1, 0, 0, 1) - restoreAndAssert(t, aliceChannel, 1, 0, 0, 0) + restoreAndAssert(t, aliceChannel, 1, 0, 0, 1) // Now send a signature from Alice. This will give Bob a new commitment // where the HTLC is removed. 
@@ -6164,7 +7150,9 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) { func TestDuplicateFailRejection(t *testing.T) { t.Parallel() - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -6242,7 +7230,9 @@ func TestDuplicateFailRejection(t *testing.T) { func TestDuplicateSettleRejection(t *testing.T) { t.Parallel() - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -6323,7 +7313,9 @@ func TestDuplicateSettleRejection(t *testing.T) { func TestChannelRestoreCommitHeight(t *testing.T) { t.Parallel() - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(true) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -6510,7 +7502,9 @@ func TestChannelRestoreCommitHeight(t *testing.T) { func TestForceCloseFailLocalDataLoss(t *testing.T) { t.Parallel() - aliceChannel, _, cleanUp, err := CreateTestChannels(false) + aliceChannel, _, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -6541,7 +7535,9 @@ func TestForceCloseFailLocalDataLoss(t *testing.T) { func TestForceCloseBorkedState(t *testing.T) { t.Parallel() - aliceChannel, bobChannel, cleanUp, err := CreateTestChannels(false) + aliceChannel, bobChannel, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -6618,13 +7614,17 @@ func TestForceCloseBorkedState(t *testing.T) { func TestChannelMaxFeeRate(t *testing.T) { 
t.Parallel() - aliceChannel, _, cleanUp, err := CreateTestChannels(true) + aliceChannel, _, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } defer cleanUp() - assertMaxFeeRate := func(maxAlloc float64, expFeeRate SatPerKWeight) { + assertMaxFeeRate := func(maxAlloc float64, + expFeeRate chainfee.SatPerKWeight) { + maxFeeRate := aliceChannel.MaxFeeRate(maxAlloc) if maxFeeRate != expFeeRate { t.Fatalf("expected max fee rate of %v with max "+ @@ -6636,5 +7636,43 @@ func TestChannelMaxFeeRate(t *testing.T) { assertMaxFeeRate(1.0, 690607734) assertMaxFeeRate(0.001, 690607) assertMaxFeeRate(0.000001, 690) - assertMaxFeeRate(0.0000001, FeePerKwFloor) + assertMaxFeeRate(0.0000001, chainfee.FeePerKwFloor) +} + +// TestChannelFeeRateFloor asserts that valid commitments can be proposed and +// received using chainfee.FeePerKwFloor as the initiator's fee rate. +func TestChannelFeeRateFloor(t *testing.T) { + t.Parallel() + + alice, bob, cleanUp, err := CreateTestChannels( + channeldb.SingleFunderTweaklessBit, + ) + if err != nil { + t.Fatalf("unable to create test channels: %v", err) + } + defer cleanUp() + + // Set the fee rate to the proposing fee rate floor. + minFee := chainfee.FeePerKwFloor + + // Alice is the initiator, so only she can propose fee updates. + if err := alice.UpdateFee(minFee); err != nil { + t.Fatalf("unable to send fee update") + } + if err := bob.ReceiveUpdateFee(minFee); err != nil { + t.Fatalf("unable to receive fee update") + } + + // Check that alice can still sign commitments. + sig, htlcSigs, _, err := alice.SignNextCommitment() + if err != nil { + t.Fatalf("alice unable to sign commitment: %v", err) + } + + // Check that bob can still receive commitments. 
+ err = bob.ReceiveNewCommitment(sig, htlcSigs) + if err != nil { + t.Fatalf("bob unable to process alice's new commitment: %v", + err) + } } diff --git a/lnwallet/chanvalidate/validate_test.go b/lnwallet/chanvalidate/validate_test.go index 12bb5c093e..e979b014b0 100644 --- a/lnwallet/chanvalidate/validate_test.go +++ b/lnwallet/chanvalidate/validate_test.go @@ -98,7 +98,7 @@ func newChannelTestCtx(chanSize int64) (*channelTestCtx, error) { } sigHashes := txscript.NewTxSigHashes(commitTx) - aliceSig, err := txscript.RawTxInWitnessSignature( + aliceSigRaw, err := txscript.RawTxInWitnessSignature( commitTx, sigHashes, 0, chanSize, multiSigScript, txscript.SigHashAll, alicePriv, ) @@ -106,7 +106,14 @@ func newChannelTestCtx(chanSize int64) (*channelTestCtx, error) { return nil, err } - bobSig, err := txscript.RawTxInWitnessSignature( + aliceSig, err := btcec.ParseDERSignature( + aliceSigRaw, btcec.S256(), + ) + if err != nil { + return nil, err + } + + bobSigRaw, err := txscript.RawTxInWitnessSignature( commitTx, sigHashes, 0, chanSize, multiSigScript, txscript.SigHashAll, bobPriv, ) @@ -114,6 +121,13 @@ func newChannelTestCtx(chanSize int64) (*channelTestCtx, error) { return nil, err } + bobSig, err := btcec.ParseDERSignature( + bobSigRaw, btcec.S256(), + ) + if err != nil { + return nil, err + } + commitTx.TxIn[0].Witness = input.SpendMultiSig( multiSigScript, alicePub.SerializeCompressed(), aliceSig, bobPub.SerializeCompressed(), bobSig, diff --git a/lnwallet/commitment.go b/lnwallet/commitment.go new file mode 100644 index 0000000000..d70f3ca0ee --- /dev/null +++ b/lnwallet/commitment.go @@ -0,0 +1,774 @@ +package lnwallet + +import ( + "fmt" + + "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + 
"github.com/lightningnetwork/lnd/lnwire" +) + +// anchorSize is the constant anchor output size. +const anchorSize = btcutil.Amount(330) + +// CommitmentKeyRing holds all derived keys needed to construct commitment and +// HTLC transactions. The keys are derived differently depending whether the +// commitment transaction is ours or the remote peer's. Private keys associated +// with each key may belong to the commitment owner or the "other party" which +// is referred to in the field comments, regardless of which is local and which +// is remote. +type CommitmentKeyRing struct { + // CommitPoint is the "per commitment point" used to derive the tweak + // for each base point. + CommitPoint *btcec.PublicKey + + // LocalCommitKeyTweak is the tweak used to derive the local public key + // from the local payment base point or the local private key from the + // base point secret. This may be included in a SignDescriptor to + // generate signatures for the local payment key. + // + // NOTE: This will always refer to "our" local key, regardless of + // whether this is our commit or not. + LocalCommitKeyTweak []byte + + // TODO(roasbeef): need delay tweak as well? + + // LocalHtlcKeyTweak is the tweak used to derive the local HTLC key + // from the local HTLC base point. This value is needed in order to + // derive the final key used within the HTLC scripts in the commitment + // transaction. + // + // NOTE: This will always refer to "our" local HTLC key, regardless of + // whether this is our commit or not. + LocalHtlcKeyTweak []byte + + // LocalHtlcKey is the key that will be used in any clause paying to + // our node of any HTLC scripts within the commitment transaction for + // this key ring set. + // + // NOTE: This will always refer to "our" local HTLC key, regardless of + // whether this is our commit or not. + LocalHtlcKey *btcec.PublicKey + + // RemoteHtlcKey is the key that will be used in clauses within the + // HTLC script that send money to the remote party. 
+ // + // NOTE: This will always refer to "their" remote HTLC key, regardless + // of whether this is our commit or not. + RemoteHtlcKey *btcec.PublicKey + + // ToLocalKey is the commitment transaction owner's key which is + // included in HTLC success and timeout transaction scripts. This is + // the public key used for the to_local output of the commitment + // transaction. + // + // NOTE: Who's key this is depends on the current perspective. If this + // is our commitment this will be our key. + ToLocalKey *btcec.PublicKey + + // ToRemoteKey is the non-owner's payment key in the commitment tx. + // This is the key used to generate the to_remote output within the + // commitment transaction. + // + // NOTE: Who's key this is depends on the current perspective. If this + // is our commitment this will be their key. + ToRemoteKey *btcec.PublicKey + + // RevocationKey is the key that can be used by the other party to + // redeem outputs from a revoked commitment transaction if it were to + // be published. + // + // NOTE: Who can sign for this key depends on the current perspective. + // If this is our commitment, it means the remote node can sign for + // this key in case of a breach. + RevocationKey *btcec.PublicKey +} + +// DeriveCommitmentKeys generates a new commitment key set using the base points +// and commitment point. The keys are derived differently depending on the type +// of channel, and whether the commitment transaction is ours or the remote +// peer's. +func DeriveCommitmentKeys(commitPoint *btcec.PublicKey, + isOurCommit bool, chanType channeldb.ChannelType, + localChanCfg, remoteChanCfg *channeldb.ChannelConfig) *CommitmentKeyRing { + + tweaklessCommit := chanType.IsTweakless() + + // Depending on if this is our commit or not, we'll choose the correct + // base point. 
+ localBasePoint := localChanCfg.PaymentBasePoint + if isOurCommit { + localBasePoint = localChanCfg.DelayBasePoint + } + + // First, we'll derive all the keys that don't depend on the context of + // whose commitment transaction this is. + keyRing := &CommitmentKeyRing{ + CommitPoint: commitPoint, + + LocalCommitKeyTweak: input.SingleTweakBytes( + commitPoint, localBasePoint.PubKey, + ), + LocalHtlcKeyTweak: input.SingleTweakBytes( + commitPoint, localChanCfg.HtlcBasePoint.PubKey, + ), + LocalHtlcKey: input.TweakPubKey( + localChanCfg.HtlcBasePoint.PubKey, commitPoint, + ), + RemoteHtlcKey: input.TweakPubKey( + remoteChanCfg.HtlcBasePoint.PubKey, commitPoint, + ), + } + + // We'll now compute the to_local, to_remote, and revocation key based + // on the current commitment point. All keys are tweaked each state in + // order to ensure the keys from each state are unlinkable. To create + // the revocation key, we take the opposite party's revocation base + // point and combine that with the current commitment point. + var ( + toLocalBasePoint *btcec.PublicKey + toRemoteBasePoint *btcec.PublicKey + revocationBasePoint *btcec.PublicKey + ) + if isOurCommit { + toLocalBasePoint = localChanCfg.DelayBasePoint.PubKey + toRemoteBasePoint = remoteChanCfg.PaymentBasePoint.PubKey + revocationBasePoint = remoteChanCfg.RevocationBasePoint.PubKey + } else { + toLocalBasePoint = remoteChanCfg.DelayBasePoint.PubKey + toRemoteBasePoint = localChanCfg.PaymentBasePoint.PubKey + revocationBasePoint = localChanCfg.RevocationBasePoint.PubKey + } + + // With the base points assigned, we can now derive the actual keys + // using the base point, and the current commitment tweak. + keyRing.ToLocalKey = input.TweakPubKey(toLocalBasePoint, commitPoint) + keyRing.RevocationKey = input.DeriveRevocationPubkey( + revocationBasePoint, commitPoint, + ) + + // If this commitment should omit the tweak for the remote point, then + // we'll use that directly, and ignore the commitPoint tweak. 
+	if tweaklessCommit {
+		keyRing.ToRemoteKey = toRemoteBasePoint
+
+		// If this is not our commitment, the above ToRemoteKey will be
+		// ours, and we blank out the local commitment tweak to
+		// indicate that the key should not be tweaked when signing.
+		if !isOurCommit {
+			keyRing.LocalCommitKeyTweak = nil
+		}
+	} else {
+		keyRing.ToRemoteKey = input.TweakPubKey(
+			toRemoteBasePoint, commitPoint,
+		)
+	}
+
+	return keyRing
+}
+
+// ScriptInfo holds a redeem script and hash.
+type ScriptInfo struct {
+	// PkScript is the output's PkScript.
+	PkScript []byte
+
+	// WitnessScript is the full script required to properly redeem the
+	// output. This field should be set to the full script if a p2wsh
+	// output is being signed. For p2wkh it should be set equal to the
+	// PkScript.
+	WitnessScript []byte
+}
+
+// CommitScriptToRemote creates the script that will pay to the non-owner of
+// the commitment transaction, adding a delay to the script based on the
+// channel type. The second return value is the CSV delay of the output
+// script, which must be satisfied in order to spend the output.
+func CommitScriptToRemote(chanType channeldb.ChannelType,
+	key *btcec.PublicKey) (*ScriptInfo, uint32, error) {
+
+	// If this channel type has anchors, we derive the delayed to_remote
+	// script.
+	if chanType.HasAnchors() {
+		script, err := input.CommitScriptToRemoteConfirmed(key)
+		if err != nil {
+			return nil, 0, err
+		}
+
+		p2wsh, err := input.WitnessScriptHash(script)
+		if err != nil {
+			return nil, 0, err
+		}
+
+		return &ScriptInfo{
+			PkScript:      p2wsh,
+			WitnessScript: script,
+		}, 1, nil
+	}
+
+	// Otherwise the to_remote will be a simple p2wkh.
+	p2wkh, err := input.CommitScriptUnencumbered(key)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Since this is a regular P2WKH, the WitnessScript and PkScript should
+	// both be set to the script hash.
+ return &ScriptInfo{ + WitnessScript: p2wkh, + PkScript: p2wkh, + }, 0, nil +} + +// HtlcSigHashType returns the sighash type to use for HTLC success and timeout +// transactions given the channel type. +func HtlcSigHashType(chanType channeldb.ChannelType) txscript.SigHashType { + if chanType.HasAnchors() { + return txscript.SigHashSingle | txscript.SigHashAnyOneCanPay + } + + return txscript.SigHashAll +} + +// HtlcSecondLevelInputSequence dictates the sequence number we must use on the +// input to a second level HTLC transaction. +func HtlcSecondLevelInputSequence(chanType channeldb.ChannelType) uint32 { + if chanType.HasAnchors() { + return 1 + } + + return 0 +} + +// CommitWeight returns the base commitment weight before adding HTLCs. +func CommitWeight(chanType channeldb.ChannelType) int64 { + // If this commitment has anchors, it will be slightly heavier. + if chanType.HasAnchors() { + return input.AnchorCommitWeight + } + + return input.CommitWeight +} + +// HtlcTimeoutFee returns the fee in satoshis required for an HTLC timeout +// transaction based on the current fee rate. +func HtlcTimeoutFee(chanType channeldb.ChannelType, + feePerKw chainfee.SatPerKWeight) btcutil.Amount { + + if chanType.HasAnchors() { + return feePerKw.FeeForWeight(input.HtlcTimeoutWeightConfirmed) + } + + return feePerKw.FeeForWeight(input.HtlcTimeoutWeight) +} + +// HtlcSuccessFee returns the fee in satoshis required for an HTLC success +// transaction based on the current fee rate. +func HtlcSuccessFee(chanType channeldb.ChannelType, + feePerKw chainfee.SatPerKWeight) btcutil.Amount { + + if chanType.HasAnchors() { + return feePerKw.FeeForWeight(input.HtlcSuccessWeightConfirmed) + } + return feePerKw.FeeForWeight(input.HtlcSuccessWeight) +} + +// CommitScriptAnchors return the scripts to use for the local and remote +// anchor. 
+func CommitScriptAnchors(localChanCfg,
+	remoteChanCfg *channeldb.ChannelConfig) (*ScriptInfo,
+	*ScriptInfo, error) {
+
+	// Helper to create anchor ScriptInfo from key.
+	anchorScript := func(key *btcec.PublicKey) (*ScriptInfo, error) {
+		script, err := input.CommitScriptAnchor(key)
+		if err != nil {
+			return nil, err
+		}
+
+		scriptHash, err := input.WitnessScriptHash(script)
+		if err != nil {
+			return nil, err
+		}
+
+		return &ScriptInfo{
+			PkScript:      scriptHash,
+			WitnessScript: script,
+		}, nil
+	}
+
+	// Get the script used for the anchor output spendable by the local
+	// node.
+	localAnchor, err := anchorScript(localChanCfg.MultiSigKey.PubKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// And the anchor spendable by the remote node.
+	remoteAnchor, err := anchorScript(remoteChanCfg.MultiSigKey.PubKey)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return localAnchor, remoteAnchor, nil
+}
+
+// CommitmentBuilder is a type that wraps the type of channel we are dealing
+// with, and abstracts the various ways of constructing commitment
+// transactions.
+type CommitmentBuilder struct {
+	// chanState is the underlying channel's state struct, used to
+	// determine the type of channel we are dealing with, and relevant
+	// parameters.
+	chanState *channeldb.OpenChannel
+
+	// obfuscator is a 48-bit state hint that's used to obfuscate the
+	// current state number on the commitment transactions.
+	obfuscator [StateHintSize]byte
+}
+
+// NewCommitmentBuilder creates a new CommitmentBuilder from chanState.
+func NewCommitmentBuilder(chanState *channeldb.OpenChannel) *CommitmentBuilder {
+	// The anchor channel type MUST be tweakless.
+ if chanState.ChanType.HasAnchors() && !chanState.ChanType.IsTweakless() { + panic("invalid channel type combination") + } + + return &CommitmentBuilder{ + chanState: chanState, + obfuscator: createStateHintObfuscator(chanState), + } +} + +// createStateHintObfuscator derives and assigns the state hint obfuscator for +// the channel, which is used to encode the commitment height in the sequence +// number of commitment transaction inputs. +func createStateHintObfuscator(state *channeldb.OpenChannel) [StateHintSize]byte { + if state.IsInitiator { + return DeriveStateHintObfuscator( + state.LocalChanCfg.PaymentBasePoint.PubKey, + state.RemoteChanCfg.PaymentBasePoint.PubKey, + ) + } + + return DeriveStateHintObfuscator( + state.RemoteChanCfg.PaymentBasePoint.PubKey, + state.LocalChanCfg.PaymentBasePoint.PubKey, + ) +} + +// unsignedCommitmentTx is the final commitment created from evaluating an HTLC +// view at a given height, along with some meta data. +type unsignedCommitmentTx struct { + // txn is the final, unsigned commitment transaction for this view. + txn *wire.MsgTx + + // fee is the total fee of the commitment transaction. + fee btcutil.Amount + + // ourBalance is our balance on this commitment *after* subtracting + // commitment fees and anchor outputs. This can be different than the + // balances before creating the commitment transaction as one party must + // pay the commitment fee. + ourBalance lnwire.MilliSatoshi + + // theirBalance is their balance of this commitment *after* subtracting + // commitment fees and anchor outputs. This can be different than the + // balances before creating the commitment transaction as one party must + // pay the commitment fee. + theirBalance lnwire.MilliSatoshi + + // cltvs is a sorted list of CLTV deltas for each HTLC on the commitment + // transaction. Any non-htlc outputs will have a CLTV delay of zero. 
+ cltvs []uint32 +} + +// createUnsignedCommitmentTx generates the unsigned commitment transaction for +// a commitment view and returns it as part of the unsignedCommitmentTx. The +// passed in balances should be balances *before* subtracting any commitment +// fees, but after anchor outputs. +func (cb *CommitmentBuilder) createUnsignedCommitmentTx(ourBalance, + theirBalance lnwire.MilliSatoshi, isOurs bool, + feePerKw chainfee.SatPerKWeight, height uint64, + filteredHTLCView *htlcView, + keyRing *CommitmentKeyRing) (*unsignedCommitmentTx, error) { + + dustLimit := cb.chanState.LocalChanCfg.DustLimit + if !isOurs { + dustLimit = cb.chanState.RemoteChanCfg.DustLimit + } + + numHTLCs := int64(0) + for _, htlc := range filteredHTLCView.ourUpdates { + if htlcIsDust( + cb.chanState.ChanType, false, isOurs, feePerKw, + htlc.Amount.ToSatoshis(), dustLimit, + ) { + continue + } + + numHTLCs++ + } + for _, htlc := range filteredHTLCView.theirUpdates { + if htlcIsDust( + cb.chanState.ChanType, true, isOurs, feePerKw, + htlc.Amount.ToSatoshis(), dustLimit, + ) { + continue + } + + numHTLCs++ + } + + // Next, we'll calculate the fee for the commitment transaction based + // on its total weight. Once we have the total weight, we'll multiply + // by the current fee-per-kw, then divide by 1000 to get the proper + // fee. + totalCommitWeight := CommitWeight(cb.chanState.ChanType) + + input.HTLCWeight*numHTLCs + + // With the weight known, we can now calculate the commitment fee, + // ensuring that we account for any dust outputs trimmed above. + commitFee := feePerKw.FeeForWeight(totalCommitWeight) + commitFeeMSat := lnwire.NewMSatFromSatoshis(commitFee) + + // Currently, within the protocol, the initiator always pays the fees. + // So we'll subtract the fee amount from the balance of the current + // initiator. If the initiator is unable to pay the fee fully, then + // their entire output is consumed. 
+ switch { + case cb.chanState.IsInitiator && commitFee > ourBalance.ToSatoshis(): + ourBalance = 0 + + case cb.chanState.IsInitiator: + ourBalance -= commitFeeMSat + + case !cb.chanState.IsInitiator && commitFee > theirBalance.ToSatoshis(): + theirBalance = 0 + + case !cb.chanState.IsInitiator: + theirBalance -= commitFeeMSat + } + + var ( + commitTx *wire.MsgTx + err error + ) + + // Depending on whether the transaction is ours or not, we call + // CreateCommitTx with parameters matching the perspective, to generate + // a new commitment transaction with all the latest unsettled/un-timed + // out HTLCs. + if isOurs { + commitTx, err = CreateCommitTx( + cb.chanState.ChanType, fundingTxIn(cb.chanState), keyRing, + &cb.chanState.LocalChanCfg, &cb.chanState.RemoteChanCfg, + ourBalance.ToSatoshis(), theirBalance.ToSatoshis(), + numHTLCs, + ) + } else { + commitTx, err = CreateCommitTx( + cb.chanState.ChanType, fundingTxIn(cb.chanState), keyRing, + &cb.chanState.RemoteChanCfg, &cb.chanState.LocalChanCfg, + theirBalance.ToSatoshis(), ourBalance.ToSatoshis(), + numHTLCs, + ) + } + if err != nil { + return nil, err + } + + // We'll now add all the HTLC outputs to the commitment transaction. + // Each output includes an off-chain 2-of-2 covenant clause, so we'll + // need the objective local/remote keys for this particular commitment + // as well. For any non-dust HTLCs that are manifested on the commitment + // transaction, we'll also record its CLTV which is required to sort the + // commitment transaction below. The slice is initially sized to the + // number of existing outputs, since any outputs already added are + // commitment outputs and should correspond to zero values for the + // purposes of sorting. 
+	cltvs := make([]uint32, len(commitTx.TxOut))
+	for _, htlc := range filteredHTLCView.ourUpdates {
+		if htlcIsDust(
+			cb.chanState.ChanType, false, isOurs, feePerKw,
+			htlc.Amount.ToSatoshis(), dustLimit,
+		) {
+			continue
+		}
+
+		err := addHTLC(
+			commitTx, isOurs, false, htlc, keyRing,
+			cb.chanState.ChanType,
+		)
+		if err != nil {
+			return nil, err
+		}
+		cltvs = append(cltvs, htlc.Timeout)
+	}
+	for _, htlc := range filteredHTLCView.theirUpdates {
+		if htlcIsDust(
+			cb.chanState.ChanType, true, isOurs, feePerKw,
+			htlc.Amount.ToSatoshis(), dustLimit,
+		) {
+			continue
+		}
+
+		err := addHTLC(
+			commitTx, isOurs, true, htlc, keyRing,
+			cb.chanState.ChanType,
+		)
+		if err != nil {
+			return nil, err
+		}
+		cltvs = append(cltvs, htlc.Timeout)
+	}
+
+	// Set the state hint of the commitment transaction to facilitate
+	// quickly recovering the necessary penalty state in the case of an
+	// uncooperative broadcast.
+	err = SetStateNumHint(commitTx, height, cb.obfuscator)
+	if err != nil {
+		return nil, err
+	}
+
+	// Sort the transactions according to the agreed upon canonical
+	// ordering. This lets us skip sending the entire transaction over,
+	// instead we'll just send signatures.
+	InPlaceCommitSort(commitTx, cltvs)
+
+	// Next, we'll ensure that we don't accidentally create a commitment
+	// transaction which would be invalid by consensus.
+	uTx := btcutil.NewTx(commitTx)
+	if err := blockchain.CheckTransactionSanity(uTx); err != nil {
+		return nil, err
+	}
+
+	// Finally, we'll assert that we're not attempting to draw more out of
+	// the channel than was originally placed within it.
+	var totalOut btcutil.Amount
+	for _, txOut := range commitTx.TxOut {
+		totalOut += btcutil.Amount(txOut.Value)
+	}
+	if totalOut > cb.chanState.Capacity {
+		return nil, fmt.Errorf("height=%v, for ChannelPoint(%v) "+
+			"attempts to consume %v while channel capacity is %v",
+			height, cb.chanState.FundingOutpoint,
+			totalOut, cb.chanState.Capacity)
+	}
+
+	return &unsignedCommitmentTx{
+		txn:          commitTx,
+		fee:          commitFee,
+		ourBalance:   ourBalance,
+		theirBalance: theirBalance,
+		cltvs:        cltvs,
+	}, nil
+}
+
+// CreateCommitTx creates a commitment transaction, spending from specified
+// funding output. The commitment transaction contains two outputs: one local
+// output paying to the "owner" of the commitment transaction which can be
+// spent after a relative block delay or revocation event, and a remote output
+// paying the counterparty within the channel, which can be spent immediately
+// or after a delay depending on the commitment type.
+func CreateCommitTx(chanType channeldb.ChannelType,
+	fundingOutput wire.TxIn, keyRing *CommitmentKeyRing,
+	localChanCfg, remoteChanCfg *channeldb.ChannelConfig,
+	amountToLocal, amountToRemote btcutil.Amount,
+	numHTLCs int64) (*wire.MsgTx, error) {
+
+	// First, we create the script for the delayed "pay-to-self" output.
+	// This output has 2 main redemption clauses: either we can redeem the
+	// output after a relative block delay, or the remote node can claim
+	// the funds with the revocation key if we broadcast a revoked
+	// commitment transaction.
+	toLocalRedeemScript, err := input.CommitScriptToSelf(
+		uint32(localChanCfg.CsvDelay), keyRing.ToLocalKey,
+		keyRing.RevocationKey,
+	)
+	if err != nil {
+		return nil, err
+	}
+	toLocalScriptHash, err := input.WitnessScriptHash(
+		toLocalRedeemScript,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	// Next, we create the script paying to the remote.
+ toRemoteScript, _, err := CommitScriptToRemote( + chanType, keyRing.ToRemoteKey, + ) + if err != nil { + return nil, err + } + + // Now that both output scripts have been created, we can finally create + // the transaction itself. We use a transaction version of 2 since CSV + // will fail unless the tx version is >= 2. + commitTx := wire.NewMsgTx(2) + commitTx.AddTxIn(&fundingOutput) + + // Avoid creating dust outputs within the commitment transaction. + localOutput := amountToLocal >= localChanCfg.DustLimit + if localOutput { + commitTx.AddTxOut(&wire.TxOut{ + PkScript: toLocalScriptHash, + Value: int64(amountToLocal), + }) + } + + remoteOutput := amountToRemote >= localChanCfg.DustLimit + if remoteOutput { + commitTx.AddTxOut(&wire.TxOut{ + PkScript: toRemoteScript.PkScript, + Value: int64(amountToRemote), + }) + } + + // If this channel type has anchors, we'll also add those. + if chanType.HasAnchors() { + localAnchor, remoteAnchor, err := CommitScriptAnchors( + localChanCfg, remoteChanCfg, + ) + if err != nil { + return nil, err + } + + // Add local anchor output only if we have a commitment output + // or there are HTLCs. + if localOutput || numHTLCs > 0 { + commitTx.AddTxOut(&wire.TxOut{ + PkScript: localAnchor.PkScript, + Value: int64(anchorSize), + }) + } + + // Add anchor output to remote only if they have a commitment + // output or there are HTLCs. + if remoteOutput || numHTLCs > 0 { + commitTx.AddTxOut(&wire.TxOut{ + PkScript: remoteAnchor.PkScript, + Value: int64(anchorSize), + }) + } + } + + return commitTx, nil +} + +// genHtlcScript generates the proper P2WSH public key scripts for the HTLC +// output modified by two-bits denoting if this is an incoming HTLC, and if the +// HTLC is being applied to their commitment transaction or ours. 
+func genHtlcScript(chanType channeldb.ChannelType, isIncoming, ourCommit bool,
+	timeout uint32, rHash [32]byte,
+	keyRing *CommitmentKeyRing) ([]byte, []byte, error) {
+
+	var (
+		witnessScript []byte
+		err           error
+	)
+
+	// Choose scripts based on channel type.
+	confirmedHtlcSpends := false
+	if chanType.HasAnchors() {
+		confirmedHtlcSpends = true
+	}
+
+	// Generate the proper redeem scripts for the HTLC output modified by
+	// two-bits denoting if this is an incoming HTLC, and if the HTLC is
+	// being applied to their commitment transaction or ours.
+	switch {
+	// The HTLC is paying to us, and being applied to our commitment
+	// transaction. So we need to use the receiver's version of the HTLC
+	// script.
+	case isIncoming && ourCommit:
+		witnessScript, err = input.ReceiverHTLCScript(
+			timeout, keyRing.RemoteHtlcKey, keyRing.LocalHtlcKey,
+			keyRing.RevocationKey, rHash[:], confirmedHtlcSpends,
+		)
+
+	// We're being paid via an HTLC by the remote party, and the HTLC is
+	// being added to their commitment transaction, so we use the sender's
+	// version of the HTLC script.
+	case isIncoming && !ourCommit:
+		witnessScript, err = input.SenderHTLCScript(
+			keyRing.RemoteHtlcKey, keyRing.LocalHtlcKey,
+			keyRing.RevocationKey, rHash[:], confirmedHtlcSpends,
+		)
+
+	// We're sending an HTLC which is being added to our commitment
+	// transaction. Therefore, we need to use the sender's version of the
+	// HTLC script.
+	case !isIncoming && ourCommit:
+		witnessScript, err = input.SenderHTLCScript(
+			keyRing.LocalHtlcKey, keyRing.RemoteHtlcKey,
+			keyRing.RevocationKey, rHash[:], confirmedHtlcSpends,
+		)
+
+	// Finally, we're paying the remote party via an HTLC, which is being
+	// added to their commitment transaction. Therefore, we use the
+	// receiver's version of the HTLC script.
+ case !isIncoming && !ourCommit: + witnessScript, err = input.ReceiverHTLCScript( + timeout, keyRing.LocalHtlcKey, keyRing.RemoteHtlcKey, + keyRing.RevocationKey, rHash[:], confirmedHtlcSpends, + ) + } + if err != nil { + return nil, nil, err + } + + // Now that we have the redeem scripts, create the P2WSH public key + // script for the output itself. + htlcP2WSH, err := input.WitnessScriptHash(witnessScript) + if err != nil { + return nil, nil, err + } + + return htlcP2WSH, witnessScript, nil +} + +// addHTLC adds a new HTLC to the passed commitment transaction. One of four +// full scripts will be generated for the HTLC output depending on if the HTLC +// is incoming and if it's being applied to our commitment transaction or that +// of the remote node's. Additionally, in order to be able to efficiently +// locate the added HTLC on the commitment transaction from the +// PaymentDescriptor that generated it, the generated script is stored within +// the descriptor itself. +func addHTLC(commitTx *wire.MsgTx, ourCommit bool, + isIncoming bool, paymentDesc *PaymentDescriptor, + keyRing *CommitmentKeyRing, chanType channeldb.ChannelType) error { + + timeout := paymentDesc.Timeout + rHash := paymentDesc.RHash + + p2wsh, witnessScript, err := genHtlcScript( + chanType, isIncoming, ourCommit, timeout, rHash, keyRing, + ) + if err != nil { + return err + } + + // Add the new HTLC outputs to the respective commitment transactions. + amountPending := int64(paymentDesc.Amount.ToSatoshis()) + commitTx.AddTxOut(wire.NewTxOut(amountPending, p2wsh)) + + // Store the pkScript of this particular PaymentDescriptor so we can + // quickly locate it within the commitment transaction later. 
+ if ourCommit { + paymentDesc.ourPkScript = p2wsh + paymentDesc.ourWitnessScript = witnessScript + } else { + paymentDesc.theirPkScript = p2wsh + paymentDesc.theirWitnessScript = witnessScript + } + + return nil +} diff --git a/lnwallet/config.go b/lnwallet/config.go index e86a8cbefc..a73120c020 100644 --- a/lnwallet/config.go +++ b/lnwallet/config.go @@ -6,6 +6,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) // Config is a struct which houses configuration parameters which modify the @@ -42,7 +43,7 @@ type Config struct { // FeeEstimator is the implementation that the wallet will use for the // calculation of on-chain transaction fees. - FeeEstimator FeeEstimator + FeeEstimator chainfee.Estimator // ChainIO is an instance of the BlockChainIO interface. ChainIO is // used to lookup the existence of outputs within the UTXO set. diff --git a/lnwallet/interface.go b/lnwallet/interface.go index bf6c255da5..41052503cd 100644 --- a/lnwallet/interface.go +++ b/lnwallet/interface.go @@ -10,6 +10,8 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/btcsuite/btcwallet/wallet/txauthor" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) // AddressType is an enum-like type which denotes the possible address types @@ -172,7 +174,7 @@ type WalletController interface { // This method also takes the target fee expressed in sat/kw that should // be used when crafting the transaction. SendOutputs(outputs []*wire.TxOut, - feeRate SatPerKWeight) (*wire.MsgTx, error) + feeRate chainfee.SatPerKWeight) (*wire.MsgTx, error) // CreateSimpleTx creates a Bitcoin transaction paying to the specified // outputs. The transaction is not broadcasted to the network. 
In the @@ -184,7 +186,7 @@ type WalletController interface { // NOTE: The dryRun argument can be set true to create a tx that // doesn't alter the database. A tx created with this set to true // SHOULD NOT be broadcasted. - CreateSimpleTx(outputs []*wire.TxOut, feeRate SatPerKWeight, + CreateSimpleTx(outputs []*wire.TxOut, feeRate chainfee.SatPerKWeight, dryRun bool) (*txauthor.AuthoredTx, error) // ListUnspentWitness returns all unspent outputs which are version 0 @@ -291,7 +293,7 @@ type MessageSigner interface { // that corresponds to the passed public key. If the target private key // is unable to be found, then an error will be returned. The actual // digest signed is the double SHA-256 of the passed message. - SignMessage(pubKey *btcec.PublicKey, msg []byte) (*btcec.Signature, error) + SignMessage(pubKey *btcec.PublicKey, msg []byte) (input.Signature, error) } // WalletDriver represents a "driver" for a particular concrete diff --git a/lnwallet/interface_test.go b/lnwallet/interface_test.go index 04af443fae..16fc7ce431 100644 --- a/lnwallet/interface_test.go +++ b/lnwallet/interface_test.go @@ -30,16 +30,19 @@ import ( "github.com/btcsuite/btcwallet/chain" "github.com/btcsuite/btcwallet/walletdb" _ "github.com/btcsuite/btcwallet/walletdb/bdb" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/lightninglabs/neutrino" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/chainntnfs/btcdnotify" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/btcwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/lnwire" ) @@ -166,7 +169,7 @@ func newPkScript(t *testing.T, w 
*lnwallet.LightningWallet, // parties to send on-chain funds to each other. func sendCoins(t *testing.T, miner *rpctest.Harness, sender, receiver *lnwallet.LightningWallet, output *wire.TxOut, - feeRate lnwallet.SatPerKWeight) *wire.MsgTx { + feeRate chainfee.SatPerKWeight) *wire.MsgTx { //nolint:unparam t.Helper() @@ -330,7 +333,7 @@ func createTestWallet(tempTestDir string, miningNode *rpctest.Harness, WalletController: wc, Signer: signer, ChainIO: bio, - FeeEstimator: lnwallet.NewStaticFeeEstimator(2500, 0), + FeeEstimator: chainfee.NewStaticEstimator(2500, 0), DefaultConstraints: channeldb.ChannelConstraints{ DustLimit: 500, MaxPendingAmount: lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) * 100, @@ -511,7 +514,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, if !bytes.Equal(aliceChannels[0].FundingOutpoint.Hash[:], fundingSha[:]) { t.Fatalf("channel state not properly saved") } - if aliceChannels[0].ChanType != channeldb.DualFunder { + if !aliceChannels[0].ChanType.IsDualFunder() { t.Fatalf("channel not detected as dual funder") } bobChannels, err := bob.Cfg.Database.FetchOpenChannels(alicePub) @@ -521,7 +524,7 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness, if !bytes.Equal(bobChannels[0].FundingOutpoint.Hash[:], fundingSha[:]) { t.Fatalf("channel state not properly saved") } - if bobChannels[0].ChanType != channeldb.DualFunder { + if !bobChannels[0].ChanType.IsDualFunder() { t.Fatalf("channel not detected as dual funder") } @@ -588,6 +591,7 @@ func testFundingTransactionLockedOutputs(miner *rpctest.Harness, FundingFeePerKw: feePerKw, PushMSat: 0, Flags: lnwire.FFAnnounceChannel, + PendingChanID: [32]byte{0, 1, 2, 3}, } if _, err := alice.InitChannelReservation(req); err != nil { t.Fatalf("unable to initialize funding reservation 1: %v", err) @@ -610,12 +614,13 @@ func testFundingTransactionLockedOutputs(miner *rpctest.Harness, FundingFeePerKw: feePerKw, PushMSat: 0, Flags: lnwire.FFAnnounceChannel, + PendingChanID: 
[32]byte{1, 2, 3, 4}, } failedReservation, err := alice.InitChannelReservation(failedReq) if err == nil { t.Fatalf("not error returned, should fail on coin selection") } - if _, ok := err.(*lnwallet.ErrInsufficientFunds); !ok { + if _, ok := err.(*chanfunding.ErrInsufficientFunds); !ok { t.Fatalf("error not coinselect error: %v", err) } if failedReservation != nil { @@ -646,6 +651,7 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness, FundingFeePerKw: feePerKw, PushMSat: 0, Flags: lnwire.FFAnnounceChannel, + PendingChanID: [32]byte{2, 3, 4, 5}, } chanReservation, err := alice.InitChannelReservation(req) if err != nil { @@ -653,8 +659,9 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness, } // Attempt to create another channel with 44 BTC, this should fail. + req.PendingChanID = [32]byte{3, 4, 5, 6} _, err = alice.InitChannelReservation(req) - if _, ok := err.(*lnwallet.ErrInsufficientFunds); !ok { + if _, ok := err.(*chanfunding.ErrInsufficientFunds); !ok { t.Fatalf("coin selection succeeded should have insufficient funds: %v", err) } @@ -682,6 +689,7 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness, // attempting coin selection. // Request to fund a new channel should now succeed. + req.PendingChanID = [32]byte{4, 5, 6, 7, 8} if _, err := alice.InitChannelReservation(req); err != nil { t.Fatalf("unable to initialize funding reservation: %v", err) } @@ -698,7 +706,8 @@ func testCancelNonExistentReservation(miner *rpctest.Harness, // Create our own reservation, give it some ID. 
res, err := lnwallet.NewChannelReservation( 10000, 10000, feePerKw, alice, 22, 10, &testHdSeed, - lnwire.FFAnnounceChannel, true, + lnwire.FFAnnounceChannel, lnwallet.CommitmentTypeTweakless, + nil, [32]byte{}, 0, ) if err != nil { t.Fatalf("unable to create res: %v", err) @@ -723,7 +732,7 @@ func testReservationInitiatorBalanceBelowDustCancel(miner *rpctest.Harness, t.Fatalf("unable to create amt: %v", err) } - feePerKw := lnwallet.SatPerKWeight( + feePerKw := chainfee.SatPerKWeight( numBTC * numBTC * btcutil.SatoshiPerBitcoin, ) req := &lnwallet.InitFundingReserveMsg{ @@ -736,7 +745,7 @@ func testReservationInitiatorBalanceBelowDustCancel(miner *rpctest.Harness, FundingFeePerKw: 1000, PushMSat: 0, Flags: lnwire.FFAnnounceChannel, - Tweakless: true, + CommitType: lnwallet.CommitmentTypeTweakless, } _, err = alice.InitChannelReservation(req) switch { @@ -791,7 +800,10 @@ func assertContributionInitPopulated(t *testing.T, c *lnwallet.ChannelContributi } func testSingleFunderReservationWorkflow(miner *rpctest.Harness, - alice, bob *lnwallet.LightningWallet, t *testing.T, tweakless bool) { + alice, bob *lnwallet.LightningWallet, t *testing.T, + commitType lnwallet.CommitmentType, + aliceChanFunder chanfunding.Assembler, fetchFundingTx func() *wire.MsgTx, + pendingChanID [32]byte, thawHeight uint32) { // For this scenario, Alice will be the channel initiator while bob // will act as the responder to the workflow. 
@@ -810,6 +822,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, } aliceReq := &lnwallet.InitFundingReserveMsg{ ChainHash: chainHash, + PendingChanID: pendingChanID, NodeID: bobPub, NodeAddr: bobAddr, LocalFundingAmt: fundingAmt, @@ -818,7 +831,8 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, FundingFeePerKw: feePerKw, PushMSat: pushAmt, Flags: lnwire.FFAnnounceChannel, - Tweakless: tweakless, + CommitType: commitType, + ChanFunder: aliceChanFunder, } aliceChanReservation, err := alice.InitChannelReservation(aliceReq) if err != nil { @@ -838,15 +852,20 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, t.Fatalf("unable to verify constraints: %v", err) } - // Verify all contribution fields have been set properly. + // Verify all contribution fields have been set properly, but only if + // Alice is the funder herself. aliceContribution := aliceChanReservation.OurContribution() - if len(aliceContribution.Inputs) < 1 { - t.Fatalf("outputs for funding tx not properly selected, have %v "+ - "outputs should at least 1", len(aliceContribution.Inputs)) - } - if len(aliceContribution.ChangeOutputs) != 1 { - t.Fatalf("coin selection failed, should have one change outputs, "+ - "instead have: %v", len(aliceContribution.ChangeOutputs)) + if fetchFundingTx == nil { + if len(aliceContribution.Inputs) < 1 { + t.Fatalf("outputs for funding tx not properly "+ + "selected, have %v outputs should at least 1", + len(aliceContribution.Inputs)) + } + if len(aliceContribution.ChangeOutputs) != 1 { + t.Fatalf("coin selection failed, should have one "+ + "change outputs, instead have: %v", + len(aliceContribution.ChangeOutputs)) + } } assertContributionInitPopulated(t, aliceContribution) @@ -854,6 +873,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, // reservation initiation, then consume Alice's contribution. 
bobReq := &lnwallet.InitFundingReserveMsg{ ChainHash: chainHash, + PendingChanID: pendingChanID, NodeID: alicePub, NodeAddr: aliceAddr, LocalFundingAmt: 0, @@ -862,7 +882,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, FundingFeePerKw: feePerKw, PushMSat: pushAmt, Flags: lnwire.FFAnnounceChannel, - Tweakless: tweakless, + CommitType: commitType, } bobChanReservation, err := bob.InitChannelReservation(bobReq) if err != nil { @@ -896,10 +916,11 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, // At this point, Alice should have generated all the signatures // required for the funding transaction, as well as Alice's commitment - // signature to bob. + // signature to bob, but only if the funding transaction was + // constructed internally. aliceRemoteContribution := aliceChanReservation.TheirContribution() aliceFundingSigs, aliceCommitSig := aliceChanReservation.OurSignatures() - if aliceFundingSigs == nil { + if fetchFundingTx == nil && aliceFundingSigs == nil { t.Fatalf("funding sigs not found") } if aliceCommitSig == nil { @@ -908,7 +929,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, // Additionally, the funding tx and the funding outpoint should have // been populated. - if aliceChanReservation.FinalFundingTx() == nil { + if aliceChanReservation.FinalFundingTx() == nil && fetchFundingTx == nil { t.Fatalf("funding transaction never created!") } if aliceChanReservation.FundingOutpoint() == nil { @@ -950,9 +971,17 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, t.Fatalf("alice unable to complete reservation: %v", err) } + // If the caller provided an alternative way to obtain the funding tx, + // then we'll use that. Otherwise, we'll obtain it directly from Alice. 
+ var fundingTx *wire.MsgTx + if fetchFundingTx != nil { + fundingTx = fetchFundingTx() + } else { + fundingTx = aliceChanReservation.FinalFundingTx() + } + // The resulting active channel state should have been persisted to the // DB for both Alice and Bob. - fundingTx := aliceChanReservation.FinalFundingTx() fundingSha := fundingTx.TxHash() aliceChannels, err := alice.Cfg.Database.FetchOpenChannels(bobPub) if err != nil { @@ -971,7 +1000,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, } if !aliceChannels[0].ChanType.IsSingleFunder() { t.Fatalf("channel type is incorrect, expected %v instead got %v", - channeldb.SingleFunder, aliceChannels[0].ChanType) + channeldb.SingleFunderBit, aliceChannels[0].ChanType) } bobChannels, err := bob.Cfg.Database.FetchOpenChannels(alicePub) @@ -991,7 +1020,7 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, } if !bobChannels[0].ChanType.IsSingleFunder() { t.Fatalf("channel type is incorrect, expected %v instead got %v", - channeldb.SingleFunder, bobChannels[0].ChanType) + channeldb.SingleFunderBit, bobChannels[0].ChanType) } // Let Alice publish the funding transaction. @@ -1022,6 +1051,24 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness, t.Fatalf("incorrect transaction was mined") } + // If a frozen channel was requested, then we expect that both channel + // types show as being a frozen channel type. 
+ aliceChanFrozen := aliceChannels[0].ChanType.IsFrozen() + bobChanFrozen := bobChannels[0].ChanType.IsFrozen() + if thawHeight != 0 && (!aliceChanFrozen || !bobChanFrozen) { + t.Fatalf("expected both alice and bob to have frozen chans: "+ + "alice_frozen=%v, bob_frozen=%v", aliceChanFrozen, + bobChanFrozen) + } + if thawHeight != bobChannels[0].ThawHeight { + t.Fatalf("wrong thaw height: expected %v got %v", thawHeight, + bobChannels[0].ThawHeight) + } + if thawHeight != aliceChannels[0].ThawHeight { + t.Fatalf("wrong thaw height: expected %v got %v", thawHeight, + aliceChannels[0].ThawHeight) + } + assertReservationDeleted(aliceChanReservation, t) assertReservationDeleted(bobChanReservation, t) } @@ -1385,7 +1432,7 @@ func testTransactionSubscriptions(miner *rpctest.Harness, // notifications when we _create_ transactions ourselves that spend our // own outputs. b := txscript.NewScriptBuilder() - b.AddOp(txscript.OP_0) + b.AddOp(txscript.OP_RETURN) outputScript, err := b.Script() if err != nil { t.Fatalf("unable to make output script: %v", err) @@ -1544,7 +1591,7 @@ func txFromOutput(tx *wire.MsgTx, signer input.Signer, fromPubKey, return nil, fmt.Errorf("unable to generate signature: %v", err) } witness := make([][]byte, 2) - witness[0] = append(spendSig, byte(txscript.SigHashAll)) + witness[0] = append(spendSig.Serialize(), byte(txscript.SigHashAll)) witness[1] = fromPubKey.SerializeCompressed() tx1.TxIn[0].Witness = witness @@ -1594,7 +1641,7 @@ func newTx(t *testing.T, r *rpctest.Harness, pubKey *btcec.PublicKey, } // Create a new unconfirmed tx that spends this output. - txFee := btcutil.Amount(0.1 * btcutil.SatoshiPerBitcoin) + txFee := btcutil.Amount(0.001 * btcutil.SatoshiPerBitcoin) tx1, err := txFromOutput( tx, alice.Cfg.Signer, pubKey, pubKey, txFee, rbf, ) @@ -1671,7 +1718,7 @@ func testPublishTransaction(r *rpctest.Harness, // We'll do the next mempool check on both RBF and non-RBF enabled // transactions. 
var ( - txFee = btcutil.Amount(0.05 * btcutil.SatoshiPerBitcoin) + txFee = btcutil.Amount(0.005 * btcutil.SatoshiPerBitcoin) tx3, tx3Spend *wire.MsgTx ) @@ -1929,7 +1976,7 @@ func testSignOutputUsingTweaks(r *rpctest.Harness, t.Fatalf("unable to generate signature: %v", err) } witness := make([][]byte, 2) - witness[0] = append(spendSig, byte(txscript.SigHashAll)) + witness[0] = append(spendSig.Serialize(), byte(txscript.SigHashAll)) witness[1] = tweakedKey.SerializeCompressed() sweepTx.TxIn[0].Witness = witness @@ -2151,7 +2198,7 @@ func testChangeOutputSpendConfirmation(r *rpctest.Harness, // // TODO(wilmer): replace this once SendOutputs easily supports sending // all funds in one transaction. - txFeeRate := lnwallet.SatPerKWeight(2500) + txFeeRate := chainfee.SatPerKWeight(2500) txFee := btcutil.Amount(14380) output := &wire.TxOut{ Value: int64(aliceBalance - txFee), @@ -2247,7 +2294,7 @@ func testLastUnusedAddr(miner *rpctest.Harness, if err != nil { t.Fatalf("unable to convert addr to script: %v", err) } - feeRate := lnwallet.SatPerKWeight(2500) + feeRate := chainfee.SatPerKWeight(2500) output := &wire.TxOut{ Value: 1000000, PkScript: addrScript, @@ -2281,7 +2328,7 @@ func testCreateSimpleTx(r *rpctest.Harness, w *lnwallet.LightningWallet, // The test cases we will run through for all backends. 
testCases := []struct { outVals []int64 - feeRate lnwallet.SatPerKWeight + feeRate chainfee.SatPerKWeight valid bool }{ { @@ -2522,7 +2569,9 @@ var walletTests = []walletTestCase{ bob *lnwallet.LightningWallet, t *testing.T) { testSingleFunderReservationWorkflow( - miner, alice, bob, t, false, + miner, alice, bob, t, + lnwallet.CommitmentTypeLegacy, nil, + nil, [32]byte{}, 0, ) }, }, @@ -2532,10 +2581,16 @@ var walletTests = []walletTestCase{ bob *lnwallet.LightningWallet, t *testing.T) { testSingleFunderReservationWorkflow( - miner, alice, bob, t, true, + miner, alice, bob, t, + lnwallet.CommitmentTypeTweakless, nil, + nil, [32]byte{}, 0, ) }, }, + { + name: "single funding workflow external funding tx", + test: testSingleFunderExternalFundingTx, + }, { name: "dual funder workflow", test: testDualFundingReservationWorkflow, @@ -2674,6 +2729,122 @@ func waitForWalletSync(r *rpctest.Harness, w *lnwallet.LightningWallet) error { return nil } +// testSingleFunderExternalFundingTx tests that the wallet is able to properly +// carry out a funding flow backed by a channel point that has been crafted +// outside the wallet. +func testSingleFunderExternalFundingTx(miner *rpctest.Harness, + alice, bob *lnwallet.LightningWallet, t *testing.T) { + + // First, we'll obtain multi-sig keys from both Alice and Bob which + // simulates them exchanging keys on a higher level. + aliceFundingKey, err := alice.DeriveNextKey(keychain.KeyFamilyMultiSig) + if err != nil { + t.Fatalf("unable to obtain alice funding key: %v", err) + } + bobFundingKey, err := bob.DeriveNextKey(keychain.KeyFamilyMultiSig) + if err != nil { + t.Fatalf("unable to obtain bob funding key: %v", err) + } + + // We'll now set up for them to open a 4 BTC channel, with 1 BTC pushed + // to Bob's side. + chanAmt := 4 * btcutil.SatoshiPerBitcoin + + // Simulating external funding negotiation, we'll now create the + // funding transaction for both parties. 
Utilizing existing tools, + we'll create a new chanfunding.Assembler backed by Alice's wallet. + aliceChanFunder := chanfunding.NewWalletAssembler(chanfunding.WalletConfig{ + CoinSource: lnwallet.NewCoinSource(alice), + CoinSelectLocker: alice, + CoinLocker: alice, + Signer: alice.Cfg.Signer, + DustLimit: 600, + }) + + // With the chan funder created, we'll now provision a funding intent, + // bind the keys we obtained above, and finally obtain our funding + // transaction and outpoint. + fundingIntent, err := aliceChanFunder.ProvisionChannel(&chanfunding.Request{ + LocalAmt: btcutil.Amount(chanAmt), + MinConfs: 1, + FeeRate: 253, + ChangeAddr: func() (btcutil.Address, error) { + return alice.NewAddress(lnwallet.WitnessPubKey, true) + }, + }) + if err != nil { + t.Fatalf("unable to perform coin selection: %v", err) + } + + // With our intent created, we'll instruct it to finalize the funding + // transaction, and also hand us the outpoint so we can simulate + // external crafting of the funding transaction. + var ( + fundingTx *wire.MsgTx + chanPoint *wire.OutPoint + ) + if fullIntent, ok := fundingIntent.(*chanfunding.FullIntent); ok { + fullIntent.BindKeys(&aliceFundingKey, bobFundingKey.PubKey) + + fundingTx, err = fullIntent.CompileFundingTx(nil, nil) + if err != nil { + t.Fatalf("unable to compile funding tx: %v", err) + } + chanPoint, err = fullIntent.ChanPoint() + if err != nil { + t.Fatalf("unable to obtain chan point: %v", err) + } + } else { + t.Fatalf("expected full intent, instead got: %T", fullIntent) + } + + // Now that we have the fully constructed funding transaction, we'll + // create a new shim external funder out of it for Alice, and prep a + // shim intent for Bob. 
+ thawHeight := uint32(200) + aliceExternalFunder := chanfunding.NewCannedAssembler( + thawHeight, *chanPoint, btcutil.Amount(chanAmt), &aliceFundingKey, + bobFundingKey.PubKey, true, + ) + bobShimIntent, err := chanfunding.NewCannedAssembler( + thawHeight, *chanPoint, btcutil.Amount(chanAmt), &bobFundingKey, + aliceFundingKey.PubKey, false, + ).ProvisionChannel(&chanfunding.Request{ + LocalAmt: btcutil.Amount(chanAmt), + MinConfs: 1, + FeeRate: 253, + ChangeAddr: func() (btcutil.Address, error) { + return bob.NewAddress(lnwallet.WitnessPubKey, true) + }, + }) + if err != nil { + t.Fatalf("unable to create shim intent for bob: %v", err) + } + + // At this point, we have everything we need to carry out our test, so + // we'll begin the funding flow between Alice and Bob. + // + // However, before we do so, we'll register a new shim intent for Bob, + // so he knows what keys to use when he receives the funding request + // from Alice. + pendingChanID := testHdSeed + err = bob.RegisterFundingIntent(pendingChanID, bobShimIntent) + if err != nil { + t.Fatalf("unable to register intent: %v", err) + } + + // Now we can carry out the single funding flow as normal, we'll + // specify our external funder and funding transaction, as well as the + // pending channel ID generated above to allow Alice and Bob to track + // the funding flow externally. + testSingleFunderReservationWorkflow( + miner, alice, bob, t, lnwallet.CommitmentTypeTweakless, + aliceExternalFunder, func() *wire.MsgTx { + return fundingTx + }, pendingChanID, thawHeight, + ) +} + // TestInterfaces tests all registered interfaces with a unified set of tests // which exercise each of the required methods found within the WalletController // interface. 
@@ -2736,8 +2907,10 @@ func TestLightningWallet(t *testing.T) { for _, walletDriver := range lnwallet.RegisteredWallets() { for _, backEnd := range walletDriver.BackEnds() { - runTests(t, walletDriver, backEnd, miningNode, - rpcConfig, chainNotifier) + if !runTests(t, walletDriver, backEnd, miningNode, + rpcConfig, chainNotifier) { + return + } } } } @@ -2749,7 +2922,7 @@ func TestLightningWallet(t *testing.T) { func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver, backEnd string, miningNode *rpctest.Harness, rpcConfig rpcclient.ConnConfig, - chainNotifier *btcdnotify.BtcdNotifier) { + chainNotifier chainntnfs.ChainNotifier) bool { var ( bio lnwallet.BlockChainIO @@ -2890,21 +3063,25 @@ func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver, defer bitcoind.Process.Kill() // Wait for the bitcoind instance to start up. - time.Sleep(time.Second) host := fmt.Sprintf("127.0.0.1:%d", rpcPort) - chainConn, err := chain.NewBitcoindConn( - netParams, host, "weks", "weks", zmqBlockHost, - zmqTxHost, 100*time.Millisecond, - ) + var chainConn *chain.BitcoindConn + err = wait.NoError(func() error { + chainConn, err = chain.NewBitcoindConn( + netParams, host, "weks", "weks", + zmqBlockHost, zmqTxHost, + 100*time.Millisecond, + ) + if err != nil { + return err + } + + return chainConn.Start() + }, 10*time.Second) if err != nil { t.Fatalf("unable to establish connection to "+ "bitcoind: %v", err) } - if err := chainConn.Start(); err != nil { - t.Fatalf("unable to establish connection to "+ - "bitcoind: %v", err) - } defer chainConn.Stop() // Create a btcwallet bitcoind client for both Alice and @@ -2978,8 +3155,7 @@ func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver, bob, err := createTestWallet( tempTestDirBob, miningNode, netParams, - chainNotifier, bobWalletController, bobKeyRing, - bobSigner, bio, + chainNotifier, bobWalletController, bobKeyRing, bobSigner, bio, ) if err != nil { t.Fatalf("unable to create test ln wallet: %v", err) @@ -2994,21 
+3170,31 @@ func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver, // Execute every test, clearing possibly mutated // wallet state after each step. for _, walletTest := range walletTests { + + walletTest := walletTest + testName := fmt.Sprintf("%v/%v:%v", walletType, backEnd, walletTest.name) success := t.Run(testName, func(t *testing.T) { + if backEnd == "neutrino" && + strings.Contains(walletTest.name, "dual funder") { + t.Skip("skipping dual funder tests for neutrino") + } + walletTest.test(miningNode, alice, bob, t) }) if !success { - break + return false } // TODO(roasbeef): possible reset mining // node's chainstate to initial level, cleanly // wipe buckets if err := clearWalletStates(alice, bob); err != - nil && err != bbolt.ErrBucketNotFound { + nil && err != kvdb.ErrBucketNotFound { t.Fatalf("unable to wipe wallet state: %v", err) } } + + return true } diff --git a/lnwallet/log.go b/lnwallet/log.go index 97f1c21ffe..31881e2d19 100644 --- a/lnwallet/log.go +++ b/lnwallet/log.go @@ -7,6 +7,7 @@ import ( "github.com/btcsuite/btcwallet/wtxmgr" "github.com/lightningnetwork/lnd/build" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) // walletLog is a logger that is initialized with no output filters. 
This @@ -34,6 +35,7 @@ func UseLogger(logger btclog.Logger) { btcwallet.UseLogger(logger) wtxmgr.UseLogger(logger) chain.UseLogger(logger) + chainfee.UseLogger(logger) } // logClosure is used to provide a closure over expensive logging operations diff --git a/lnwallet/reservation.go b/lnwallet/reservation.go index b96651ff06..22ef08b3a9 100644 --- a/lnwallet/reservation.go +++ b/lnwallet/reservation.go @@ -10,9 +10,44 @@ import ( "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/lnwire" ) +// CommitmentType is an enum indicating the commitment type we should use for +// the channel we are opening. +type CommitmentType int + +const ( + // CommitmentTypeLegacy is the legacy commitment format with a tweaked + // to_remote key. + CommitmentTypeLegacy = iota + + // CommitmentTypeTweakless is a newer commitment format where the + // to_remote key is static. + CommitmentTypeTweakless + + // CommitmentTypeAnchors is a commitment type that is tweakless, and + // has extra anchor outputs in order to bump the fee of the commitment + // transaction. + CommitmentTypeAnchors +) + +// String returns the name of the CommitmentType. +func (c CommitmentType) String() string { + switch c { + case CommitmentTypeLegacy: + return "legacy" + case CommitmentTypeTweakless: + return "tweakless" + case CommitmentTypeAnchors: + return "anchors" + default: + return "invalid" + } +} + // ChannelContribution is the primary constituent of the funding workflow // within lnwallet. Each side first exchanges their respective contributions // along with channel specific parameters like the min fee/KB. Once @@ -42,6 +77,10 @@ type ChannelContribution struct { // such as the min HTLC, and also all the keys which will be used for // the duration of the channel. 
*channeldb.ChannelConfig + + // UpfrontShutdown is an optional address to which the channel should be + // paid out to on cooperative close. + UpfrontShutdown lnwire.DeliveryAddress } // toChanConfig returns the raw channel configuration generated by a node's @@ -95,8 +134,8 @@ type ChannelReservation struct { theirFundingInputScripts []*input.Script // Our signature for their version of the commitment transaction. - ourCommitmentSig []byte - theirCommitmentSig []byte + ourCommitmentSig input.Signature + theirCommitmentSig input.Signature ourContribution *ChannelContribution theirContribution *ChannelContribution @@ -108,19 +147,19 @@ type ChannelReservation struct { // throughout its lifetime. reservationID uint64 + // pendingChanID is the pending channel ID for this channel as + // identified within the wire protocol. + pendingChanID [32]byte + // pushMSat the amount of milli-satoshis that should be pushed to the // responder of a single funding channel as part of the initial // commitment state. pushMSat lnwire.MilliSatoshi - // chanOpen houses a struct containing the channel and additional - // confirmation details will be sent on once the channel is considered - // 'open'. A channel is open once the funding transaction has reached a - // sufficient number of confirmations. - chanOpen chan *openChanDetails - chanOpenErr chan error + wallet *LightningWallet + chanFunder chanfunding.Assembler - wallet *LightningWallet + fundingIntent chanfunding.Intent } // NewChannelReservation creates a new channel reservation. This function is @@ -128,10 +167,11 @@ type ChannelReservation struct { // creation of all channel reservations should be carried out via the // lnwallet.InitChannelReservation interface. 
func NewChannelReservation(capacity, localFundingAmt btcutil.Amount, - commitFeePerKw SatPerKWeight, wallet *LightningWallet, + commitFeePerKw chainfee.SatPerKWeight, wallet *LightningWallet, id uint64, pushMSat lnwire.MilliSatoshi, chainHash *chainhash.Hash, - flags lnwire.FundingFlag, - tweaklessCommit bool) (*ChannelReservation, error) { + flags lnwire.FundingFlag, commitType CommitmentType, + fundingAssembler chanfunding.Assembler, + pendingChanID [32]byte, thawHeight uint32) (*ChannelReservation, error) { var ( ourBalance lnwire.MilliSatoshi @@ -139,12 +179,25 @@ func NewChannelReservation(capacity, localFundingAmt btcutil.Amount, initiator bool ) - commitFee := commitFeePerKw.FeeForWeight(input.CommitWeight) + // Based on the channel type, we determine the initial commit weight + // and fee. + commitWeight := int64(input.CommitWeight) + if commitType == CommitmentTypeAnchors { + commitWeight = input.AnchorCommitWeight + } + commitFee := commitFeePerKw.FeeForWeight(commitWeight) + localFundingMSat := lnwire.NewMSatFromSatoshis(localFundingAmt) // TODO(halseth): make method take remote funding amount directly // instead of inferring it from capacity and local amt. capacityMSat := lnwire.NewMSatFromSatoshis(capacity) + + // The total fee paid by the initiator will be the commitment fee in + // addition to the two anchor outputs. feeMSat := lnwire.NewMSatFromSatoshis(commitFee) + if commitType == CommitmentTypeAnchors { + feeMSat += 2 * lnwire.NewMSatFromSatoshis(anchorSize) + } // If we're the responder to a single-funder reservation, then we have // no initial balance in the channel unless the remote party is pushing @@ -206,6 +259,16 @@ func NewChannelReservation(capacity, localFundingAmt btcutil.Amount, ) } + // Similarly we ensure their balance is reasonable if we are not the + // initiator. 
+ if !initiator && theirBalance.ToSatoshis() <= 2*DefaultDustLimit() { + return nil, ErrFunderBalanceDust( + int64(commitFee), + int64(theirBalance.ToSatoshis()), + int64(2*DefaultDustLimit()), + ) + } + // Next we'll set the channel type based on what we can ascertain about // the balances/push amount within the channel. var chanType channeldb.ChannelType @@ -214,16 +277,39 @@ func NewChannelReservation(capacity, localFundingAmt btcutil.Amount, // non-zero push amt (there's no pushing for dual funder), then this is // a single-funder channel. if ourBalance == 0 || theirBalance == 0 || pushMSat != 0 { - if tweaklessCommit { - chanType = channeldb.SingleFunderTweakless + // Both the tweakless type and the anchor type is tweakless, + // hence set the bit. + if commitType == CommitmentTypeTweakless || + commitType == CommitmentTypeAnchors { + + chanType |= channeldb.SingleFunderTweaklessBit } else { - chanType = channeldb.SingleFunder + chanType |= channeldb.SingleFunderBit } + + // If this intent isn't one that's able to provide us with a + // funding transaction, then we'll set the chanType bit to + // signal that we don't have access to one. + if _, ok := fundingAssembler.(chanfunding.FundingTxAssembler); !ok { + chanType |= channeldb.NoFundingTxBit + } + } else { // Otherwise, this is a dual funder channel, and no side is // technically the "initiator" initiator = false - chanType = channeldb.DualFunder + chanType |= channeldb.DualFunderBit + } + + // We are adding anchor outputs to our commitment. + if commitType == CommitmentTypeAnchors { + chanType |= channeldb.AnchorOutputsBit + } + + // If the channel is meant to be frozen, then we'll set the frozen bit + // now so once the channel is open, it can be interpreted properly. 
+ if thawHeight != 0 { + chanType |= channeldb.FrozenBit } return &ChannelReservation{ @@ -254,13 +340,14 @@ func NewChannelReservation(capacity, localFundingAmt btcutil.Amount, FeePerKw: btcutil.Amount(commitFeePerKw), CommitFee: commitFee, }, - Db: wallet.Cfg.Database, + ThawHeight: thawHeight, + Db: wallet.Cfg.Database, }, pushMSat: pushMSat, + pendingChanID: pendingChanID, reservationID: id, - chanOpen: make(chan *openChanDetails, 1), - chanOpenErr: make(chan error, 1), wallet: wallet, + chanFunder: fundingAssembler, }, nil } @@ -384,6 +471,37 @@ func (r *ChannelReservation) ProcessContribution(theirContribution *ChannelContr return <-errChan } +// IsPsbt returns true if there is a PSBT funding intent mapped to this +// reservation. +func (r *ChannelReservation) IsPsbt() bool { + _, ok := r.fundingIntent.(*chanfunding.PsbtIntent) + return ok +} + +// ProcessPsbt continues a previously paused funding flow that involves PSBT to +// construct the funding transaction. This method can be called once the PSBT is +// finalized and the signed transaction is available. +func (r *ChannelReservation) ProcessPsbt() error { + errChan := make(chan error, 1) + + r.wallet.msgChan <- &continueContributionMsg{ + pendingFundingID: r.reservationID, + err: errChan, + } + + return <-errChan +} + +// RemoteCanceled informs the PSBT funding state machine that the remote peer +// has canceled the pending reservation, likely due to a timeout. +func (r *ChannelReservation) RemoteCanceled() { + psbtIntent, ok := r.fundingIntent.(*chanfunding.PsbtIntent) + if !ok { + return + } + psbtIntent.RemoteCanceled() +} + // ProcessSingleContribution verifies, and records the initiator's contribution // to this pending single funder channel. 
Internally, no further action is // taken other than recording the initiator's contribution to the single funder @@ -420,7 +538,9 @@ func (r *ChannelReservation) TheirContribution() *ChannelContribution { // // NOTE: These signatures will only be populated after a call to // .ProcessContribution() -func (r *ChannelReservation) OurSignatures() ([]*input.Script, []byte) { +func (r *ChannelReservation) OurSignatures() ([]*input.Script, + input.Signature) { + r.RLock() defer r.RUnlock() return r.ourFundingInputScripts, r.ourCommitmentSig @@ -440,7 +560,7 @@ func (r *ChannelReservation) OurSignatures() ([]*input.Script, []byte) { // confirmations. Once the method unblocks, a LightningChannel instance is // returned, marking the channel available for updates. func (r *ChannelReservation) CompleteReservation(fundingInputScripts []*input.Script, - commitmentSig []byte) (*channeldb.OpenChannel, error) { + commitmentSig input.Signature) (*channeldb.OpenChannel, error) { // TODO(roasbeef): add flag for watch or not? errChan := make(chan error, 1) @@ -467,7 +587,7 @@ func (r *ChannelReservation) CompleteReservation(fundingInputScripts []*input.Sc // called as a response to a single funder channel, only a commitment signature // will be populated. func (r *ChannelReservation) CompleteReservationSingle(fundingPoint *wire.OutPoint, - commitSig []byte) (*channeldb.OpenChannel, error) { + commitSig input.Signature) (*channeldb.OpenChannel, error) { errChan := make(chan error, 1) completeChan := make(chan *channeldb.OpenChannel, 1) @@ -490,7 +610,9 @@ func (r *ChannelReservation) CompleteReservationSingle(fundingPoint *wire.OutPoi // // NOTE: These attributes will be unpopulated before a call to // .CompleteReservation(). 
-func (r *ChannelReservation) TheirSignatures() ([]*input.Script, []byte) { +func (r *ChannelReservation) TheirSignatures() ([]*input.Script, + input.Signature) { + r.RLock() defer r.RUnlock() return r.theirFundingInputScripts, r.theirCommitmentSig @@ -520,6 +642,14 @@ func (r *ChannelReservation) FundingOutpoint() *wire.OutPoint { return &r.partialState.FundingOutpoint } +// SetOurUpfrontShutdown sets the upfront shutdown address on our contribution. +func (r *ChannelReservation) SetOurUpfrontShutdown(shutdown lnwire.DeliveryAddress) { + r.Lock() + defer r.Unlock() + + r.ourContribution.UpfrontShutdown = shutdown +} + // Capacity returns the channel capacity for this reservation. func (r *ChannelReservation) Capacity() btcutil.Amount { r.RLock() diff --git a/lnwallet/sigpool.go b/lnwallet/sigpool.go index 57b849888a..ec1d4f3462 100644 --- a/lnwallet/sigpool.go +++ b/lnwallet/sigpool.go @@ -205,7 +205,7 @@ func (s *SigPool) poolWorker() { } } - sig, err := lnwire.NewSigFromRawSignature(rawSig) + sig, err := lnwire.NewSigFromSignature(rawSig) select { case sigMsg.Resp <- SignJobResp{ Sig: sig, diff --git a/lnwallet/size_test.go b/lnwallet/size_test.go deleted file mode 100644 index 36dac258dc..0000000000 --- a/lnwallet/size_test.go +++ /dev/null @@ -1,191 +0,0 @@ -package lnwallet_test - -import ( - "testing" - - "github.com/btcsuite/btcd/blockchain" - "github.com/btcsuite/btcd/chaincfg" - "github.com/btcsuite/btcd/txscript" - "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" - - "github.com/lightningnetwork/lnd/input" -) - -// TestTxWeightEstimator tests that transaction weight estimates are calculated -// correctly by comparing against an actual (though invalid) transaction -// matching the template. 
-func TestTxWeightEstimator(t *testing.T) { - netParams := &chaincfg.MainNetParams - - p2pkhAddr, err := btcutil.NewAddressPubKeyHash( - make([]byte, 20), netParams) - if err != nil { - t.Fatalf("Failed to generate address: %v", err) - } - p2pkhScript, err := txscript.PayToAddrScript(p2pkhAddr) - if err != nil { - t.Fatalf("Failed to generate scriptPubKey: %v", err) - } - - p2wkhAddr, err := btcutil.NewAddressWitnessPubKeyHash( - make([]byte, 20), netParams) - if err != nil { - t.Fatalf("Failed to generate address: %v", err) - } - p2wkhScript, err := txscript.PayToAddrScript(p2wkhAddr) - if err != nil { - t.Fatalf("Failed to generate scriptPubKey: %v", err) - } - - p2wshAddr, err := btcutil.NewAddressWitnessScriptHash( - make([]byte, 32), netParams) - if err != nil { - t.Fatalf("Failed to generate address: %v", err) - } - p2wshScript, err := txscript.PayToAddrScript(p2wshAddr) - if err != nil { - t.Fatalf("Failed to generate scriptPubKey: %v", err) - } - - p2shAddr, err := btcutil.NewAddressScriptHash([]byte{0}, netParams) - if err != nil { - t.Fatalf("Failed to generate address: %v", err) - } - p2shScript, err := txscript.PayToAddrScript(p2shAddr) - if err != nil { - t.Fatalf("Failed to generate scriptPubKey: %v", err) - } - - testCases := []struct { - numP2PKHInputs int - numP2WKHInputs int - numP2WSHInputs int - numNestedP2WKHInputs int - numNestedP2WSHInputs int - numP2PKHOutputs int - numP2WKHOutputs int - numP2WSHOutputs int - numP2SHOutputs int - }{ - { - numP2PKHInputs: 1, - numP2PKHOutputs: 2, - }, - { - numP2PKHInputs: 1, - numP2WKHInputs: 1, - numP2WKHOutputs: 1, - numP2WSHOutputs: 1, - }, - { - numP2WKHInputs: 1, - numP2WKHOutputs: 1, - numP2WSHOutputs: 1, - }, - { - numP2WKHInputs: 2, - numP2WKHOutputs: 1, - numP2WSHOutputs: 1, - }, - { - numP2WSHInputs: 1, - numP2WKHOutputs: 1, - }, - { - numP2PKHInputs: 1, - numP2SHOutputs: 1, - }, - { - numNestedP2WKHInputs: 1, - numP2WKHOutputs: 1, - }, - { - numNestedP2WSHInputs: 1, - numP2WKHOutputs: 1, - }, - } 
- - for i, test := range testCases { - var weightEstimate input.TxWeightEstimator - tx := wire.NewMsgTx(1) - - for j := 0; j < test.numP2PKHInputs; j++ { - weightEstimate.AddP2PKHInput() - - signature := make([]byte, 73) - compressedPubKey := make([]byte, 33) - scriptSig, err := txscript.NewScriptBuilder().AddData(signature). - AddData(compressedPubKey).Script() - if err != nil { - t.Fatalf("Failed to generate scriptSig: %v", err) - } - - tx.AddTxIn(&wire.TxIn{SignatureScript: scriptSig}) - } - for j := 0; j < test.numP2WKHInputs; j++ { - weightEstimate.AddP2WKHInput() - - signature := make([]byte, 73) - compressedPubKey := make([]byte, 33) - witness := wire.TxWitness{signature, compressedPubKey} - tx.AddTxIn(&wire.TxIn{Witness: witness}) - } - for j := 0; j < test.numP2WSHInputs; j++ { - weightEstimate.AddWitnessInput(42) - - witnessScript := make([]byte, 40) - witness := wire.TxWitness{witnessScript} - tx.AddTxIn(&wire.TxIn{Witness: witness}) - } - for j := 0; j < test.numNestedP2WKHInputs; j++ { - weightEstimate.AddNestedP2WKHInput() - - signature := make([]byte, 73) - compressedPubKey := make([]byte, 33) - witness := wire.TxWitness{signature, compressedPubKey} - scriptSig, err := txscript.NewScriptBuilder().AddData(p2wkhScript). - Script() - if err != nil { - t.Fatalf("Failed to generate scriptSig: %v", err) - } - - tx.AddTxIn(&wire.TxIn{SignatureScript: scriptSig, Witness: witness}) - } - for j := 0; j < test.numNestedP2WSHInputs; j++ { - weightEstimate.AddNestedP2WSHInput(42) - - witnessScript := make([]byte, 40) - witness := wire.TxWitness{witnessScript} - scriptSig, err := txscript.NewScriptBuilder().AddData(p2wshScript). 
- Script() - if err != nil { - t.Fatalf("Failed to generate scriptSig: %v", err) - } - - tx.AddTxIn(&wire.TxIn{SignatureScript: scriptSig, Witness: witness}) - } - for j := 0; j < test.numP2PKHOutputs; j++ { - weightEstimate.AddP2PKHOutput() - tx.AddTxOut(&wire.TxOut{PkScript: p2pkhScript}) - } - for j := 0; j < test.numP2WKHOutputs; j++ { - weightEstimate.AddP2WKHOutput() - tx.AddTxOut(&wire.TxOut{PkScript: p2wkhScript}) - } - for j := 0; j < test.numP2WSHOutputs; j++ { - weightEstimate.AddP2WSHOutput() - tx.AddTxOut(&wire.TxOut{PkScript: p2wshScript}) - } - for j := 0; j < test.numP2SHOutputs; j++ { - weightEstimate.AddP2SHOutput() - tx.AddTxOut(&wire.TxOut{PkScript: p2shScript}) - } - - expectedWeight := blockchain.GetTransactionWeight(btcutil.NewTx(tx)) - if weightEstimate.Weight() != int(expectedWeight) { - t.Errorf("Case %d: Got wrong weight: expected %d, got %d", - i, expectedWeight, weightEstimate.Weight()) - } - } -} diff --git a/lnwallet/test_utils.go b/lnwallet/test_utils.go index efb30ac1dc..55d8a3420c 100644 --- a/lnwallet/test_utils.go +++ b/lnwallet/test_utils.go @@ -1,7 +1,6 @@ package lnwallet import ( - "bytes" "crypto/rand" "encoding/binary" "encoding/hex" @@ -18,6 +17,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/shachain" ) @@ -80,6 +80,19 @@ var ( }, LockTime: 5, } + + // A valid, DER-encoded signature (taken from btcec unit tests). 
+ testSigBytes = []byte{ + 0x30, 0x44, 0x02, 0x20, 0x4e, 0x45, 0xe1, 0x69, + 0x32, 0xb8, 0xaf, 0x51, 0x49, 0x61, 0xa1, 0xd3, + 0xa1, 0xa2, 0x5f, 0xdf, 0x3f, 0x4f, 0x77, 0x32, + 0xe9, 0xd6, 0x24, 0xc6, 0xc6, 0x15, 0x48, 0xab, + 0x5f, 0xb8, 0xcd, 0x41, 0x02, 0x20, 0x18, 0x15, + 0x22, 0xec, 0x8e, 0xca, 0x07, 0xde, 0x48, 0x60, + 0xa4, 0xac, 0xdd, 0x12, 0x90, 0x9d, 0x83, 0x1c, + 0xc5, 0x6c, 0xbb, 0xac, 0x46, 0x22, 0x08, 0x22, + 0x21, 0xa8, 0x76, 0x8d, 0x1d, 0x09, + } ) // CreateTestChannels creates to fully populated channels to be used within @@ -90,7 +103,7 @@ var ( // the test has been finalized. The clean up function will remote all temporary // files created. If tweaklessCommits is true, then the commits within the // channels will use the new format, otherwise the legacy format. -func CreateTestChannels(tweaklessCommits bool) ( +func CreateTestChannels(chanType channeldb.ChannelType) ( *LightningChannel, *LightningChannel, func(), error) { channelCapacity, err := btcutil.NewAmount(10) @@ -207,7 +220,7 @@ func CreateTestChannels(tweaklessCommits bool) ( aliceCommitTx, bobCommitTx, err := CreateCommitmentTxns( channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint, - bobCommitPoint, *fundingTxIn, tweaklessCommits, + bobCommitPoint, *fundingTxIn, chanType, ) if err != nil { return nil, nil, nil, err @@ -233,30 +246,39 @@ func CreateTestChannels(tweaklessCommits bool) ( return nil, nil, nil, err } - estimator := NewStaticFeeEstimator(6000, 0) + estimator := chainfee.NewStaticEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { return nil, nil, nil, err } - commitFee := calcStaticFee(0) + commitFee := calcStaticFee(chanType, 0) + var anchorAmt btcutil.Amount + if chanType.HasAnchors() { + anchorAmt += 2 * anchorSize + } + + aliceBalance := lnwire.NewMSatFromSatoshis( + channelBal - commitFee - anchorAmt, + ) + bobBalance := lnwire.NewMSatFromSatoshis(channelBal) aliceCommit := channeldb.ChannelCommitment{ CommitHeight: 0, - 
LocalBalance: lnwire.NewMSatFromSatoshis(channelBal - commitFee), - RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal), + LocalBalance: aliceBalance, + RemoteBalance: bobBalance, CommitFee: commitFee, FeePerKw: btcutil.Amount(feePerKw), CommitTx: aliceCommitTx, - CommitSig: bytes.Repeat([]byte{1}, 71), + CommitSig: testSigBytes, } bobCommit := channeldb.ChannelCommitment{ CommitHeight: 0, - LocalBalance: lnwire.NewMSatFromSatoshis(channelBal), - RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal - commitFee), + LocalBalance: bobBalance, + RemoteBalance: aliceBalance, CommitFee: commitFee, FeePerKw: btcutil.Amount(feePerKw), CommitTx: bobCommitTx, - CommitSig: bytes.Repeat([]byte{1}, 71), + CommitSig: testSigBytes, } var chanIDBytes [8]byte @@ -274,7 +296,7 @@ func CreateTestChannels(tweaklessCommits bool) ( IdentityPub: aliceKeys[0].PubKey(), FundingOutpoint: *prevOut, ShortChannelID: shortChanID, - ChanType: channeldb.SingleFunderTweakless, + ChanType: chanType, IsInitiator: true, Capacity: channelCapacity, RemoteCurrentRevocation: bobCommitPoint, @@ -292,7 +314,7 @@ func CreateTestChannels(tweaklessCommits bool) ( IdentityPub: bobKeys[0].PubKey(), FundingOutpoint: *prevOut, ShortChannelID: shortChanID, - ChanType: channeldb.SingleFunderTweakless, + ChanType: chanType, IsInitiator: false, Capacity: channelCapacity, RemoteCurrentRevocation: aliceCommitPoint, @@ -304,11 +326,6 @@ func CreateTestChannels(tweaklessCommits bool) ( Packager: channeldb.NewChannelPackager(shortChanID), } - if !tweaklessCommits { - aliceChannelState.ChanType = channeldb.SingleFunder - bobChannelState.ChanType = channeldb.SingleFunder - } - aliceSigner := &input.MockSigner{Privkeys: aliceKeys} bobSigner := &input.MockSigner{Privkeys: bobKeys} @@ -323,6 +340,8 @@ func CreateTestChannels(tweaklessCommits bool) ( } alicePool.Start() + obfuscator := createStateHintObfuscator(aliceChannelState) + bobPool := NewSigPool(1, bobSigner) channelBob, err := NewLightningChannel( bobSigner, 
bobChannelState, bobPool, @@ -333,13 +352,13 @@ func CreateTestChannels(tweaklessCommits bool) ( bobPool.Start() err = SetStateNumHint( - aliceCommitTx, 0, channelAlice.stateHintObfuscator, + aliceCommitTx, 0, obfuscator, ) if err != nil { return nil, nil, nil, err } err = SetStateNumHint( - bobCommitTx, 0, channelAlice.stateHintObfuscator, + bobCommitTx, 0, obfuscator, ) if err != nil { return nil, nil, nil, err @@ -446,14 +465,14 @@ func txFromHex(txHex string) (*btcutil.Tx, error) { // calculations into account. // // TODO(bvu): Refactor when dynamic fee estimation is added. -func calcStaticFee(numHTLCs int) btcutil.Amount { +func calcStaticFee(chanType channeldb.ChannelType, numHTLCs int) btcutil.Amount { const ( - commitWeight = btcutil.Amount(724) - htlcWeight = 172 - feePerKw = btcutil.Amount(24/4) * 1000 + htlcWeight = 172 + feePerKw = btcutil.Amount(24/4) * 1000 ) - return feePerKw * (commitWeight + - btcutil.Amount(htlcWeight*numHTLCs)) / 1000 + return feePerKw * + (btcutil.Amount(CommitWeight(chanType) + + htlcWeight*int64(numHTLCs))) / 1000 } // ForceStateTransition executes the necessary interaction between the two diff --git a/lnwallet/transactions.go b/lnwallet/transactions.go index 7803f752bf..3c37cd8da7 100644 --- a/lnwallet/transactions.go +++ b/lnwallet/transactions.go @@ -7,6 +7,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" ) @@ -44,8 +45,8 @@ var ( // In order to spend the HTLC output, the witness for the passed transaction // should be: // * <0> -func createHtlcSuccessTx(htlcOutput wire.OutPoint, htlcAmt btcutil.Amount, - csvDelay uint32, +func createHtlcSuccessTx(chanType channeldb.ChannelType, + htlcOutput wire.OutPoint, htlcAmt btcutil.Amount, csvDelay uint32, revocationKey, delayKey *btcec.PublicKey) (*wire.MsgTx, error) { // Create a version two transaction (as the success version of this @@ 
-53,10 +54,13 @@ func createHtlcSuccessTx(htlcOutput wire.OutPoint, htlcAmt btcutil.Amount, successTx := wire.NewMsgTx(2) // The input to the transaction is the outpoint that creates the - // original HTLC on the sender's commitment transaction. - successTx.AddTxIn(&wire.TxIn{ + // original HTLC on the sender's commitment transaction. Set the + // sequence number based on the channel type. + txin := &wire.TxIn{ PreviousOutPoint: htlcOutput, - }) + Sequence: HtlcSecondLevelInputSequence(chanType), + } + successTx.AddTxIn(txin) // Next, we'll generate the script used as the output for all second // level HTLC which forces a covenant w.r.t what can be done with all @@ -97,7 +101,8 @@ func createHtlcSuccessTx(htlcOutput wire.OutPoint, htlcAmt btcutil.Amount, // NOTE: The passed amount for the HTLC should take into account the required // fee rate at the time the HTLC was created. The fee should be able to // entirely pay for this (tiny: 1-in 1-out) transaction. -func createHtlcTimeoutTx(htlcOutput wire.OutPoint, htlcAmt btcutil.Amount, +func createHtlcTimeoutTx(chanType channeldb.ChannelType, + htlcOutput wire.OutPoint, htlcAmt btcutil.Amount, cltvExpiry, csvDelay uint32, revocationKey, delayKey *btcec.PublicKey) (*wire.MsgTx, error) { @@ -108,10 +113,13 @@ func createHtlcTimeoutTx(htlcOutput wire.OutPoint, htlcAmt btcutil.Amount, timeoutTx.LockTime = cltvExpiry // The input to the transaction is the outpoint that creates the - // original HTLC on the sender's commitment transaction. - timeoutTx.AddTxIn(&wire.TxIn{ + // original HTLC on the sender's commitment transaction. Set the + // sequence number based on the channel type. 
+ txin := &wire.TxIn{ PreviousOutPoint: htlcOutput, - }) + Sequence: HtlcSecondLevelInputSequence(chanType), + } + timeoutTx.AddTxIn(txin) // Next, we'll generate the script used as the output for all second // level HTLC which forces a covenant w.r.t what can be done with all diff --git a/lnwallet/transactions_test.go b/lnwallet/transactions_test.go index ed4c533db0..7a3f08a299 100644 --- a/lnwallet/transactions_test.go +++ b/lnwallet/transactions_test.go @@ -17,6 +17,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/shachain" ) @@ -370,7 +371,7 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) { // Manually construct a new LightningChannel. channelState := channeldb.OpenChannel{ - ChanType: channeldb.SingleFunderTweakless, + ChanType: channeldb.SingleFunderTweaklessBit, ChainHash: *tc.netParams.GenesisHash, FundingOutpoint: tc.fundingOutpoint, ShortChannelID: tc.shortChanID, @@ -422,14 +423,12 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) { channel := LightningChannel{ channelState: &channelState, Signer: signer, - localChanCfg: &channelState.LocalChanCfg, - remoteChanCfg: &channelState.RemoteChanCfg, + commitBuilder: NewCommitmentBuilder(&channelState), } err = channel.createSignDesc() if err != nil { t.Fatalf("Failed to generate channel sign descriptor: %v", err) } - channel.createStateHintObfuscator() // The commitmentPoint is technically hidden in the spec, but we need it to // generate the correct tweak. 
@@ -440,8 +439,8 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) { LocalHtlcKeyTweak: tweak, LocalHtlcKey: tc.localPaymentPubKey, RemoteHtlcKey: tc.remotePaymentPubKey, - DelayKey: tc.localDelayPubKey, - NoDelayKey: tc.remotePaymentPubKey, + ToLocalKey: tc.localDelayPubKey, + ToRemoteKey: tc.remotePaymentPubKey, RevocationKey: tc.localRevocationPubKey, } @@ -795,23 +794,32 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) { } theHTLCView := htlcViewFromHTLCs(htlcs) + feePerKw := chainfee.SatPerKWeight(test.commitment.FeePerKw) + isOurs := true + height := test.commitment.CommitHeight + // Create unsigned commitment transaction. - commitmentView := &commitment{ - height: test.commitment.CommitHeight, - ourBalance: test.commitment.LocalBalance, - theirBalance: test.commitment.RemoteBalance, - feePerKw: SatPerKWeight(test.commitment.FeePerKw), - dustLimit: tc.dustLimit, - isOurs: true, - } - err = channel.createCommitmentTx( - commitmentView, theHTLCView, keys, + view, err := channel.commitBuilder.createUnsignedCommitmentTx( + test.commitment.LocalBalance, + test.commitment.RemoteBalance, isOurs, feePerKw, + height, theHTLCView, keys, ) if err != nil { t.Errorf("Case %d: Failed to create new commitment tx: %v", i, err) continue } + commitmentView := &commitment{ + ourBalance: view.ourBalance, + theirBalance: view.theirBalance, + txn: view.txn, + fee: view.fee, + height: height, + feePerKw: feePerKw, + dustLimit: tc.dustLimit, + isOurs: isOurs, + } + // Initialize LocalCommit, which is used in getSignedCommitTx. channelState.LocalCommitment = test.commitment channelState.LocalCommitment.Htlcs = htlcs @@ -843,9 +851,10 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) { // Generate second-level HTLC transactions for HTLCs in // commitment tx. 
htlcResolutions, err := extractHtlcResolutions( - SatPerKWeight(test.commitment.FeePerKw), true, signer, - htlcs, keys, channel.localChanCfg, channel.remoteChanCfg, - commitTx.TxHash(), + chainfee.SatPerKWeight(test.commitment.FeePerKw), true, + signer, htlcs, keys, &channel.channelState.LocalChanCfg, + &channel.channelState.RemoteChanCfg, commitTx.TxHash(), + channel.channelState.ChanType, ) if err != nil { t.Errorf("Case %d: Failed to extract HTLC resolutions: %v", i, err) @@ -1016,7 +1025,7 @@ func testSpendValidation(t *testing.T, tweakless bool) { fakeFundingTxIn := wire.NewTxIn(fundingOut, nil, nil) const channelBalance = btcutil.Amount(1 * 10e8) - const csvTimeout = uint32(5) + const csvTimeout = 5 // We also set up set some resources for the commitment transaction. // Each side currently has 1 BTC within the channel, with a total @@ -1040,8 +1049,10 @@ func testSpendValidation(t *testing.T, tweakless bool) { // our commitments, if it's tweakless, his key will just be his regular // pubkey. bobPayKey := input.TweakPubKey(bobKeyPub, commitPoint) + channelType := channeldb.SingleFunderBit if tweakless { bobPayKey = bobKeyPub + channelType = channeldb.SingleFunderTweaklessBit } aliceCommitTweak := input.SingleTweakBytes(commitPoint, aliceKeyPub) @@ -1051,6 +1062,20 @@ func testSpendValidation(t *testing.T, tweakless bool) { Privkeys: []*btcec.PrivateKey{aliceKeyPriv}, } + aliceChanCfg := &channeldb.ChannelConfig{ + ChannelConstraints: channeldb.ChannelConstraints{ + DustLimit: DefaultDustLimit(), + CsvDelay: csvTimeout, + }, + } + + bobChanCfg := &channeldb.ChannelConfig{ + ChannelConstraints: channeldb.ChannelConstraints{ + DustLimit: DefaultDustLimit(), + CsvDelay: csvTimeout, + }, + } + // With all the test data set up, we create the commitment transaction. // We only focus on a single party's transactions, as the scripts are // identical with the roles reversed. 
@@ -1059,13 +1084,13 @@ func testSpendValidation(t *testing.T, tweakless bool) { // of 5 blocks before sweeping the output, while bob can spend // immediately with either the revocation key, or his regular key. keyRing := &CommitmentKeyRing{ - DelayKey: aliceDelayKey, + ToLocalKey: aliceDelayKey, RevocationKey: revokePubKey, - NoDelayKey: bobPayKey, + ToRemoteKey: bobPayKey, } commitmentTx, err := CreateCommitTx( - *fakeFundingTxIn, keyRing, csvTimeout, channelBalance, - channelBalance, DefaultDustLimit(), + channelType, *fakeFundingTxIn, keyRing, aliceChanCfg, + bobChanCfg, channelBalance, channelBalance, 0, ) if err != nil { t.Fatalf("unable to create commitment transaction: %v", nil) diff --git a/lnwallet/wallet.go b/lnwallet/wallet.go index ba65fe4b7a..3349d29ecf 100644 --- a/lnwallet/wallet.go +++ b/lnwallet/wallet.go @@ -5,7 +5,6 @@ import ( "crypto/sha256" "errors" "fmt" - "math" "net" "sync" "sync/atomic" @@ -16,11 +15,14 @@ import ( "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/psbt" "github.com/btcsuite/btcutil/txsort" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/lnwallet/chanvalidate" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/shachain" @@ -32,18 +34,26 @@ const ( msgBufferSize = 100 ) -// ErrInsufficientFunds is a type matching the error interface which is -// returned when coin selection for a new funding transaction fails to due -// having an insufficient amount of confirmed funds. 
-type ErrInsufficientFunds struct { - amountAvailable btcutil.Amount - amountSelected btcutil.Amount +var ( + // ErrPsbtFundingRequired is the error that is returned during the + // contribution handling process if the process should be paused for + // the construction of a PSBT outside of lnd's wallet. + ErrPsbtFundingRequired = errors.New("PSBT funding required") +) + +// PsbtFundingRequired is a type that implements the error interface and +// contains the information needed to construct a PSBT. +type PsbtFundingRequired struct { + // Intent is the pending PSBT funding intent that needs to be funded + // if the wrapping error is returned. + Intent *chanfunding.PsbtIntent } -func (e *ErrInsufficientFunds) Error() string { - return fmt.Sprintf("not enough witness outputs to create funding transaction,"+ - " need %v only have %v available", e.amountAvailable, - e.amountSelected) +// Error returns the underlying error. +// +// NOTE: This method is part of the error interface. +func (p *PsbtFundingRequired) Error() string { + return ErrPsbtFundingRequired.Error() } // InitFundingReserveMsg is the first message sent to initiate the workflow @@ -61,6 +71,10 @@ type InitFundingReserveMsg struct { // target channel. ChainHash *chainhash.Hash + // PendingChanID is the pending channel ID for this funding flow as + // used in the wire protocol. + PendingChanID [32]byte + // NodeID is the ID of the remote node we would like to open a channel // with. NodeID *btcec.PublicKey @@ -89,11 +103,11 @@ type InitFundingReserveMsg struct { // of initial commitment transactions. In order to ensure timely // confirmation, it is recommended that this fee should be generous, // paying some multiple of the accepted base fee rate of the network. - CommitFeePerKw SatPerKWeight + CommitFeePerKw chainfee.SatPerKWeight // FundingFeePerKw is the fee rate in sat/kw to use for the initial // funding transaction. 
- FundingFeePerKw SatPerKWeight + FundingFeePerKw chainfee.SatPerKWeight // PushMSat is the number of milli-satoshis that should be pushed over // the responder as part of the initial channel creation. @@ -107,9 +121,15 @@ type InitFundingReserveMsg struct { // output selected to fund the channel should satisfy. MinConfs int32 - // Tweakless indicates if the channel should use the new tweakless - // commitment format or not. - Tweakless bool + // CommitType indicates what type of commitment type the channel should + // be using, like tweakless or anchors. + CommitType CommitmentType + + // ChanFunder is an optional channel funder that allows the caller to + // control exactly how the channel funding is carried out. If not + // specified, then the default chanfunding.WalletAssembler will be + // used. + ChanFunder chanfunding.Assembler // err is a channel in which all errors will be sent across. Will be // nil if this initial set is successful. @@ -153,6 +173,16 @@ type addContributionMsg struct { err chan error } +// continueContributionMsg represents a message that signals that the +// interrupted funding process involving a PSBT can now be continued because the +// finalized transaction is now available. +type continueContributionMsg struct { + pendingFundingID uint64 + + // NOTE: In order to avoid deadlocks, this channel MUST be buffered. + err chan error +} + // addSingleContributionMsg represents a message executing the second phase of // a single funder channel reservation workflow. This messages carries the // counterparty's "contribution" to the payment channel. As this message is @@ -185,7 +215,7 @@ type addCounterPartySigsMsg struct { // This should be 1/2 of the signatures needed to successfully spend our // version of the commitment transaction. - theirCommitmentSig []byte + theirCommitmentSig input.Signature // This channel is used to return the completed channel after the wallet // has completed all of its stages in the funding process. 
@@ -210,7 +240,7 @@ type addSingleFunderSigsMsg struct { // theirCommitmentSig are the 1/2 of the signatures needed to // successfully spend our version of the commitment transaction. - theirCommitmentSig []byte + theirCommitmentSig input.Signature // This channel is used to return the completed channel after the wallet // has completed all of its stages in the funding process. @@ -280,6 +310,11 @@ type LightningWallet struct { // the currently locked outpoints. lockedOutPoints map[wire.OutPoint]struct{} + // fundingIntents houses all the "interception" registered by a caller + // using the RegisterFundingIntent method. + intentMtx sync.RWMutex + fundingIntents map[[32]byte]chanfunding.Intent + quit chan struct{} wg sync.WaitGroup @@ -300,6 +335,7 @@ func NewLightningWallet(Cfg Config) (*LightningWallet, error) { nextFundingID: 0, fundingLimbo: make(map[uint64]*ChannelReservation), lockedOutPoints: make(map[wire.OutPoint]struct{}), + fundingIntents: make(map[[32]byte]chanfunding.Intent), quit: make(chan struct{}), }, nil } @@ -345,6 +381,8 @@ func (l *LightningWallet) Shutdown() error { func (l *LightningWallet) LockedOutpoints() []*wire.OutPoint { outPoints := make([]*wire.OutPoint, 0, len(l.lockedOutPoints)) for outPoint := range l.lockedOutPoints { + outPoint := outPoint + outPoints = append(outPoints, &outPoint) } @@ -390,6 +428,8 @@ out: l.handleSingleContribution(msg) case *addContributionMsg: l.handleContributionMsg(msg) + case *continueContributionMsg: + l.handleChanPointReady(msg) case *addSingleFunderSigsMsg: l.handleSingleFunderSigs(msg) case *addCounterPartySigsMsg: @@ -436,6 +476,100 @@ func (l *LightningWallet) InitChannelReservation( return <-req.resp, <-req.err } +// RegisterFundingIntent allows a caller to signal to the wallet that if a +// pending channel ID of expectedID is found, then it can skip constructing a +// new chanfunding.Assembler, and instead use the specified chanfunding.Intent. 
+// As an example, this lets some of the parameters for funding transaction to +// be negotiated outside the regular funding protocol. +func (l *LightningWallet) RegisterFundingIntent(expectedID [32]byte, + shimIntent chanfunding.Intent) error { + + l.intentMtx.Lock() + defer l.intentMtx.Unlock() + + if _, ok := l.fundingIntents[expectedID]; ok { + return fmt.Errorf("pendingChanID(%x) already has intent "+ + "registered", expectedID[:]) + } + + l.fundingIntents[expectedID] = shimIntent + + return nil +} + +// PsbtFundingVerify looks up a previously registered funding intent by its +// pending channel ID and tries to advance the state machine by verifying the +// passed PSBT. +func (l *LightningWallet) PsbtFundingVerify(pid [32]byte, + packet *psbt.Packet) error { + + l.intentMtx.Lock() + defer l.intentMtx.Unlock() + + intent, ok := l.fundingIntents[pid] + if !ok { + return fmt.Errorf("no funding intent found for "+ + "pendingChannelID(%x)", pid[:]) + } + psbtIntent, ok := intent.(*chanfunding.PsbtIntent) + if !ok { + return fmt.Errorf("incompatible funding intent") + } + err := psbtIntent.Verify(packet) + if err != nil { + return fmt.Errorf("error verifying PSBT: %v", err) + } + + return nil +} + +// PsbtFundingFinalize looks up a previously registered funding intent by its +// pending channel ID and tries to advance the state machine by finalizing the +// passed PSBT. 
+func (l *LightningWallet) PsbtFundingFinalize(pid [32]byte, + packet *psbt.Packet) error { + + l.intentMtx.Lock() + defer l.intentMtx.Unlock() + + intent, ok := l.fundingIntents[pid] + if !ok { + return fmt.Errorf("no funding intent found for "+ + "pendingChannelID(%x)", pid[:]) + } + psbtIntent, ok := intent.(*chanfunding.PsbtIntent) + if !ok { + return fmt.Errorf("incompatible funding intent") + } + err := psbtIntent.Finalize(packet) + if err != nil { + return fmt.Errorf("error finalizing PSBT: %v", err) + } + + return nil +} + +// CancelFundingIntent allows a caller to cancel a previously registered +// funding intent. If no intent was found, then an error will be returned. +func (l *LightningWallet) CancelFundingIntent(pid [32]byte) error { + l.intentMtx.Lock() + defer l.intentMtx.Unlock() + + intent, ok := l.fundingIntents[pid] + if !ok { + return fmt.Errorf("no funding intent found for "+ + "pendingChannelID(%x)", pid[:]) + } + + // Give the intent a chance to clean up after itself, removing coin + // locks or similar reserved resources. + intent.Cancel() + + delete(l.fundingIntents, pid) + + return nil +} + // handleFundingReserveRequest processes a message intending to create, and // validate a funding reservation request. func (l *LightningWallet) handleFundingReserveRequest(req *InitFundingReserveMsg) { @@ -458,24 +592,55 @@ func (l *LightningWallet) handleFundingReserveRequest(req *InitFundingReserveMsg return } + // If no chanFunder was provided, then we'll assume the default + // assembler, which is backed by the wallet's internal coin selection. 
+ if req.ChanFunder == nil { + cfg := chanfunding.WalletConfig{ + CoinSource: &CoinSource{l}, + CoinSelectLocker: l, + CoinLocker: l, + Signer: l.Cfg.Signer, + DustLimit: DefaultDustLimit(), + } + req.ChanFunder = chanfunding.NewWalletAssembler(cfg) + } + localFundingAmt := req.LocalFundingAmt + remoteFundingAmt := req.RemoteFundingAmt var ( - selected *coinSelection - err error + fundingIntent chanfunding.Intent + err error ) - // If we're on the receiving end of a single funder channel then we - // don't need to perform any coin selection, and the remote contributes - // all funds. Otherwise, attempt to obtain enough coins to meet the - // required funding amount. - if req.LocalFundingAmt != 0 { + // If we've just received an inbound funding request that we have a + // registered shim intent to, then we'll obtain the backing intent now. + // In this case, we're doing a special funding workflow that allows + // more advanced constructions such as channel factories to be + // instantiated. + l.intentMtx.Lock() + fundingIntent, ok := l.fundingIntents[req.PendingChanID] + l.intentMtx.Unlock() + + // Otherwise, this is a normal funding flow, so we'll use the chan + // funder in the attached request to provision the inputs/outputs + // that'll ultimately be used to construct the funding transaction. + if !ok { // Coin selection is done on the basis of sat/kw, so we'll use // the fee rate passed in to perform coin selection. 
var err error - selected, err = l.selectCoinsAndChange( - req.FundingFeePerKw, req.LocalFundingAmt, req.MinConfs, - req.SubtractFees, + fundingReq := &chanfunding.Request{ + RemoteAmt: req.RemoteFundingAmt, + LocalAmt: req.LocalFundingAmt, + MinConfs: req.MinConfs, + SubtractFees: req.SubtractFees, + FeeRate: req.FundingFeePerKw, + ChangeAddr: func() (btcutil.Address, error) { + return l.NewAddress(WitnessPubKey, true) + }, + } + fundingIntent, err = req.ChanFunder.ProvisionChannel( + fundingReq, ) if err != nil { req.err <- err @@ -483,31 +648,73 @@ func (l *LightningWallet) handleFundingReserveRequest(req *InitFundingReserveMsg return } - localFundingAmt = selected.fundingAmt + // Register the funding intent now in case we need to access it + // again later, as it's the case for the PSBT state machine for + // example. + err = l.RegisterFundingIntent(req.PendingChanID, fundingIntent) + if err != nil { + req.err <- err + req.resp <- nil + return + } + + localFundingAmt = fundingIntent.LocalFundingAmt() + remoteFundingAmt = fundingIntent.RemoteFundingAmt() + } + + // At this point there _has_ to be a funding intent, otherwise something + // went really wrong. + if fundingIntent == nil { + req.err <- fmt.Errorf("no funding intent present") + req.resp <- nil + return + } + + // If this is a shim intent, then it may be attempting to use an + // existing set of keys for the funding workflow. In this case, we'll + // make a simple wrapper keychain.KeyRing that will proxy certain + // derivation calls to future callers. + var ( + keyRing keychain.KeyRing = l.SecretKeyRing + thawHeight uint32 + ) + if shimIntent, ok := fundingIntent.(*chanfunding.ShimIntent); ok { + keyRing = &shimKeyRing{ + KeyRing: keyRing, + ShimIntent: shimIntent, + } + + // As this was a registered shim intent, we'll obtain the thaw + // height of the intent, if present at all. If this is + // non-zero, then we'll mark this as the proper channel type. 
+ thawHeight = shimIntent.ThawHeight() } // The total channel capacity will be the size of the funding output we // created plus the remote contribution. - capacity := localFundingAmt + req.RemoteFundingAmt + capacity := localFundingAmt + remoteFundingAmt id := atomic.AddUint64(&l.nextFundingID, 1) reservation, err := NewChannelReservation( capacity, localFundingAmt, req.CommitFeePerKw, l, id, req.PushMSat, l.Cfg.NetParams.GenesisHash, req.Flags, - req.Tweakless, + req.CommitType, req.ChanFunder, req.PendingChanID, + thawHeight, ) if err != nil { - selected.unlockCoins() + fundingIntent.Cancel() + req.err <- err req.resp <- nil return } err = l.initOurContribution( - reservation, selected, req.NodeAddr, req.NodeID, + reservation, fundingIntent, req.NodeAddr, req.NodeID, keyRing, ) if err != nil { - selected.unlockCoins() + fundingIntent.Cancel() + req.err <- err req.resp <- nil return @@ -530,52 +737,60 @@ func (l *LightningWallet) handleFundingReserveRequest(req *InitFundingReserveMsg // and change reserved for the channel, and derives the keys to use for this // channel. func (l *LightningWallet) initOurContribution(reservation *ChannelReservation, - selected *coinSelection, nodeAddr net.Addr, nodeID *btcec.PublicKey) error { + fundingIntent chanfunding.Intent, nodeAddr net.Addr, + nodeID *btcec.PublicKey, keyRing keychain.KeyRing) error { // Grab the mutex on the ChannelReservation to ensure thread-safety reservation.Lock() defer reservation.Unlock() - if selected != nil { - reservation.ourContribution.Inputs = selected.coins - reservation.ourContribution.ChangeOutputs = selected.change + // At this point, if we have a funding intent, we'll use it to populate + // the existing reservation state entries for our coin selection. 
+ if fundingIntent != nil { + if intent, ok := fundingIntent.(*chanfunding.FullIntent); ok { + for _, coin := range intent.InputCoins { + reservation.ourContribution.Inputs = append( + reservation.ourContribution.Inputs, + &wire.TxIn{ + PreviousOutPoint: coin.OutPoint, + }, + ) + } + reservation.ourContribution.ChangeOutputs = intent.ChangeOutputs + } + + reservation.fundingIntent = fundingIntent } reservation.nodeAddr = nodeAddr reservation.partialState.IdentityPub = nodeID - // Next, we'll grab a series of keys from the wallet which will be used - // for the duration of the channel. The keys include: our multi-sig - // key, the base revocation key, the base htlc key,the base payment - // key, and the delayed payment key. - // - // TODO(roasbeef): "salt" each key as well? var err error - reservation.ourContribution.MultiSigKey, err = l.DeriveNextKey( + reservation.ourContribution.MultiSigKey, err = keyRing.DeriveNextKey( keychain.KeyFamilyMultiSig, ) if err != nil { return err } - reservation.ourContribution.RevocationBasePoint, err = l.DeriveNextKey( + reservation.ourContribution.RevocationBasePoint, err = keyRing.DeriveNextKey( keychain.KeyFamilyRevocationBase, ) if err != nil { return err } - reservation.ourContribution.HtlcBasePoint, err = l.DeriveNextKey( + reservation.ourContribution.HtlcBasePoint, err = keyRing.DeriveNextKey( keychain.KeyFamilyHtlcBase, ) if err != nil { return err } - reservation.ourContribution.PaymentBasePoint, err = l.DeriveNextKey( + reservation.ourContribution.PaymentBasePoint, err = keyRing.DeriveNextKey( keychain.KeyFamilyPaymentBase, ) if err != nil { return err } - reservation.ourContribution.DelayBasePoint, err = l.DeriveNextKey( + reservation.ourContribution.DelayBasePoint, err = keyRing.DeriveNextKey( keychain.KeyFamilyDelayBase, ) if err != nil { @@ -584,7 +799,7 @@ func (l *LightningWallet) initOurContribution(reservation *ChannelReservation, // With the above keys created, we'll also need to initialization our // initial 
revocation tree state. - nextRevocationKeyDesc, err := l.DeriveNextKey( + nextRevocationKeyDesc, err := keyRing.DeriveNextKey( keychain.KeyFamilyRevocationRoot, ) if err != nil { @@ -650,6 +865,16 @@ func (l *LightningWallet) handleFundingCancelRequest(req *fundingReserveCancelMs delete(l.fundingLimbo, req.pendingFundingID) + pid := pendingReservation.pendingChanID + + l.intentMtx.Lock() + if intent, ok := l.fundingIntents[pid]; ok { + intent.Cancel() + + delete(l.fundingIntents, pendingReservation.pendingChanID) + } + l.intentMtx.Unlock() + req.err <- nil } @@ -661,21 +886,20 @@ func (l *LightningWallet) handleFundingCancelRequest(req *fundingReserveCancelMs func CreateCommitmentTxns(localBalance, remoteBalance btcutil.Amount, ourChanCfg, theirChanCfg *channeldb.ChannelConfig, localCommitPoint, remoteCommitPoint *btcec.PublicKey, - fundingTxIn wire.TxIn, - tweaklessCommit bool) (*wire.MsgTx, *wire.MsgTx, error) { + fundingTxIn wire.TxIn, chanType channeldb.ChannelType) ( + *wire.MsgTx, *wire.MsgTx, error) { localCommitmentKeys := DeriveCommitmentKeys( - localCommitPoint, true, tweaklessCommit, ourChanCfg, - theirChanCfg, + localCommitPoint, true, chanType, ourChanCfg, theirChanCfg, ) remoteCommitmentKeys := DeriveCommitmentKeys( - remoteCommitPoint, false, tweaklessCommit, ourChanCfg, - theirChanCfg, + remoteCommitPoint, false, chanType, ourChanCfg, theirChanCfg, ) - ourCommitTx, err := CreateCommitTx(fundingTxIn, localCommitmentKeys, - uint32(ourChanCfg.CsvDelay), localBalance, remoteBalance, - ourChanCfg.DustLimit) + ourCommitTx, err := CreateCommitTx( + chanType, fundingTxIn, localCommitmentKeys, ourChanCfg, + theirChanCfg, localBalance, remoteBalance, 0, + ) if err != nil { return nil, nil, err } @@ -685,9 +909,10 @@ func CreateCommitmentTxns(localBalance, remoteBalance btcutil.Amount, return nil, nil, err } - theirCommitTx, err := CreateCommitTx(fundingTxIn, remoteCommitmentKeys, - uint32(theirChanCfg.CsvDelay), remoteBalance, localBalance, - 
theirChanCfg.DustLimit) + theirCommitTx, err := CreateCommitTx( + chanType, fundingTxIn, remoteCommitmentKeys, theirChanCfg, + ourChanCfg, remoteBalance, localBalance, 0, + ) if err != nil { return nil, nil, err } @@ -719,100 +944,180 @@ func (l *LightningWallet) handleContributionMsg(req *addContributionMsg) { pendingReservation.Lock() defer pendingReservation.Unlock() - // Create a blank, fresh transaction. Soon to be a complete funding - // transaction which will allow opening a lightning channel. - pendingReservation.fundingTx = wire.NewMsgTx(1) - fundingTx := pendingReservation.fundingTx - // Some temporary variables to cut down on the resolution verbosity. pendingReservation.theirContribution = req.contribution theirContribution := req.contribution ourContribution := pendingReservation.ourContribution - // Add all multi-party inputs and outputs to the transaction. - for _, ourInput := range ourContribution.Inputs { - fundingTx.AddTxIn(ourInput) - } - for _, theirInput := range theirContribution.Inputs { - fundingTx.AddTxIn(theirInput) - } - for _, ourChangeOutput := range ourContribution.ChangeOutputs { - fundingTx.AddTxOut(ourChangeOutput) - } - for _, theirChangeOutput := range theirContribution.ChangeOutputs { - fundingTx.AddTxOut(theirChangeOutput) - } - - ourKey := pendingReservation.ourContribution.MultiSigKey - theirKey := theirContribution.MultiSigKey - - // Finally, add the 2-of-2 multi-sig output which will set up the lightning - // channel. - channelCapacity := int64(pendingReservation.partialState.Capacity) - witnessScript, multiSigOut, err := input.GenFundingPkScript( - ourKey.PubKey.SerializeCompressed(), - theirKey.PubKey.SerializeCompressed(), channelCapacity, + var ( + chanPoint *wire.OutPoint + err error ) - if err != nil { - req.err <- err - return - } - // Sort the transaction. Since both side agree to a canonical ordering, - // by sorting we no longer need to send the entire transaction. Only - // signatures will be exchanged. 
- fundingTx.AddTxOut(multiSigOut) + txsort.InPlaceSort(pendingReservation.fundingTx) + // At this point, we can now construct our channel point. Depending on + // which type of intent we obtained from our chanfunding.Assembler, + // we'll carry out a distinct set of steps. + switch fundingIntent := pendingReservation.fundingIntent.(type) { + // The transaction was created outside of the wallet and might already + // be published. Nothing left to do other than using the correct + // outpoint. + case *chanfunding.ShimIntent: + chanPoint, err = fundingIntent.ChanPoint() + if err != nil { + req.err <- fmt.Errorf("unable to obtain chan point: %v", err) + return + } - // Next, sign all inputs that are ours, collecting the signatures in - // order of the inputs. - pendingReservation.ourFundingInputScripts = make([]*input.Script, 0, - len(ourContribution.Inputs)) - signDesc := input.SignDescriptor{ - HashType: txscript.SigHashAll, - SigHashes: txscript.NewTxSigHashes(fundingTx), - } - for i, txIn := range fundingTx.TxIn { - info, err := l.FetchInputInfo(&txIn.PreviousOutPoint) - if err == ErrNotMine { - continue - } else if err != nil { - req.err <- err + pendingReservation.partialState.FundingOutpoint = *chanPoint + + // The user has signaled that they want to use a PSBT to construct the + // funding transaction. Because we now have the multisig keys from both + // parties, we can create the multisig script that needs to be funded + // and then pause the process until the user supplies the PSBT + // containing the eventual funding transaction. + case *chanfunding.PsbtIntent: + if fundingIntent.PendingPsbt != nil { + req.err <- fmt.Errorf("PSBT funding already in " + + "progress") return } - signDesc.Output = &wire.TxOut{ - PkScript: info.PkScript, - Value: int64(info.Value), + // Now that we know our contribution, we can bind both the local + // and remote key which will be needed to calculate the multisig + // funding output in a next step. 
+ pendingChanID := pendingReservation.pendingChanID + walletLog.Debugf("Advancing PSBT funding flow for "+ + "pending_id(%x), binding keys local_key=%v, "+ + "remote_key=%x", pendingChanID, + &ourContribution.MultiSigKey, + theirContribution.MultiSigKey.PubKey.SerializeCompressed()) + fundingIntent.BindKeys( + &ourContribution.MultiSigKey, + theirContribution.MultiSigKey.PubKey, + ) + + // Exit early because we can't continue the funding flow yet. + req.err <- &PsbtFundingRequired{ + Intent: fundingIntent, } - signDesc.InputIndex = i + return + + case *chanfunding.FullIntent: + // Now that we know their public key, we can bind theirs as + // well as ours to the funding intent. + fundingIntent.BindKeys( + &pendingReservation.ourContribution.MultiSigKey, + theirContribution.MultiSigKey.PubKey, + ) - inputScript, err := l.Cfg.Signer.ComputeInputScript( - fundingTx, &signDesc, + // With our keys bound, we can now construct+sign the final + // funding transaction and also obtain the chanPoint that + // creates the channel. + fundingTx, err := fundingIntent.CompileFundingTx( + theirContribution.Inputs, + theirContribution.ChangeOutputs, ) if err != nil { - req.err <- err + req.err <- fmt.Errorf("unable to construct funding "+ + "tx: %v", err) + return + } + chanPoint, err = fundingIntent.ChanPoint() + if err != nil { + req.err <- fmt.Errorf("unable to obtain chan "+ + "point: %v", err) return } - txIn.SignatureScript = inputScript.SigScript - txIn.Witness = inputScript.Witness - pendingReservation.ourFundingInputScripts = append( - pendingReservation.ourFundingInputScripts, - inputScript, + // Finally, we'll populate the relevant information in our + // pendingReservation so the rest of the funding flow can + // continue as normal. 
+ pendingReservation.fundingTx = fundingTx + pendingReservation.partialState.FundingOutpoint = *chanPoint + pendingReservation.ourFundingInputScripts = make( + []*input.Script, 0, len(ourContribution.Inputs), ) + for _, txIn := range fundingTx.TxIn { + _, err := l.FetchInputInfo(&txIn.PreviousOutPoint) + if err != nil { + continue + } + + pendingReservation.ourFundingInputScripts = append( + pendingReservation.ourFundingInputScripts, + &input.Script{ + Witness: txIn.Witness, + SigScript: txIn.SignatureScript, + }, + ) + } + + walletLog.Debugf("Funding tx for ChannelPoint(%v) "+ + "generated: %v", chanPoint, spew.Sdump(fundingTx)) } - // Locate the index of the multi-sig outpoint in order to record it - // since the outputs are canonically sorted. If this is a single funder - // workflow, then we'll also need to send this to the remote node. - fundingTxID := fundingTx.TxHash() - _, multiSigIndex := input.FindScriptOutputIndex(fundingTx, multiSigOut.PkScript) - fundingOutpoint := wire.NewOutPoint(&fundingTxID, multiSigIndex) - pendingReservation.partialState.FundingOutpoint = *fundingOutpoint + // If we landed here and didn't exit early, it means we already have + // the channel point ready. We can jump directly to the next step. + l.handleChanPointReady(&continueContributionMsg{ + pendingFundingID: req.pendingFundingID, + err: req.err, + }) +} - walletLog.Debugf("Funding tx for ChannelPoint(%v) generated: %v", - fundingOutpoint, spew.Sdump(fundingTx)) +// handleChanPointReady continues the funding process once the channel point +// is known and the funding transaction can be completed. 
+func (l *LightningWallet) handleChanPointReady(req *continueContributionMsg) { + l.limboMtx.Lock() + pendingReservation, ok := l.fundingLimbo[req.pendingFundingID] + l.limboMtx.Unlock() + if !ok { + req.err <- fmt.Errorf("attempted to update non-existent " + + "funding state") + return + } + ourContribution := pendingReservation.ourContribution + theirContribution := pendingReservation.theirContribution + chanPoint := pendingReservation.partialState.FundingOutpoint + + // If we're in the PSBT funding flow, we now should have everything that + // is needed to construct and publish the full funding transaction. + intent := pendingReservation.fundingIntent + if psbtIntent, ok := intent.(*chanfunding.PsbtIntent); ok { + // With our keys bound, we can now construct+sign the final + // funding transaction and also obtain the chanPoint that + // creates the channel. + fundingTx, err := psbtIntent.CompileFundingTx() + if err != nil { + req.err <- fmt.Errorf("unable to construct funding "+ + "tx: %v", err) + return + } + chanPointPtr, err := psbtIntent.ChanPoint() + if err != nil { + req.err <- fmt.Errorf("unable to obtain chan "+ + "point: %v", err) + return + } + + // Finally, we'll populate the relevant information in our + // pendingReservation so the rest of the funding flow can + // continue as normal. 
+ pendingReservation.fundingTx = fundingTx + pendingReservation.partialState.FundingOutpoint = *chanPointPtr + chanPoint = *chanPointPtr + pendingReservation.ourFundingInputScripts = make( + []*input.Script, 0, len(ourContribution.Inputs), + ) + for _, txIn := range fundingTx.TxIn { + pendingReservation.ourFundingInputScripts = append( + pendingReservation.ourFundingInputScripts, + &input.Script{ + Witness: txIn.Witness, + SigScript: txIn.SignatureScript, + }, + ) + } + } // Initialize an empty sha-chain for them, tracking the current pending // revocation hash (we don't yet know the preimage so we can't add it @@ -829,22 +1134,18 @@ func (l *LightningWallet) handleContributionMsg(req *addContributionMsg) { // Create the txin to our commitment transaction; required to construct // the commitment transactions. fundingTxIn := wire.TxIn{ - PreviousOutPoint: wire.OutPoint{ - Hash: fundingTxID, - Index: multiSigIndex, - }, + PreviousOutPoint: chanPoint, } // With the funding tx complete, create both commitment transactions. 
localBalance := pendingReservation.partialState.LocalCommitment.LocalBalance.ToSatoshis() remoteBalance := pendingReservation.partialState.LocalCommitment.RemoteBalance.ToSatoshis() - tweaklessCommits := pendingReservation.partialState.ChanType.IsTweakless() ourCommitTx, theirCommitTx, err := CreateCommitmentTxns( localBalance, remoteBalance, ourContribution.ChannelConfig, theirContribution.ChannelConfig, ourContribution.FirstCommitmentPoint, theirContribution.FirstCommitmentPoint, fundingTxIn, - tweaklessCommits, + pendingReservation.partialState.ChanType, ) if err != nil { req.err <- err @@ -889,21 +1190,32 @@ func (l *LightningWallet) handleContributionMsg(req *addContributionMsg) { txsort.InPlaceSort(theirCommitTx) walletLog.Debugf("Local commit tx for ChannelPoint(%v): %v", - fundingOutpoint, spew.Sdump(ourCommitTx)) + chanPoint, spew.Sdump(ourCommitTx)) walletLog.Debugf("Remote commit tx for ChannelPoint(%v): %v", - fundingOutpoint, spew.Sdump(theirCommitTx)) + chanPoint, spew.Sdump(theirCommitTx)) // Record newly available information within the open channel state. - chanState.FundingOutpoint = *fundingOutpoint + chanState.FundingOutpoint = chanPoint chanState.LocalCommitment.CommitTx = ourCommitTx chanState.RemoteCommitment.CommitTx = theirCommitTx + // Next, we'll obtain the funding witness script, and the funding + // output itself so we can generate a valid signature for the remote + // party. + fundingIntent := pendingReservation.fundingIntent + fundingWitnessScript, fundingOutput, err := fundingIntent.FundingOutput() + if err != nil { + req.err <- fmt.Errorf("unable to obtain funding output") + return + } + // Generate a signature for their version of the initial commitment // transaction. 
- signDesc = input.SignDescriptor{ - WitnessScript: witnessScript, + ourKey := ourContribution.MultiSigKey + signDesc := input.SignDescriptor{ + WitnessScript: fundingWitnessScript, KeyDesc: ourKey, - Output: multiSigOut, + Output: fundingOutput, HashType: txscript.SigHashAll, SigHashes: txscript.NewTxSigHashes(theirCommitTx), InputIndex: 0, @@ -959,41 +1271,14 @@ func (l *LightningWallet) handleSingleContribution(req *addSingleContributionMsg return } -// openChanDetails contains a "finalized" channel which can be considered -// "open" according to the requested confirmation depth at reservation -// initialization. Additionally, the struct contains additional details -// pertaining to the exact location in the main chain in-which the transaction -// was confirmed. -type openChanDetails struct { -} - -// handleFundingCounterPartySigs is the final step in the channel reservation -// workflow. During this step, we validate *all* the received signatures for -// inputs to the funding transaction. If any of these are invalid, we bail, -// and forcibly cancel this funding request. Additionally, we ensure that the -// signature we received from the counterparty for our version of the commitment -// transaction allows us to spend from the funding output with the addition of -// our signature. -func (l *LightningWallet) handleFundingCounterPartySigs(msg *addCounterPartySigsMsg) { - l.limboMtx.RLock() - res, ok := l.fundingLimbo[msg.pendingFundingID] - l.limboMtx.RUnlock() - if !ok { - msg.err <- fmt.Errorf("attempted to update non-existent funding state") - return - } - - // Grab the mutex on the ChannelReservation to ensure thread-safety - res.Lock() - defer res.Unlock() +// verifyFundingInputs attempts to verify all remote inputs to the funding +// transaction. +func (l *LightningWallet) verifyFundingInputs(fundingTx *wire.MsgTx, + remoteInputScripts []*input.Script) error { - // Now we can complete the funding transaction by adding their - // signatures to their inputs. 
- res.theirFundingInputScripts = msg.theirFundingInputScripts - inputScripts := msg.theirFundingInputScripts - fundingTx := res.fundingTx sigIndex := 0 fundingHashCache := txscript.NewTxSigHashes(fundingTx) + inputScripts := remoteInputScripts for i, txin := range fundingTx.TxIn { if len(inputScripts) != 0 && len(txin.Witness) == 0 { // Attach the input scripts so we can verify it below. @@ -1005,48 +1290,85 @@ func (l *LightningWallet) handleFundingCounterPartySigs(msg *addCounterPartySigs // // TODO(roasbeef): when dual funder pass actual // height-hint - pkScript, err := input.WitnessScriptHash( - txin.Witness[len(txin.Witness)-1], + // + // TODO(roasbeef): this fails for neutrino always as it + // treats the height hint as an exact birthday of the + // utxo rather than a lower bound + pkScript, err := txscript.ComputePkScript( + txin.SignatureScript, txin.Witness, ) if err != nil { - msg.err <- fmt.Errorf("cannot create script: "+ - "%v", err) - msg.completeChan <- nil - return + return fmt.Errorf("cannot create script: %v", err) } - output, err := l.Cfg.ChainIO.GetUtxo( &txin.PreviousOutPoint, - pkScript, 0, l.quit, + pkScript.Script(), 0, l.quit, ) if output == nil { - msg.err <- fmt.Errorf("input to funding tx "+ - "does not exist: %v", err) - msg.completeChan <- nil - return + return fmt.Errorf("input to funding tx does "+ + "not exist: %v", err) } // Ensure that the witness+sigScript combo is valid. 
- vm, err := txscript.NewEngine(output.PkScript, - fundingTx, i, txscript.StandardVerifyFlags, nil, - fundingHashCache, output.Value) + vm, err := txscript.NewEngine( + output.PkScript, fundingTx, i, + txscript.StandardVerifyFlags, nil, + fundingHashCache, output.Value, + ) if err != nil { - msg.err <- fmt.Errorf("cannot create script "+ + return fmt.Errorf("cannot create script "+ "engine: %s", err) - msg.completeChan <- nil - return } if err = vm.Execute(); err != nil { - msg.err <- fmt.Errorf("cannot validate "+ + return fmt.Errorf("cannot validate "+ "transaction: %s", err) - msg.completeChan <- nil - return } sigIndex++ } } + return nil +} + +// handleFundingCounterPartySigs is the final step in the channel reservation +// workflow. During this step, we validate *all* the received signatures for +// inputs to the funding transaction. If any of these are invalid, we bail, +// and forcibly cancel this funding request. Additionally, we ensure that the +// signature we received from the counterparty for our version of the commitment +// transaction allows us to spend from the funding output with the addition of +// our signature. +func (l *LightningWallet) handleFundingCounterPartySigs(msg *addCounterPartySigsMsg) { + l.limboMtx.RLock() + res, ok := l.fundingLimbo[msg.pendingFundingID] + l.limboMtx.RUnlock() + if !ok { + msg.err <- fmt.Errorf("attempted to update non-existent funding state") + return + } + + // Grab the mutex on the ChannelReservation to ensure thread-safety + res.Lock() + defer res.Unlock() + + // Now we can complete the funding transaction by adding their + // signatures to their inputs. + res.theirFundingInputScripts = msg.theirFundingInputScripts + inputScripts := msg.theirFundingInputScripts + + // Only if we have the final funding transaction do we need to verify + // the final set of inputs. Otherwise, it may be the case that the + // channel was funded via an external wallet. 
+ fundingTx := res.fundingTx + if res.partialState.ChanType.HasFundingTx() { + err := l.verifyFundingInputs(fundingTx, inputScripts) + if err != nil { + msg.err <- err + msg.completeChan <- nil + return + } + } + // At this point, we can also record and verify their signature for our // commitment transaction. res.theirCommitmentSig = msg.theirCommitmentSig @@ -1072,8 +1394,10 @@ func (l *LightningWallet) handleFundingCounterPartySigs(msg *addCounterPartySigs // is complete, allowing us to spend from the funding transaction. channelValue := int64(res.partialState.Capacity) hashCache := txscript.NewTxSigHashes(commitTx) - sigHash, err := txscript.CalcWitnessSigHash(witnessScript, hashCache, - txscript.SigHashAll, commitTx, 0, channelValue) + sigHash, err := txscript.CalcWitnessSigHash( + witnessScript, hashCache, txscript.SigHashAll, commitTx, + 0, channelValue, + ) if err != nil { msg.err <- err msg.completeChan <- nil @@ -1082,24 +1406,23 @@ func (l *LightningWallet) handleFundingCounterPartySigs(msg *addCounterPartySigs // Verify that we've received a valid signature from the remote party // for our version of the commitment transaction. - theirCommitSig := msg.theirCommitmentSig - sig, err := btcec.ParseSignature(theirCommitSig, btcec.S256()) - if err != nil { - msg.err <- err - msg.completeChan <- nil - return - } else if !sig.Verify(sigHash, theirKey.PubKey) { + if !msg.theirCommitmentSig.Verify(sigHash, theirKey.PubKey) { msg.err <- fmt.Errorf("counterparty's commitment signature is invalid") msg.completeChan <- nil return } - res.partialState.LocalCommitment.CommitSig = theirCommitSig + theirCommitSigBytes := msg.theirCommitmentSig.Serialize() + res.partialState.LocalCommitment.CommitSig = theirCommitSigBytes // Funding complete, this entry can be removed from limbo. 
l.limboMtx.Lock() delete(l.fundingLimbo, res.reservationID) l.limboMtx.Unlock() + l.intentMtx.Lock() + delete(l.fundingIntents, res.pendingChanID) + l.intentMtx.Unlock() + // As we're about to broadcast the funding transaction, we'll take note // of the current height for record keeping purposes. // @@ -1122,6 +1445,13 @@ func (l *LightningWallet) handleFundingCounterPartySigs(msg *addCounterPartySigs // rebroadcast on startup in case we fail. res.partialState.FundingTxn = fundingTx + // Set optional upfront shutdown scripts on the channel state so that they + // are persisted. These values may be nil. + res.partialState.LocalShutdownScript = + res.ourContribution.UpfrontShutdown + res.partialState.RemoteShutdownScript = + res.theirContribution.UpfrontShutdown + // Add the complete funding transaction to the DB, in its open bucket // which will be used for the lifetime of this channel. nodeAddr := res.nodeAddr @@ -1164,14 +1494,13 @@ func (l *LightningWallet) handleSingleFunderSigs(req *addSingleFunderSigsMsg) { // remote node's commitment transactions. 
localBalance := pendingReservation.partialState.LocalCommitment.LocalBalance.ToSatoshis() remoteBalance := pendingReservation.partialState.LocalCommitment.RemoteBalance.ToSatoshis() - tweaklessCommits := pendingReservation.partialState.ChanType.IsTweakless() ourCommitTx, theirCommitTx, err := CreateCommitmentTxns( localBalance, remoteBalance, pendingReservation.ourContribution.ChannelConfig, pendingReservation.theirContribution.ChannelConfig, pendingReservation.ourContribution.FirstCommitmentPoint, pendingReservation.theirContribution.FirstCommitmentPoint, - *fundingTxIn, tweaklessCommits, + *fundingTxIn, pendingReservation.partialState.ChanType, ) if err != nil { req.err <- err @@ -1220,8 +1549,10 @@ func (l *LightningWallet) handleSingleFunderSigs(req *addSingleFunderSigsMsg) { return } - sigHash, err := txscript.CalcWitnessSigHash(witnessScript, hashCache, - txscript.SigHashAll, ourCommitTx, 0, channelValue) + sigHash, err := txscript.CalcWitnessSigHash( + witnessScript, hashCache, txscript.SigHashAll, ourCommitTx, 0, + channelValue, + ) if err != nil { req.err <- err req.completeChan <- nil @@ -1230,18 +1561,14 @@ func (l *LightningWallet) handleSingleFunderSigs(req *addSingleFunderSigsMsg) { // Verify that we've received a valid signature from the remote party // for our version of the commitment transaction. 
- sig, err := btcec.ParseSignature(req.theirCommitmentSig, btcec.S256()) - if err != nil { - req.err <- err - req.completeChan <- nil - return - } else if !sig.Verify(sigHash, theirKey.PubKey) { + if !req.theirCommitmentSig.Verify(sigHash, theirKey.PubKey) { req.err <- fmt.Errorf("counterparty's commitment signature " + "is invalid") req.completeChan <- nil return } - chanState.LocalCommitment.CommitSig = req.theirCommitmentSig + theirCommitSigBytes := req.theirCommitmentSig.Serialize() + chanState.LocalCommitment.CommitSig = theirCommitSigBytes // With their signature for our version of the commitment transactions // verified, we can now generate a signature for their version, @@ -1278,6 +1605,13 @@ func (l *LightningWallet) handleSingleFunderSigs(req *addSingleFunderSigsMsg) { return } + // Set optional upfront shutdown scripts on the channel state so that they + // are persisted. These values may be nil. + chanState.LocalShutdownScript = + pendingReservation.ourContribution.UpfrontShutdown + chanState.RemoteShutdownScript = + pendingReservation.theirContribution.UpfrontShutdown + // Add the complete funding transaction to the DB, in it's open bucket // which will be used for the lifetime of this channel. chanState.LocalChanCfg = pendingReservation.ourContribution.toChanConfig() @@ -1295,11 +1629,15 @@ func (l *LightningWallet) handleSingleFunderSigs(req *addSingleFunderSigsMsg) { l.limboMtx.Lock() delete(l.fundingLimbo, req.pendingFundingID) l.limboMtx.Unlock() + + l.intentMtx.Lock() + delete(l.fundingIntents, pendingReservation.pendingChanID) + l.intentMtx.Unlock() } // WithCoinSelectLock will execute the passed function closure in a // synchronized manner preventing any coin selection operations from proceeding -// while the closure if executing. This can be seen as the ability to execute a +// while the closure is executing. This can be seen as the ability to execute a // function closure under an exclusive coin selection lock. 
func (l *LightningWallet) WithCoinSelectLock(f func() error) error { l.coinSelectMtx.Lock() @@ -1308,127 +1646,6 @@ func (l *LightningWallet) WithCoinSelectLock(f func() error) error { return f() } -// coinSelection holds the result from selectCoinsAndChange. -type coinSelection struct { - coins []*wire.TxIn - change []*wire.TxOut - fundingAmt btcutil.Amount - unlockCoins func() -} - -// selectCoinsAndChange performs coin selection in order to obtain witness -// outputs which sum to at least 'amt' amount of satoshis. If necessary, -// a change address will also be generated. If coin selection is -// successful/possible, then the selected coins and change outputs are -// returned, and the value of the resulting funding output. This method locks -// the selected outputs, and a function closure to unlock them in case of an -// error is returned. -func (l *LightningWallet) selectCoinsAndChange(feeRate SatPerKWeight, - amt btcutil.Amount, minConfs int32, subtractFees bool) ( - *coinSelection, error) { - - // We hold the coin select mutex while querying for outputs, and - // performing coin selection in order to avoid inadvertent double - // spends across funding transactions. - l.coinSelectMtx.Lock() - defer l.coinSelectMtx.Unlock() - - walletLog.Infof("Performing funding tx coin selection using %v "+ - "sat/kw as fee rate", int64(feeRate)) - - // Find all unlocked unspent witness outputs that satisfy the minimum - // number of confirmations required. - coins, err := l.ListUnspentWitness(minConfs, math.MaxInt32) - if err != nil { - return nil, err - } - - var ( - selectedCoins []*Utxo - fundingAmt btcutil.Amount - changeAmt btcutil.Amount - ) - - // Perform coin selection over our available, unlocked unspent outputs - // in order to find enough coins to meet the funding amount - // requirements. - switch { - // In case this request want the fees subtracted from the local amount, - // we'll call the specialized method for that. 
This ensures that we - // won't deduct more that the specified balance from our wallet. - case subtractFees: - dustLimit := l.Cfg.DefaultConstraints.DustLimit - selectedCoins, fundingAmt, changeAmt, err = coinSelectSubtractFees( - feeRate, amt, dustLimit, coins, - ) - if err != nil { - return nil, err - } - - // Ótherwise do a normal coin selection where we target a given funding - // amount. - default: - fundingAmt = amt - selectedCoins, changeAmt, err = coinSelect(feeRate, amt, coins) - if err != nil { - return nil, err - } - } - - // Record any change output(s) generated as a result of the coin - // selection, but only if the addition of the output won't lead to the - // creation of dust. - var changeOutputs []*wire.TxOut - if changeAmt != 0 && changeAmt > DefaultDustLimit() { - changeAddr, err := l.NewAddress(WitnessPubKey, true) - if err != nil { - return nil, err - } - changeScript, err := txscript.PayToAddrScript(changeAddr) - if err != nil { - return nil, err - } - - changeOutputs = make([]*wire.TxOut, 1) - changeOutputs[0] = &wire.TxOut{ - Value: int64(changeAmt), - PkScript: changeScript, - } - } - - // Lock the selected coins. These coins are now "reserved", this - // prevents concurrent funding requests from referring to and this - // double-spending the same set of coins. - inputs := make([]*wire.TxIn, len(selectedCoins)) - for i, coin := range selectedCoins { - outpoint := &coin.OutPoint - l.lockedOutPoints[*outpoint] = struct{}{} - l.LockOutpoint(*outpoint) - - // Empty sig script, we'll actually sign if this reservation is - // queued up to be completed (the other side accepts). 
- inputs[i] = wire.NewTxIn(outpoint, nil, nil) - } - - unlock := func() { - l.coinSelectMtx.Lock() - defer l.coinSelectMtx.Unlock() - - for _, coin := range selectedCoins { - outpoint := &coin.OutPoint - delete(l.lockedOutPoints, *outpoint) - l.UnlockOutpoint(*outpoint) - } - } - - return &coinSelection{ - coins: inputs, - change: changeOutputs, - fundingAmt: fundingAmt, - unlockCoins: unlock, - }, nil -} - // DeriveStateHintObfuscator derives the bytes to be used for obfuscating the // state hints from the root to be used for a new channel. The obfuscator is // generated via the following computation: @@ -1465,179 +1682,6 @@ func initStateHints(commit1, commit2 *wire.MsgTx, return nil } -// selectInputs selects a slice of inputs necessary to meet the specified -// selection amount. If input selection is unable to succeed due to insufficient -// funds, a non-nil error is returned. Additionally, the total amount of the -// selected coins are returned in order for the caller to properly handle -// change+fees. -func selectInputs(amt btcutil.Amount, coins []*Utxo) (btcutil.Amount, []*Utxo, error) { - satSelected := btcutil.Amount(0) - for i, coin := range coins { - satSelected += coin.Value - if satSelected >= amt { - return satSelected, coins[:i+1], nil - } - } - return 0, nil, &ErrInsufficientFunds{amt, satSelected} -} - -// coinSelect attempts to select a sufficient amount of coins, including a -// change output to fund amt satoshis, adhering to the specified fee rate. The -// specified fee rate should be expressed in sat/kw for coin selection to -// function properly. -func coinSelect(feeRate SatPerKWeight, amt btcutil.Amount, - coins []*Utxo) ([]*Utxo, btcutil.Amount, error) { - - amtNeeded := amt - for { - // First perform an initial round of coin selection to estimate - // the required fee. 
- totalSat, selectedUtxos, err := selectInputs(amtNeeded, coins) - if err != nil { - return nil, 0, err - } - - var weightEstimate input.TxWeightEstimator - - for _, utxo := range selectedUtxos { - switch utxo.AddressType { - case WitnessPubKey: - weightEstimate.AddP2WKHInput() - case NestedWitnessPubKey: - weightEstimate.AddNestedP2WKHInput() - default: - return nil, 0, fmt.Errorf("unsupported address type: %v", - utxo.AddressType) - } - } - - // Channel funding multisig output is P2WSH. - weightEstimate.AddP2WSHOutput() - - // Assume that change output is a P2WKH output. - // - // TODO: Handle wallets that generate non-witness change - // addresses. - // TODO(halseth): make coinSelect not estimate change output - // for dust change. - weightEstimate.AddP2WKHOutput() - - // The difference between the selected amount and the amount - // requested will be used to pay fees, and generate a change - // output with the remaining. - overShootAmt := totalSat - amt - - // Based on the estimated size and fee rate, if the excess - // amount isn't enough to pay fees, then increase the requested - // coin amount by the estimate required fee, performing another - // round of coin selection. - totalWeight := int64(weightEstimate.Weight()) - requiredFee := feeRate.FeeForWeight(totalWeight) - if overShootAmt < requiredFee { - amtNeeded = amt + requiredFee - continue - } - - // If the fee is sufficient, then calculate the size of the - // change output. - changeAmt := overShootAmt - requiredFee - - return selectedUtxos, changeAmt, nil - } -} - -// coinSelectSubtractFees attempts to select coins such that we'll spend up to -// amt in total after fees, adhering to the specified fee rate. The selected -// coins, the final output and change values are returned. 
-func coinSelectSubtractFees(feeRate SatPerKWeight, amt, - dustLimit btcutil.Amount, coins []*Utxo) ([]*Utxo, btcutil.Amount, - btcutil.Amount, error) { - - // First perform an initial round of coin selection to estimate - // the required fee. - totalSat, selectedUtxos, err := selectInputs(amt, coins) - if err != nil { - return nil, 0, 0, err - } - - var weightEstimate input.TxWeightEstimator - for _, utxo := range selectedUtxos { - switch utxo.AddressType { - case WitnessPubKey: - weightEstimate.AddP2WKHInput() - case NestedWitnessPubKey: - weightEstimate.AddNestedP2WKHInput() - default: - return nil, 0, 0, fmt.Errorf("unsupported "+ - "address type: %v", utxo.AddressType) - } - } - - // Channel funding multisig output is P2WSH. - weightEstimate.AddP2WSHOutput() - - // At this point we've got two possibilities, either create a - // change output, or not. We'll first try without creating a - // change output. - // - // Estimate the fee required for a transaction without a change - // output. - totalWeight := int64(weightEstimate.Weight()) - requiredFee := feeRate.FeeForWeight(totalWeight) - - // For a transaction without a change output, we'll let everything go - // to our multi-sig output after subtracting fees. - outputAmt := totalSat - requiredFee - changeAmt := btcutil.Amount(0) - - // If the the output is too small after subtracting the fee, the coin - // selection cannot be performed with an amount this small. - if outputAmt <= dustLimit { - return nil, 0, 0, fmt.Errorf("output amount(%v) after "+ - "subtracting fees(%v) below dust limit(%v)", outputAmt, - requiredFee, dustLimit) - } - - // We were able to create a transaction with no change from the - // selected inputs. We'll remember the resulting values for - // now, while we try to add a change output. Assume that change output - // is a P2WKH output. - weightEstimate.AddP2WKHOutput() - - // Now that we have added the change output, redo the fee - // estimate. 
- totalWeight = int64(weightEstimate.Weight()) - requiredFee = feeRate.FeeForWeight(totalWeight) - - // For a transaction with a change output, everything we don't spend - // will go to change. - newChange := totalSat - amt - newOutput := amt - requiredFee - - // If adding a change output leads to both outputs being above - // the dust limit, we'll add the change output. Otherwise we'll - // go with the no change tx we originally found. - if newChange > dustLimit && newOutput > dustLimit { - outputAmt = newOutput - changeAmt = newChange - } - - // Sanity check the resulting output values to make sure we - // don't burn a great part to fees. - totalOut := outputAmt + changeAmt - fee := totalSat - totalOut - - // Fail if more than 20% goes to fees. - // TODO(halseth): smarter fee limit. Make configurable or dynamic wrt - // total funding size? - if fee > totalOut/5 { - return nil, 0, 0, fmt.Errorf("fee %v on total output"+ - "value %v", fee, totalOut) - } - - return selectedUtxos, outputAmt, changeAmt, nil -} - // ValidateChannel will attempt to fully validate a newly mined channel, given // its funding transaction and existing channel state. If this method returns // an error, then the mined channel is invalid, and shouldn't be used. @@ -1692,3 +1736,81 @@ func (l *LightningWallet) ValidateChannel(channelState *channeldb.OpenChannel, return nil } + +// CoinSource is a wrapper around the wallet that implements the +// chanfunding.CoinSource interface. +type CoinSource struct { + wallet *LightningWallet +} + +// NewCoinSource creates a new instance of the CoinSource wrapper struct. +func NewCoinSource(w *LightningWallet) *CoinSource { + return &CoinSource{wallet: w} +} + +// ListCoins returns all UTXOs from the source that have between +// minConfs and maxConfs number of confirmations. 
+func (c *CoinSource) ListCoins(minConfs int32, + maxConfs int32) ([]chanfunding.Coin, error) { + + utxos, err := c.wallet.ListUnspentWitness(minConfs, maxConfs) + if err != nil { + return nil, err + } + + var coins []chanfunding.Coin + for _, utxo := range utxos { + coins = append(coins, chanfunding.Coin{ + TxOut: wire.TxOut{ + Value: int64(utxo.Value), + PkScript: utxo.PkScript, + }, + OutPoint: utxo.OutPoint, + }) + } + + return coins, nil +} + +// CoinFromOutPoint attempts to locate details pertaining to a coin based on +// its outpoint. If the coin isn't under the control of the backing CoinSource, +// then an error should be returned. +func (c *CoinSource) CoinFromOutPoint(op wire.OutPoint) (*chanfunding.Coin, error) { + inputInfo, err := c.wallet.FetchInputInfo(&op) + if err != nil { + return nil, err + } + + return &chanfunding.Coin{ + TxOut: wire.TxOut{ + Value: int64(inputInfo.Value), + PkScript: inputInfo.PkScript, + }, + OutPoint: inputInfo.OutPoint, + }, nil +} + +// shimKeyRing is a wrapper struct that's used to provide the proper multi-sig +// key for an initiated external funding flow. +type shimKeyRing struct { + keychain.KeyRing + + *chanfunding.ShimIntent +} + +// DeriveNextKey intercepts the normal DeriveNextKey call to a keychain.KeyRing +// instance, and supplies the multi-sig key specified by the ShimIntent. This +// allows us to transparently insert new keys into the existing funding flow, +// as these keys may not come from the wallet itself. 
+func (s *shimKeyRing) DeriveNextKey(keyFam keychain.KeyFamily) (keychain.KeyDescriptor, error) { + if keyFam != keychain.KeyFamilyMultiSig { + return s.KeyRing.DeriveNextKey(keyFam) + } + + fundingKeys, err := s.ShimIntent.MultiSigKeys() + if err != nil { + return keychain.KeyDescriptor{}, err + } + + return *fundingKeys.LocalKey, nil +} diff --git a/lnwire/accept_channel.go b/lnwire/accept_channel.go index ef1b2828b8..da9daa69b3 100644 --- a/lnwire/accept_channel.go +++ b/lnwire/accept_channel.go @@ -86,6 +86,12 @@ type AcceptChannel struct { // base point in order to derive the revocation keys that are placed // within the commitment transaction of the sender. FirstCommitmentPoint *btcec.PublicKey + + // UpfrontShutdownScript is the script to which the channel funds should + // be paid when mutually closing the channel. This field is optional, and + // and has a length prefix, so a zero will be written if it is not set + // and its length followed by the script will be written if it is set. + UpfrontShutdownScript DeliveryAddress } // A compile time check to ensure AcceptChannel implements the lnwire.Message @@ -113,6 +119,7 @@ func (a *AcceptChannel) Encode(w io.Writer, pver uint32) error { a.DelayedPaymentPoint, a.HtlcPoint, a.FirstCommitmentPoint, + a.UpfrontShutdownScript, ) } @@ -122,7 +129,8 @@ func (a *AcceptChannel) Encode(w io.Writer, pver uint32) error { // // This is part of the lnwire.Message interface. func (a *AcceptChannel) Decode(r io.Reader, pver uint32) error { - return ReadElements(r, + // Read all the mandatory fields in the accept message. + err := ReadElements(r, a.PendingChannelID[:], &a.DustLimit, &a.MaxValueInFlight, @@ -138,6 +146,17 @@ func (a *AcceptChannel) Decode(r io.Reader, pver uint32) error { &a.HtlcPoint, &a.FirstCommitmentPoint, ) + if err != nil { + return err + } + + // Check for the optional upfront shutdown script field. If it is not there, + // silence the EOF error. 
+ err = ReadElement(r, &a.UpfrontShutdownScript) + if err != nil && err != io.EOF { + return err + } + return nil } // MsgType returns the MessageType code which uniquely identifies this message @@ -154,5 +173,10 @@ func (a *AcceptChannel) MsgType() MessageType { // This is part of the lnwire.Message interface. func (a *AcceptChannel) MaxPayloadLength(uint32) uint32 { // 32 + (8 * 4) + (4 * 1) + (2 * 2) + (33 * 6) - return 270 + var length uint32 = 270 // base length + + // Upfront shutdown script max length. + length += 2 + deliveryAddressMaxSize + + return length } diff --git a/lnwire/accept_channel_test.go b/lnwire/accept_channel_test.go new file mode 100644 index 0000000000..a1ab2be48c --- /dev/null +++ b/lnwire/accept_channel_test.go @@ -0,0 +1,71 @@ +package lnwire + +import ( + "bytes" + "testing" + + "github.com/btcsuite/btcd/btcec" +) + +// TestDecodeAcceptChannel tests decoding of an accept channel wire message with +// and without the optional upfront shutdown script. +func TestDecodeAcceptChannel(t *testing.T) { + tests := []struct { + name string + shutdownScript DeliveryAddress + }{ + { + name: "no upfront shutdown script", + shutdownScript: nil, + }, + { + name: "empty byte array", + shutdownScript: []byte{}, + }, + { + name: "upfront shutdown script set", + shutdownScript: []byte("example"), + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + priv, err := btcec.NewPrivateKey(btcec.S256()) + if err != nil { + t.Fatalf("cannot create privkey: %v", err) + } + pk := priv.PubKey() + + encoded := &AcceptChannel{ + PendingChannelID: [32]byte{}, + FundingKey: pk, + RevocationPoint: pk, + PaymentPoint: pk, + DelayedPaymentPoint: pk, + HtlcPoint: pk, + FirstCommitmentPoint: pk, + UpfrontShutdownScript: test.shutdownScript, + } + + buf := &bytes.Buffer{} + if _, err := WriteMessage(buf, encoded, 0); err != nil { + t.Fatalf("cannot write message: %v", err) + } + + msg, err := ReadMessage(buf, 0) + if err != nil { 
+ t.Fatalf("cannot read message: %v", err) + } + + decoded := msg.(*AcceptChannel) + if !bytes.Equal( + decoded.UpfrontShutdownScript, encoded.UpfrontShutdownScript, + ) { + t.Fatalf("decoded script: %x does not equal encoded script: %x", + decoded.UpfrontShutdownScript, encoded.UpfrontShutdownScript) + } + }) + } +} diff --git a/lnwire/channel_id.go b/lnwire/channel_id.go index 8e8ef67c3d..0a9e082265 100644 --- a/lnwire/channel_id.go +++ b/lnwire/channel_id.go @@ -56,11 +56,11 @@ func NewChanIDFromOutPoint(op *wire.OutPoint) ChannelID { // ChannelID. To do this, we expect the cid parameter to contain the txid // unaltered and the outputIndex to be the output index func xorTxid(cid *ChannelID, outputIndex uint16) { - var buf [32]byte - binary.BigEndian.PutUint16(buf[30:], outputIndex) + var buf [2]byte + binary.BigEndian.PutUint16(buf[:], outputIndex) - cid[30] = cid[30] ^ buf[30] - cid[31] = cid[31] ^ buf[31] + cid[30] ^= buf[0] + cid[31] ^= buf[1] } // GenPossibleOutPoints generates all the possible outputs given a channel ID. @@ -69,13 +69,13 @@ func xorTxid(cid *ChannelID, outputIndex uint16) { // mapping from channelID back to OutPoint. func (c *ChannelID) GenPossibleOutPoints() [MaxFundingTxOutputs]wire.OutPoint { var possiblePoints [MaxFundingTxOutputs]wire.OutPoint - for i := uint32(0); i < MaxFundingTxOutputs; i++ { + for i := uint16(0); i < MaxFundingTxOutputs; i++ { cidCopy := *c - xorTxid(&cidCopy, uint16(i)) + xorTxid(&cidCopy, i) possiblePoints[i] = wire.OutPoint{ Hash: chainhash.Hash(cidCopy), - Index: i, + Index: uint32(i), } } diff --git a/lnwire/features.go b/lnwire/features.go index 9a34a6373f..4e5899c22c 100644 --- a/lnwire/features.go +++ b/lnwire/features.go @@ -2,10 +2,16 @@ package lnwire import ( "encoding/binary" - "fmt" + "errors" "io" ) +var ( + // ErrFeaturePairExists signals an error in feature vector construction + // where the opposing bit in a feature pair has already been set. 
+ ErrFeaturePairExists = errors.New("feature pair exists") +) + // FeatureBit represents a feature that can be enabled in either a local or // global feature vector at a specific bit position. Feature bits follow the // "it's OK to be odd" rule, where features at even bit positions must be known @@ -35,6 +41,16 @@ const ( // connection is established. InitialRoutingSync FeatureBit = 3 + // UpfrontShutdownScriptRequired is a feature bit which indicates that a + // peer *requires* that the remote peer accept an upfront shutdown script to + // which payout is enforced on cooperative closes. + UpfrontShutdownScriptRequired FeatureBit = 4 + + // UpfrontShutdownScriptOptional is an optional feature bit which indicates + // that the peer will accept an upfront shutdown script to which payout is + // enforced on cooperative closes. + UpfrontShutdownScriptOptional FeatureBit = 5 + // GossipQueriesRequired is a feature bit that indicates that the // receiving peer MUST know of the set of features that allows nodes to // more efficiently query the network view of peers on the network for @@ -65,6 +81,36 @@ const ( // party's non-delay output should not be tweaked. StaticRemoteKeyOptional FeatureBit = 13 + // PaymentAddrRequired is a required feature bit that signals that a + // node requires payment addresses, which are used to mitigate probing + // attacks on the receiver of a payment. + PaymentAddrRequired FeatureBit = 14 + + // PaymentAddrOptional is an optional feature bit that signals that a + // node supports payment addresses, which are used to mitigate probing + // attacks on the receiver of a payment. + PaymentAddrOptional FeatureBit = 15 + + // MPPOptional is a required feature bit that signals that the receiver + // of a payment requires settlement of an invoice with more than one + // HTLC. 
+ MPPRequired FeatureBit = 16 + + // MPPOptional is an optional feature bit that signals that the receiver + // of a payment supports settlement of an invoice with more than one + // HTLC. + MPPOptional FeatureBit = 17 + + // AnchorsRequired is a required feature bit that signals that the node + // requires channels to be made using commitments having anchor + // outputs. + AnchorsRequired FeatureBit = 1336 + + // AnchorsRequired is an optional feature bit that signals that the + // node supports channels to be made using commitments having anchor + // outputs. + AnchorsOptional FeatureBit = 1337 + // maxAllowedSize is a maximum allowed size of feature vector. // // NOTE: Within the protocol, the maximum allowed message size is 65535 @@ -78,28 +124,32 @@ const ( maxAllowedSize = 32764 ) -// LocalFeatures is a mapping of known connection-local feature bits to a -// descriptive name. All known local feature bits must be assigned a name in -// this mapping. Local features are those which are only sent to the peer and -// not advertised to the entire network. A full description of these feature -// bits is provided in the BOLT-09 specification. -var LocalFeatures = map[FeatureBit]string{ - DataLossProtectRequired: "data-loss-protect", - DataLossProtectOptional: "data-loss-protect", - InitialRoutingSync: "initial-routing-sync", - GossipQueriesRequired: "gossip-queries", - GossipQueriesOptional: "gossip-queries", +// IsRequired returns true if the feature bit is even, and false otherwise. +func (b FeatureBit) IsRequired() bool { + return b&0x01 == 0x00 } -// GlobalFeatures is a mapping of known global feature bits to a descriptive -// name. All known global feature bits must be assigned a name in this mapping. -// Global features are those which are advertised to the entire network. A full -// description of these feature bits is provided in the BOLT-09 specification. 
-var GlobalFeatures = map[FeatureBit]string{ - TLVOnionPayloadRequired: "tlv-onion", - TLVOnionPayloadOptional: "tlv-onion", - StaticRemoteKeyOptional: "static-remote-key", - StaticRemoteKeyRequired: "static-remote-key", +// Features is a mapping of known feature bits to a descriptive name. All known +// feature bits must be assigned a name in this mapping, and feature bit pairs +// must be assigned together for correct behavior. +var Features = map[FeatureBit]string{ + DataLossProtectRequired: "data-loss-protect", + DataLossProtectOptional: "data-loss-protect", + InitialRoutingSync: "initial-routing-sync", + UpfrontShutdownScriptRequired: "upfront-shutdown-script", + UpfrontShutdownScriptOptional: "upfront-shutdown-script", + GossipQueriesRequired: "gossip-queries", + GossipQueriesOptional: "gossip-queries", + TLVOnionPayloadRequired: "tlv-onion", + TLVOnionPayloadOptional: "tlv-onion", + StaticRemoteKeyOptional: "static-remote-key", + StaticRemoteKeyRequired: "static-remote-key", + PaymentAddrOptional: "payment-addr", + PaymentAddrRequired: "payment-addr", + MPPOptional: "multi-path-payments", + MPPRequired: "multi-path-payments", + AnchorsRequired: "anchor-commitments", + AnchorsOptional: "anchor-commitments", } // RawFeatureVector represents a set of feature bits as defined in BOLT-09. A @@ -121,6 +171,26 @@ func NewRawFeatureVector(bits ...FeatureBit) *RawFeatureVector { return fv } +// Merges sets all feature bits in other on the receiver's feature vector. +func (fv *RawFeatureVector) Merge(other *RawFeatureVector) error { + for bit := range other.features { + err := fv.SafeSet(bit) + if err != nil { + return err + } + } + return nil +} + +// Clone makes a copy of a feature vector. +func (fv *RawFeatureVector) Clone() *RawFeatureVector { + newFeatures := NewRawFeatureVector() + for bit := range fv.features { + newFeatures.Set(bit) + } + return newFeatures +} + // IsSet returns whether a particular feature bit is enabled in the vector. 
func (fv *RawFeatureVector) IsSet(feature FeatureBit) bool { return fv.features[feature] @@ -131,6 +201,20 @@ func (fv *RawFeatureVector) Set(feature FeatureBit) { fv.features[feature] = true } +// SafeSet sets the chosen feature bit in the feature vector, but returns an +// error if the opposing feature bit is already set. This ensures both that we +// are creating properly structured feature vectors, and in some cases, that +// peers are sending properly encoded ones, i.e. it can't be both optional and +// required. +func (fv *RawFeatureVector) SafeSet(feature FeatureBit) error { + if _, ok := fv.features[feature^1]; ok { + return ErrFeaturePairExists + } + + fv.Set(feature) + return nil +} + // Unset marks a feature as disabled in the vector. func (fv *RawFeatureVector) Unset(feature FeatureBit) { delete(fv.features, feature) @@ -184,8 +268,16 @@ func (fv *RawFeatureVector) Encode(w io.Writer) error { return fv.encode(w, length, 8) } +// EncodeBase256 writes the feature vector in base256 representation. Every +// feature is encoded as a bit, and the bit vector is serialized using the least +// number of bytes. +func (fv *RawFeatureVector) EncodeBase256(w io.Writer) error { + length := fv.SerializeSize() + return fv.encode(w, length, 8) +} + // EncodeBase32 writes the feature vector in base32 representation. Every feature -// encoded as a bit, and the bit vector is serialized using the least number of +// is encoded as a bit, and the bit vector is serialized using the least number of // bytes. func (fv *RawFeatureVector) EncodeBase32(w io.Writer) error { length := fv.SerializeSize32() @@ -207,8 +299,8 @@ func (fv *RawFeatureVector) encode(w io.Writer, length, width int) error { } // Decode reads the feature vector from its byte representation. Every feature -// encoded as a bit, and the bit vector is serialized using the least number of -// bytes. 
Since the bit vector length is variable, the first two bytes of the +// is encoded as a bit, and the bit vector is serialized using the least number +// of bytes. Since the bit vector length is variable, the first two bytes of the // serialization represent the length. func (fv *RawFeatureVector) Decode(r io.Reader) error { // Read the length of the feature vector. @@ -221,6 +313,13 @@ func (fv *RawFeatureVector) Decode(r io.Reader) error { return fv.decode(r, int(length), 8) } +// DecodeBase256 reads the feature vector from its base256 representation. Every +// feature encoded as a bit, and the bit vector is serialized using the least +// number of bytes. +func (fv *RawFeatureVector) DecodeBase256(r io.Reader, length int) error { + return fv.decode(r, length, 8) +} + // DecodeBase32 reads the feature vector from its base32 representation. Every // feature encoded as a bit, and the bit vector is serialized using the least // number of bytes. @@ -274,6 +373,11 @@ func NewFeatureVector(featureVector *RawFeatureVector, } } +// EmptyFeatureVector returns a feature vector with no bits set. +func EmptyFeatureVector() *FeatureVector { + return NewFeatureVector(nil, Features) +} + // HasFeature returns whether a particular feature is included in the set. The // feature can be seen as set either if the bit is set directly OR the queried // bit has the same meaning as its corresponding even/odd bit, which is set @@ -303,9 +407,9 @@ func (fv *FeatureVector) UnknownRequiredFeatures() []FeatureBit { func (fv *FeatureVector) Name(bit FeatureBit) string { name, known := fv.featureNames[bit] if !known { - name = "unknown" + return "unknown" } - return fmt.Sprintf("%s(%d)", name, bit) + return name } // IsKnown returns whether this feature bit represents a known feature. 
@@ -324,3 +428,19 @@ func (fv *FeatureVector) isFeatureBitPair(bit FeatureBit) bool { name2, known2 := fv.featureNames[bit^1] return known1 && known2 && name1 == name2 } + +// Features returns the set of raw features contained in the feature vector. +func (fv *FeatureVector) Features() map[FeatureBit]struct{} { + fs := make(map[FeatureBit]struct{}, len(fv.RawFeatureVector.features)) + for b := range fv.RawFeatureVector.features { + fs[b] = struct{}{} + } + return fs +} + +// Clone copies a feature vector, carrying over its feature bits. The feature +// names are not copied. +func (fv *FeatureVector) Clone() *FeatureVector { + features := fv.RawFeatureVector.Clone() + return NewFeatureVector(features, fv.featureNames) +} diff --git a/lnwire/features_test.go b/lnwire/features_test.go index 6a80bb8331..1cff8f5287 100644 --- a/lnwire/features_test.go +++ b/lnwire/features_test.go @@ -205,42 +205,42 @@ func TestFeatureNames(t *testing.T) { }{ { bit: 0, - expectedName: "feature1(0)", + expectedName: "feature1", expectedKnown: true, }, { bit: 1, - expectedName: "unknown(1)", + expectedName: "unknown", expectedKnown: false, }, { bit: 2, - expectedName: "unknown(2)", + expectedName: "unknown", expectedKnown: false, }, { bit: 3, - expectedName: "feature2(3)", + expectedName: "feature2", expectedKnown: true, }, { bit: 4, - expectedName: "feature3(4)", + expectedName: "feature3", expectedKnown: true, }, { bit: 5, - expectedName: "feature3(5)", + expectedName: "feature3", expectedKnown: true, }, { bit: 6, - expectedName: "unknown(6)", + expectedName: "unknown", expectedKnown: false, }, { bit: 7, - expectedName: "unknown(7)", + expectedName: "unknown", expectedKnown: false, }, } @@ -260,3 +260,68 @@ func TestFeatureNames(t *testing.T) { } } } + +// TestIsRequired asserts that feature bits properly return their IsRequired +// status. We require that even features be required and odd features be +// optional. 
+func TestIsRequired(t *testing.T) { + optional := FeatureBit(1) + if optional.IsRequired() { + t.Fatalf("optional feature should not be required") + } + + required := FeatureBit(0) + if !required.IsRequired() { + t.Fatalf("required feature should be required") + } +} + +// TestFeatures asserts that the Features() method on a FeatureVector properly +// returns the set of feature bits it stores internallly. +func TestFeatures(t *testing.T) { + tests := []struct { + name string + exp map[FeatureBit]struct{} + }{ + { + name: "empty", + exp: map[FeatureBit]struct{}{}, + }, + { + name: "one", + exp: map[FeatureBit]struct{}{ + 5: {}, + }, + }, + { + name: "several", + exp: map[FeatureBit]struct{}{ + 0: {}, + 5: {}, + 23948: {}, + }, + }, + } + + toRawFV := func(set map[FeatureBit]struct{}) *RawFeatureVector { + var bits []FeatureBit + for bit := range set { + bits = append(bits, bit) + } + return NewRawFeatureVector(bits...) + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + fv := NewFeatureVector( + toRawFV(test.exp), Features, + ) + + if !reflect.DeepEqual(fv.Features(), test.exp) { + t.Fatalf("feature mismatch, want: %v, got: %v", + test.exp, fv.Features()) + } + }) + } +} diff --git a/lnwire/init_message.go b/lnwire/init_message.go index f64f9f8cab..0236a71f84 100644 --- a/lnwire/init_message.go +++ b/lnwire/init_message.go @@ -7,20 +7,26 @@ import "io" // diagnosis where features are incompatible. Each node MUST wait to receive // init before sending any other messages. type Init struct { - // GlobalFeatures is feature vector which affects HTLCs and thus are - // also advertised to other nodes. + // GlobalFeatures is a legacy feature vector used for backwards + // compatibility with older nodes. Any features defined here should be + // merged with those presented in Features. GlobalFeatures *RawFeatureVector - // LocalFeatures is feature vector which only affect the protocol - // between two nodes. 
- LocalFeatures *RawFeatureVector + // Features is a feature vector containing a the features supported by + // the remote node. + // + // NOTE: Older nodes may place some features in GlobalFeatures, but all + // new features are to be added in Features. When handling an Init + // message, any GlobalFeatures should be merged into the unified + // Features field. + Features *RawFeatureVector } // NewInitMessage creates new instance of init message object. -func NewInitMessage(gf *RawFeatureVector, lf *RawFeatureVector) *Init { +func NewInitMessage(gf *RawFeatureVector, f *RawFeatureVector) *Init { return &Init{ GlobalFeatures: gf, - LocalFeatures: lf, + Features: f, } } @@ -35,7 +41,7 @@ var _ Message = (*Init)(nil) func (msg *Init) Decode(r io.Reader, pver uint32) error { return ReadElements(r, &msg.GlobalFeatures, - &msg.LocalFeatures, + &msg.Features, ) } @@ -46,7 +52,7 @@ func (msg *Init) Decode(r io.Reader, pver uint32) error { func (msg *Init) Encode(w io.Writer, pver uint32) error { return WriteElements(w, msg.GlobalFeatures, - msg.LocalFeatures, + msg.Features, ) } diff --git a/lnwire/lnwire.go b/lnwire/lnwire.go index e1f6a7dd4f..4e9871b230 100644 --- a/lnwire/lnwire.go +++ b/lnwire/lnwire.go @@ -816,8 +816,8 @@ func ReadElement(r io.Reader, element interface{}) error { } length := binary.BigEndian.Uint16(addrLen[:]) - var addrBytes [34]byte - if length > 34 { + var addrBytes [deliveryAddressMaxSize]byte + if length > deliveryAddressMaxSize { return fmt.Errorf("Cannot read %d bytes into addrBytes", length) } if _, err = io.ReadFull(r, addrBytes[:length]); err != nil { diff --git a/lnwire/lnwire_test.go b/lnwire/lnwire_test.go index b7604f5d4b..02023b0231 100644 --- a/lnwire/lnwire_test.go +++ b/lnwire/lnwire_test.go @@ -67,6 +67,15 @@ func randRawKey() ([33]byte, error) { return n, nil } +func randDeliveryAddress(r *rand.Rand) (DeliveryAddress, error) { + // Generate size minimum one. Empty scripts should be tested specifically. 
+ size := r.Intn(deliveryAddressMaxSize) + 1 + da := DeliveryAddress(make([]byte, size)) + + _, err := r.Read(da) + return da, err +} + func randRawFeatureVector(r *rand.Rand) *RawFeatureVector { featureVec := NewRawFeatureVector() for i := 0; i < 10000; i++ { @@ -241,6 +250,9 @@ func TestEmptyMessageUnknownType(t *testing.T) { // TestLightningWireProtocol uses the testing/quick package to create a series // of fuzz tests to attempt to break a primary scenario which is implemented as // property based testing scenario. +// +// Debug help: when the message payload can reach a size larger than the return +// value of MaxPayloadLength, the test can panic without a helpful message. func TestLightningWireProtocol(t *testing.T) { t.Parallel() @@ -353,6 +365,17 @@ func TestLightningWireProtocol(t *testing.T) { return } + // 1/2 chance empty upfront shutdown script. + if r.Intn(2) == 0 { + req.UpfrontShutdownScript, err = randDeliveryAddress(r) + if err != nil { + t.Fatalf("unable to generate delivery address: %v", err) + return + } + } else { + req.UpfrontShutdownScript = []byte{} + } + v[0] = reflect.ValueOf(req) }, MsgAcceptChannel: func(v []reflect.Value, r *rand.Rand) { @@ -403,6 +426,17 @@ func TestLightningWireProtocol(t *testing.T) { return } + // 1/2 chance empty upfront shutdown script. + if r.Intn(2) == 0 { + req.UpfrontShutdownScript, err = randDeliveryAddress(r) + if err != nil { + t.Fatalf("unable to generate delivery address: %v", err) + return + } + } else { + req.UpfrontShutdownScript = []byte{} + } + v[0] = reflect.ValueOf(req) }, MsgFundingCreated: func(v []reflect.Value, r *rand.Rand) { diff --git a/lnwire/msat.go b/lnwire/msat.go index 67ee497257..d3789dfa31 100644 --- a/lnwire/msat.go +++ b/lnwire/msat.go @@ -6,9 +6,15 @@ import ( "github.com/btcsuite/btcutil" ) -// mSatScale is a value that's used to scale satoshis to milli-satoshis, and -// the other way around. 
-const mSatScale uint64 = 1000 +const ( + // mSatScale is a value that's used to scale satoshis to milli-satoshis, and + // the other way around. + mSatScale uint64 = 1000 + + // MaxMilliSatoshi is the maximum number of msats that can be expressed + // in this data type. + MaxMilliSatoshi = ^MilliSatoshi(0) +) // MilliSatoshi are the native unit of the Lightning Network. A milli-satoshi // is simply 1/1000th of a satoshi. There are 1000 milli-satoshis in a single diff --git a/lnwire/node_announcement.go b/lnwire/node_announcement.go index 4ea64fa646..f0d897bc91 100644 --- a/lnwire/node_announcement.go +++ b/lnwire/node_announcement.go @@ -101,14 +101,6 @@ type NodeAnnouncement struct { ExtraOpaqueData []byte } -// UpdateNodeAnnAddrs is a functional option that allows updating the addresses -// of the given node announcement. -func UpdateNodeAnnAddrs(addrs []net.Addr) func(*NodeAnnouncement) { - return func(nodeAnn *NodeAnnouncement) { - nodeAnn.Addresses = addrs - } -} - // A compile time check to ensure NodeAnnouncement implements the // lnwire.Message interface. var _ Message = (*NodeAnnouncement)(nil) diff --git a/lnwire/onion_error.go b/lnwire/onion_error.go index bf75b63f64..c6235552e9 100644 --- a/lnwire/onion_error.go +++ b/lnwire/onion_error.go @@ -11,6 +11,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" + "github.com/lightningnetwork/lnd/tlv" ) // FailureMessage represents the onion failure object identified by its unique @@ -78,6 +79,8 @@ const ( CodeFinalIncorrectCltvExpiry FailCode = 18 CodeFinalIncorrectHtlcAmount FailCode = 19 CodeExpiryTooFar FailCode = 21 + CodeInvalidOnionPayload = FlagPerm | 22 + CodeMPPTimeout FailCode = 23 ) // String returns the string representation of the failure code. 
@@ -149,6 +152,12 @@ func (c FailCode) String() string { case CodeExpiryTooFar: return "ExpiryTooFar" + case CodeInvalidOnionPayload: + return "InvalidOnionPayload" + + case CodeMPPTimeout: + return "MPPTimeout" + default: return "" } @@ -1117,6 +1126,86 @@ func (f *FailExpiryTooFar) Error() string { return f.Code().String() } +// InvalidOnionPayload is returned if the hop could not process the TLV payload +// enclosed in the onion. +type InvalidOnionPayload struct { + // Type is the TLV type that caused the specific failure. + Type uint64 + + // Offset is the byte offset within the payload where the failure + // occurred. + Offset uint16 +} + +// NewInvalidOnionPayload initializes a new InvalidOnionPayload failure. +func NewInvalidOnionPayload(typ uint64, offset uint16) *InvalidOnionPayload { + return &InvalidOnionPayload{ + Type: typ, + Offset: offset, + } +} + +// Code returns the failure unique code. +// +// NOTE: Part of the FailureMessage interface. +func (f *InvalidOnionPayload) Code() FailCode { + return CodeInvalidOnionPayload +} + +// Returns a human readable string describing the target FailureMessage. +// +// NOTE: Implements the error interface. +func (f *InvalidOnionPayload) Error() string { + return fmt.Sprintf("%v(type=%v, offset=%d)", + f.Code(), f.Type, f.Offset) +} + +// Decode decodes the failure from bytes stream. +// +// NOTE: Part of the Serializable interface. +func (f *InvalidOnionPayload) Decode(r io.Reader, pver uint32) error { + var buf [8]byte + typ, err := tlv.ReadVarInt(r, &buf) + if err != nil { + return err + } + f.Type = typ + + return ReadElements(r, &f.Offset) +} + +// Encode writes the failure in bytes stream. +// +// NOTE: Part of the Serializable interface. 
+func (f *InvalidOnionPayload) Encode(w io.Writer, pver uint32) error { + var buf [8]byte + if err := tlv.WriteVarInt(w, f.Type, &buf); err != nil { + return err + } + + return WriteElements(w, f.Offset) +} + +// FailMPPTimeout is returned if the complete amount for a multi part payment +// was not received within a reasonable time. +// +// NOTE: May only be returned by the final node in the path. +type FailMPPTimeout struct{} + +// Code returns the failure unique code. +// +// NOTE: Part of the FailureMessage interface. +func (f *FailMPPTimeout) Code() FailCode { + return CodeMPPTimeout +} + +// Returns a human readable string describing the target FailureMessage. +// +// NOTE: Implements the error interface. +func (f *FailMPPTimeout) Error() string { + return f.Code().String() +} + // DecodeFailure decodes, validates, and parses the lnwire onion failure, for // the provided protocol version. func DecodeFailure(r io.Reader, pver uint32) (FailureMessage, error) { @@ -1298,6 +1387,12 @@ func makeEmptyOnionError(code FailCode) (FailureMessage, error) { case CodeExpiryTooFar: return &FailExpiryTooFar{}, nil + case CodeInvalidOnionPayload: + return &InvalidOnionPayload{}, nil + + case CodeMPPTimeout: + return &FailMPPTimeout{}, nil + default: return nil, errors.Errorf("unknown error code: %v", code) } diff --git a/lnwire/onion_error_test.go b/lnwire/onion_error_test.go index 8420be1f73..3ec147d1dd 100644 --- a/lnwire/onion_error_test.go +++ b/lnwire/onion_error_test.go @@ -16,6 +16,8 @@ var ( testAmount = MilliSatoshi(1) testCtlvExpiry = uint32(2) testFlags = uint16(2) + testType = uint64(3) + testOffset = uint16(24) sig, _ = NewSigFromSignature(testSig) testChannelUpdate = ChannelUpdate{ Signature: sig, @@ -36,6 +38,7 @@ var onionFailures = []FailureMessage{ &FailUnknownNextPeer{}, &FailIncorrectPaymentAmount{}, &FailFinalExpiryTooSoon{}, + &FailMPPTimeout{}, NewFailIncorrectDetails(99, 100), NewInvalidOnionVersion(testOnionHash), @@ -50,6 +53,7 @@ var onionFailures = 
[]FailureMessage{ NewChannelDisabled(testFlags, testChannelUpdate), NewFinalIncorrectCltvExpiry(testCtlvExpiry), NewFinalIncorrectHtlcAmount(testAmount), + NewInvalidOnionPayload(testType, testOffset), } // TestEncodeDecodeCode tests the ability of onion errors to be properly encoded diff --git a/lnwire/open_channel.go b/lnwire/open_channel.go index ccd01ef6d6..f78cc26eff 100644 --- a/lnwire/open_channel.go +++ b/lnwire/open_channel.go @@ -122,6 +122,12 @@ type OpenChannel struct { // Currently, the least significant bit of this bit field indicates the // initiator of the channel wishes to advertise this channel publicly. ChannelFlags FundingFlag + + // UpfrontShutdownScript is the script to which the channel funds should + // be paid when mutually closing the channel. This field is optional, + // and has a length prefix, so a zero will be written if it is not set + // and its length followed by the script will be written if it is set. + UpfrontShutdownScript DeliveryAddress } // A compile time check to ensure OpenChannel implements the lnwire.Message @@ -153,6 +159,7 @@ func (o *OpenChannel) Encode(w io.Writer, pver uint32) error { o.HtlcPoint, o.FirstCommitmentPoint, o.ChannelFlags, + o.UpfrontShutdownScript, ) } @@ -162,7 +169,7 @@ func (o *OpenChannel) Encode(w io.Writer, pver uint32) error { // // This is part of the lnwire.Message interface. func (o *OpenChannel) Decode(r io.Reader, pver uint32) error { - return ReadElements(r, + if err := ReadElements(r, o.ChainHash[:], o.PendingChannelID[:], &o.FundingAmount, @@ -181,7 +188,18 @@ func (o *OpenChannel) Decode(r io.Reader, pver uint32) error { &o.HtlcPoint, &o.FirstCommitmentPoint, &o.ChannelFlags, - ) + ); err != nil { + return err + } + + // Check for the optional upfront shutdown script field. If it is not there, + // silence the EOF error. 
+ err := ReadElement(r, &o.UpfrontShutdownScript) + if err != nil && err != io.EOF { + return err + } + + return nil } // MsgType returns the MessageType code which uniquely identifies this message @@ -198,5 +216,10 @@ func (o *OpenChannel) MsgType() MessageType { // This is part of the lnwire.Message interface. func (o *OpenChannel) MaxPayloadLength(uint32) uint32 { // (32 * 2) + (8 * 6) + (4 * 1) + (2 * 2) + (33 * 6) + 1 - return 319 + var length uint32 = 319 // base length + + // Upfront shutdown script max length. + length += 2 + deliveryAddressMaxSize + + return length } diff --git a/lnwire/query_channel_range.go b/lnwire/query_channel_range.go index f910cb9881..9546fcd32a 100644 --- a/lnwire/query_channel_range.go +++ b/lnwire/query_channel_range.go @@ -2,6 +2,7 @@ package lnwire import ( "io" + "math" "github.com/btcsuite/btcd/chaincfg/chainhash" ) @@ -75,3 +76,14 @@ func (q *QueryChannelRange) MaxPayloadLength(uint32) uint32 { // 32 + 4 + 4 return 40 } + +// LastBlockHeight returns the last block height covered by the range of a +// QueryChannelRange message. +func (q *QueryChannelRange) LastBlockHeight() uint32 { + // Handle overflows by casting to uint64. + lastBlockHeight := uint64(q.FirstBlockHeight) + uint64(q.NumBlocks) - 1 + if lastBlockHeight > math.MaxUint32 { + return math.MaxUint32 + } + return uint32(lastBlockHeight) +} diff --git a/lnwire/query_short_chan_ids.go b/lnwire/query_short_chan_ids.go index 64521ce35c..76729364ec 100644 --- a/lnwire/query_short_chan_ids.go +++ b/lnwire/query_short_chan_ids.go @@ -35,6 +35,19 @@ const ( maxZlibBufSize = 67413630 ) +// ErrUnsortedSIDs is returned when decoding a QueryShortChannelID request whose +// items were not sorted. +type ErrUnsortedSIDs struct { + prevSID ShortChannelID + curSID ShortChannelID +} + +// Error returns a human-readable description of the error. 
+func (e ErrUnsortedSIDs) Error() string { + return fmt.Sprintf("current sid: %v isn't greater than last sid: %v", + e.curSID, e.prevSID) +} + // zlibDecodeMtx is a package level mutex that we'll use in order to ensure // that we'll only attempt a single zlib decoding instance at a time. This // allows us to also further bound our memory usage. @@ -67,6 +80,12 @@ type QueryShortChanIDs struct { // ShortChanIDs is a slice of decoded short channel ID's. ShortChanIDs []ShortChannelID + + // noSort indicates whether or not to sort the short channel ids before + // writing them out. + // + // NOTE: This should only be used during testing. + noSort bool } // NewQueryShortChanIDs creates a new QueryShortChanIDs message. @@ -113,7 +132,7 @@ func decodeShortChanIDs(r io.Reader) (ShortChanIDEncoding, []ShortChannelID, err } if numBytesResp == 0 { - return 0, nil, fmt.Errorf("No encoding type specified") + return 0, nil, nil } queryBody := make([]byte, numBytesResp) @@ -158,11 +177,18 @@ func decodeShortChanIDs(r io.Reader) (ShortChanIDEncoding, []ShortChannelID, err // ID's to conclude our parsing. shortChanIDs := make([]ShortChannelID, numShortChanIDs) bodyReader := bytes.NewReader(queryBody) + var lastChanID ShortChannelID for i := 0; i < numShortChanIDs; i++ { if err := ReadElements(bodyReader, &shortChanIDs[i]); err != nil { return 0, nil, fmt.Errorf("unable to parse "+ "short chan ID: %v", err) } + + cid := shortChanIDs[i] + if cid.ToUint64() <= lastChanID.ToUint64() { + return 0, nil, ErrUnsortedSIDs{lastChanID, cid} + } + lastChanID = cid } return encodingType, shortChanIDs, nil @@ -177,6 +203,13 @@ func decodeShortChanIDs(r io.Reader) (ShortChanIDEncoding, []ShortChannelID, err zlibDecodeMtx.Lock() defer zlibDecodeMtx.Unlock() + // At this point, if there's no body remaining, then only the encoding + // type was specified, meaning that there're no further bytes to be + // parsed. 
+ if len(queryBody) == 0 { + return encodingType, nil, nil + } + // Before we start to decode, we'll create a limit reader over // the current reader. This will ensure that we can control how // much memory we're allocating during the decoding process. @@ -224,9 +257,7 @@ func decodeShortChanIDs(r io.Reader) (ShortChanIDEncoding, []ShortChannelID, err // within the encoding, and if violated can aide us in // detecting malicious payloads. if cid.ToUint64() <= lastChanID.ToUint64() { - return 0, nil, fmt.Errorf("current sid of %v "+ - "isn't greater than last sid of %v", cid, - lastChanID) + return 0, nil, ErrUnsortedSIDs{lastChanID, cid} } lastChanID = cid @@ -253,20 +284,23 @@ func (q *QueryShortChanIDs) Encode(w io.Writer, pver uint32) error { // Base on our encoding type, we'll write out the set of short channel // ID's. - return encodeShortChanIDs(w, q.EncodingType, q.ShortChanIDs) + return encodeShortChanIDs(w, q.EncodingType, q.ShortChanIDs, q.noSort) } // encodeShortChanIDs encodes the passed short channel ID's into the passed // io.Writer, respecting the specified encoding type. func encodeShortChanIDs(w io.Writer, encodingType ShortChanIDEncoding, - shortChanIDs []ShortChannelID) error { + shortChanIDs []ShortChannelID, noSort bool) error { // For both of the current encoding types, the channel ID's are to be - // sorted in place, so we'll do that now. - sort.Slice(shortChanIDs, func(i, j int) bool { - return shortChanIDs[i].ToUint64() < - shortChanIDs[j].ToUint64() - }) + // sorted in place, so we'll do that now. The sorting is applied unless + // we were specifically requested not to for testing purposes. 
+ if !noSort { + sort.Slice(shortChanIDs, func(i, j int) bool { + return shortChanIDs[i].ToUint64() < + shortChanIDs[j].ToUint64() + }) + } switch encodingType { @@ -311,27 +345,43 @@ func encodeShortChanIDs(w io.Writer, encodingType ShortChanIDEncoding, var buf bytes.Buffer zlibWriter := zlib.NewWriter(&buf) - // Next, we'll write out all the channel ID's directly into the - // zlib writer, which will do compressing on the fly. - for _, chanID := range shortChanIDs { - err := WriteElements(zlibWriter, chanID) - if err != nil { - return fmt.Errorf("unable to write short chan "+ - "ID: %v", err) + // If we don't have anything at all to write, then we'll write + // an empty payload so we don't include things like the zlib + // header when the remote party is expecting no actual short + // channel IDs. + var compressedPayload []byte + if len(shortChanIDs) > 0 { + // Next, we'll write out all the channel ID's directly + // into the zlib writer, which will do compressing on + // the fly. + for _, chanID := range shortChanIDs { + err := WriteElements(zlibWriter, chanID) + if err != nil { + return fmt.Errorf("unable to write short chan "+ + "ID: %v", err) + } + } + + // Now that we've written all the elements, we'll + // ensure the compressed stream is written to the + // underlying buffer. + if err := zlibWriter.Close(); err != nil { + return fmt.Errorf("unable to finalize "+ + "compression: %v", err) } - } - // Now that we've written all the elements, we'll ensure the - // compressed stream is written to the underlying buffer. - if err := zlibWriter.Close(); err != nil { - return fmt.Errorf("unable to finalize "+ - "compression: %v", err) + compressedPayload = buf.Bytes() } // Now that we have all the items compressed, we can compute // what the total payload size will be. We add one to account // for the byte to encode the type. 
- compressedPayload := buf.Bytes() + // + // If we don't have any actual bytes to write, then we'll end + // up emitting one byte for the length, followed by the + // encoding type, and nothing more. The spec isn't 100% clear + // in this area, but we do this as this is what most of the + // other implementations do. numBytesBody := len(compressedPayload) + 1 // Finally, we can write out the number of bytes, the diff --git a/lnwire/query_short_chan_ids_test.go b/lnwire/query_short_chan_ids_test.go new file mode 100644 index 0000000000..7d0538f53a --- /dev/null +++ b/lnwire/query_short_chan_ids_test.go @@ -0,0 +1,75 @@ +package lnwire + +import ( + "bytes" + "testing" +) + +type unsortedSidTest struct { + name string + encType ShortChanIDEncoding + sids []ShortChannelID +} + +var ( + unsortedSids = []ShortChannelID{ + NewShortChanIDFromInt(4), + NewShortChanIDFromInt(3), + } + + duplicateSids = []ShortChannelID{ + NewShortChanIDFromInt(3), + NewShortChanIDFromInt(3), + } + + unsortedSidTests = []unsortedSidTest{ + { + name: "plain unsorted", + encType: EncodingSortedPlain, + sids: unsortedSids, + }, + { + name: "plain duplicate", + encType: EncodingSortedPlain, + sids: duplicateSids, + }, + { + name: "zlib unsorted", + encType: EncodingSortedZlib, + sids: unsortedSids, + }, + { + name: "zlib duplicate", + encType: EncodingSortedZlib, + sids: duplicateSids, + }, + } +) + +// TestQueryShortChanIDsUnsorted tests that decoding a QueryShortChanID request +// that contains duplicate or unsorted ids returns an ErrUnsortedSIDs failure. 
+func TestQueryShortChanIDsUnsorted(t *testing.T) { + for _, test := range unsortedSidTests { + test := test + t.Run(test.name, func(t *testing.T) { + req := &QueryShortChanIDs{ + EncodingType: test.encType, + ShortChanIDs: test.sids, + noSort: true, + } + + var b bytes.Buffer + err := req.Encode(&b, 0) + if err != nil { + t.Fatalf("unable to encode req: %v", err) + } + + var req2 QueryShortChanIDs + err = req2.Decode(bytes.NewReader(b.Bytes()), 0) + if _, ok := err.(ErrUnsortedSIDs); !ok { + t.Fatalf("expected ErrUnsortedSIDs, got: %T", + err) + } + }) + } +} diff --git a/lnwire/reply_channel_range.go b/lnwire/reply_channel_range.go index 5765191d55..430606025c 100644 --- a/lnwire/reply_channel_range.go +++ b/lnwire/reply_channel_range.go @@ -21,6 +21,12 @@ type ReplyChannelRange struct { // ShortChanIDs is a slice of decoded short channel ID's. ShortChanIDs []ShortChannelID + + // noSort indicates whether or not to sort the short channel ids before + // writing them out. + // + // NOTE: This should only be used for testing. + noSort bool } // NewReplyChannelRange creates a new empty ReplyChannelRange message. @@ -64,7 +70,7 @@ func (c *ReplyChannelRange) Encode(w io.Writer, pver uint32) error { return err } - return encodeShortChanIDs(w, c.EncodingType, c.ShortChanIDs) + return encodeShortChanIDs(w, c.EncodingType, c.ShortChanIDs, c.noSort) } // MsgType returns the integer uniquely identifying this message type on the diff --git a/lnwire/reply_channel_range_test.go b/lnwire/reply_channel_range_test.go new file mode 100644 index 0000000000..d2c8df68c6 --- /dev/null +++ b/lnwire/reply_channel_range_test.go @@ -0,0 +1,107 @@ +package lnwire + +import ( + "bytes" + "encoding/hex" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// TestReplyChannelRangeUnsorted tests that decoding a ReplyChannelRange request +// that contains duplicate or unsorted ids returns an ErrUnsortedSIDs failure. 
+func TestReplyChannelRangeUnsorted(t *testing.T) { + for _, test := range unsortedSidTests { + test := test + t.Run(test.name, func(t *testing.T) { + req := &ReplyChannelRange{ + EncodingType: test.encType, + ShortChanIDs: test.sids, + noSort: true, + } + + var b bytes.Buffer + err := req.Encode(&b, 0) + if err != nil { + t.Fatalf("unable to encode req: %v", err) + } + + var req2 ReplyChannelRange + err = req2.Decode(bytes.NewReader(b.Bytes()), 0) + if _, ok := err.(ErrUnsortedSIDs); !ok { + t.Fatalf("expected ErrUnsortedSIDs, got: %T", + err) + } + }) + } +} + +// TestReplyChannelRangeEmpty tests encoding and decoding a ReplyChannelRange +// that doesn't contain any channel results. +func TestReplyChannelRangeEmpty(t *testing.T) { + t.Parallel() + + emptyChannelsTests := []struct { + name string + encType ShortChanIDEncoding + encodedHex string + }{ + { + name: "empty plain encoding", + encType: EncodingSortedPlain, + encodedHex: "000000000000000000000000000000000000000" + + "00000000000000000000000000000000100000002" + + "01000100", + }, + { + name: "empty zlib encoding", + encType: EncodingSortedZlib, + encodedHex: "00000000000000000000000000000000000000" + + "0000000000000000000000000000000001000000" + + "0201000101", + }, + } + + for _, test := range emptyChannelsTests { + test := test + t.Run(test.name, func(t *testing.T) { + req := ReplyChannelRange{ + QueryChannelRange: QueryChannelRange{ + FirstBlockHeight: 1, + NumBlocks: 2, + }, + Complete: 1, + EncodingType: test.encType, + ShortChanIDs: nil, + } + + // First decode the hex string in the test case into a + // new ReplyChannelRange message. It should be + // identical to the one created above. 
+ var req2 ReplyChannelRange + b, _ := hex.DecodeString(test.encodedHex) + err := req2.Decode(bytes.NewReader(b), 0) + if err != nil { + t.Fatalf("unable to decode req: %v", err) + } + if !reflect.DeepEqual(req, req2) { + t.Fatalf("requests don't match: expected %v got %v", + spew.Sdump(req), spew.Sdump(req2)) + } + + // Next, we go in the reverse direction: encode the + // request created above, and assert that it matches + // the raw byte encoding. + var b2 bytes.Buffer + err = req.Encode(&b2, 0) + if err != nil { + t.Fatalf("unable to encode req: %v", err) + } + if !bytes.Equal(b, b2.Bytes()) { + t.Fatalf("encoded requests don't match: expected %x got %x", + b, b2.Bytes()) + } + }) + } +} diff --git a/lnwire/shutdown.go b/lnwire/shutdown.go index 3dabc7bc6e..94d10a9080 100644 --- a/lnwire/shutdown.go +++ b/lnwire/shutdown.go @@ -22,6 +22,15 @@ type Shutdown struct { // p2wpkh. type DeliveryAddress []byte +// deliveryAddressMaxSize is the maximum expected size in bytes of a +// DeliveryAddress based on the types of scripts we know. +// Following are the known scripts and their sizes in bytes. +// - pay to witness script hash: 34 +// - pay to pubkey hash: 25 +// - pay to script hash: 22 +// - pay to witness pubkey hash: 22. +const deliveryAddressMaxSize = 34 + // NewShutdown creates a new Shutdown message. func NewShutdown(cid ChannelID, addr DeliveryAddress) *Shutdown { return &Shutdown{ @@ -71,11 +80,8 @@ func (s *Shutdown) MaxPayloadLength(pver uint32) uint32 { // Len - 2 bytes length += 2 - // ScriptPubKey - 34 bytes for pay to witness script hash - length += 34 - - // NOTE: pay to pubkey hash is 25 bytes, pay to script hash is 22 - // bytes, and pay to witness pubkey hash is 22 bytes in length. + // ScriptPubKey - maximum delivery address size. 
+ length += deliveryAddressMaxSize return length } diff --git a/lnwire/signature.go b/lnwire/signature.go index 012d5a9131..13a2f25c3a 100644 --- a/lnwire/signature.go +++ b/lnwire/signature.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/btcsuite/btcd/btcec" + "github.com/lightningnetwork/lnd/input" ) // Sig is a fixed-sized ECDSA signature. Unlike Bitcoin, we use fixed sized @@ -64,7 +65,7 @@ func NewSigFromRawSignature(sig []byte) (Sig, error) { // NewSigFromSignature creates a new signature as used on the wire, from an // existing btcec.Signature. -func NewSigFromSignature(e *btcec.Signature) (Sig, error) { +func NewSigFromSignature(e input.Signature) (Sig, error) { if e == nil { return Sig{}, fmt.Errorf("cannot decode empty signature") } diff --git a/log.go b/log.go index a4961e2c76..d2c6292e79 100644 --- a/log.go +++ b/log.go @@ -2,20 +2,16 @@ package lnd import ( "context" - "fmt" - "io" - "os" - "path/filepath" "github.com/btcsuite/btcd/connmgr" "github.com/btcsuite/btclog" - "github.com/jrick/logrotate/rotator" "github.com/lightninglabs/neutrino" sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/autopilot" "github.com/lightningnetwork/lnd/build" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/chanbackup" + "github.com/lightningnetwork/lnd/chanfitness" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channelnotifier" "github.com/lightningnetwork/lnd/contractcourt" @@ -27,13 +23,16 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lnrpc/signrpc" + "github.com/lightningnetwork/lnd/lnrpc/verrpc" "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/monitoring" 
"github.com/lightningnetwork/lnd/netann" "github.com/lightningnetwork/lnd/peernotifier" "github.com/lightningnetwork/lnd/routing" + "github.com/lightningnetwork/lnd/routing/localchans" "github.com/lightningnetwork/lnd/signal" "github.com/lightningnetwork/lnd/sweep" "github.com/lightningnetwork/lnd/watchtower" @@ -48,177 +47,83 @@ import ( // // Loggers can not be used before the log rotator has been initialized with a // log file. This must be performed early during application startup by -// calling initLogRotator. +// calling logWriter.InitLogRotator. var ( - logWriter = &build.LogWriter{} - - // backendLog is the logging backend used to create all subsystem - // loggers. The backend must not be used before the log rotator has - // been initialized, or data races and/or nil pointer dereferences will - // occur. - backendLog = btclog.NewBackend(logWriter) - - // logRotator is one of the logging outputs. It should be closed on - // application shutdown. - logRotator *rotator.Rotator - - ltndLog = build.NewSubLogger("LTND", backendLog.Logger) - lnwlLog = build.NewSubLogger("LNWL", backendLog.Logger) - peerLog = build.NewSubLogger("PEER", backendLog.Logger) - discLog = build.NewSubLogger("DISC", backendLog.Logger) - rpcsLog = build.NewSubLogger("RPCS", backendLog.Logger) - srvrLog = build.NewSubLogger("SRVR", backendLog.Logger) - ntfnLog = build.NewSubLogger("NTFN", backendLog.Logger) - chdbLog = build.NewSubLogger("CHDB", backendLog.Logger) - fndgLog = build.NewSubLogger("FNDG", backendLog.Logger) - hswcLog = build.NewSubLogger("HSWC", backendLog.Logger) - utxnLog = build.NewSubLogger("UTXN", backendLog.Logger) - brarLog = build.NewSubLogger("BRAR", backendLog.Logger) - cmgrLog = build.NewSubLogger("CMGR", backendLog.Logger) - crtrLog = build.NewSubLogger("CRTR", backendLog.Logger) - btcnLog = build.NewSubLogger("GRSN", backendLog.Logger) - atplLog = build.NewSubLogger("ATPL", backendLog.Logger) - cnctLog = build.NewSubLogger("CNCT", backendLog.Logger) - sphxLog = 
build.NewSubLogger("SPHX", backendLog.Logger) - swprLog = build.NewSubLogger("SWPR", backendLog.Logger) - sgnrLog = build.NewSubLogger("SGNR", backendLog.Logger) - wlktLog = build.NewSubLogger("WLKT", backendLog.Logger) - arpcLog = build.NewSubLogger("ARPC", backendLog.Logger) - invcLog = build.NewSubLogger("INVC", backendLog.Logger) - nannLog = build.NewSubLogger("NANN", backendLog.Logger) - wtwrLog = build.NewSubLogger("WTWR", backendLog.Logger) - ntfrLog = build.NewSubLogger("NTFR", backendLog.Logger) - irpcLog = build.NewSubLogger("IRPC", backendLog.Logger) - chnfLog = build.NewSubLogger("CHNF", backendLog.Logger) - chbuLog = build.NewSubLogger("CHBU", backendLog.Logger) - promLog = build.NewSubLogger("PROM", backendLog.Logger) - wtclLog = build.NewSubLogger("WTCL", backendLog.Logger) - prnfLog = build.NewSubLogger("PRNF", backendLog.Logger) + logWriter = build.NewRotatingLogWriter() + + // Loggers that need to be accessible from the lnd package can be placed + // here. Loggers that are only used in sub modules can be added directly + // by using the addSubLogger method. + ltndLog = build.NewSubLogger("LTND", logWriter.GenSubLogger) + peerLog = build.NewSubLogger("PEER", logWriter.GenSubLogger) + rpcsLog = build.NewSubLogger("RPCS", logWriter.GenSubLogger) + srvrLog = build.NewSubLogger("SRVR", logWriter.GenSubLogger) + fndgLog = build.NewSubLogger("FNDG", logWriter.GenSubLogger) + utxnLog = build.NewSubLogger("UTXN", logWriter.GenSubLogger) + brarLog = build.NewSubLogger("BRAR", logWriter.GenSubLogger) + atplLog = build.NewSubLogger("ATPL", logWriter.GenSubLogger) ) // Initialize package-global logger variables. 
func init() { - lnwallet.UseLogger(lnwlLog) - discovery.UseLogger(discLog) - chainntnfs.UseLogger(ntfnLog) - channeldb.UseLogger(chdbLog) - htlcswitch.UseLogger(hswcLog) - connmgr.UseLogger(cmgrLog) - routing.UseLogger(crtrLog) - neutrino.UseLogger(btcnLog) - autopilot.UseLogger(atplLog) - contractcourt.UseLogger(cnctLog) - sphinx.UseLogger(sphxLog) - signal.UseLogger(ltndLog) - sweep.UseLogger(swprLog) - signrpc.UseLogger(sgnrLog) - walletrpc.UseLogger(wlktLog) - autopilotrpc.UseLogger(arpcLog) - invoices.UseLogger(invcLog) - netann.UseLogger(nannLog) - watchtower.UseLogger(wtwrLog) - chainrpc.UseLogger(ntfrLog) - invoicesrpc.UseLogger(irpcLog) - channelnotifier.UseLogger(chnfLog) - chanbackup.UseLogger(chbuLog) - monitoring.UseLogger(promLog) - wtclient.UseLogger(wtclLog) - peernotifier.UseLogger(prnfLog) - + setSubLogger("LTND", ltndLog, signal.UseLogger) + setSubLogger("ATPL", atplLog, autopilot.UseLogger) + setSubLogger("PEER", peerLog) + setSubLogger("RPCS", rpcsLog) + setSubLogger("SRVR", srvrLog) + setSubLogger("FNDG", fndgLog) + setSubLogger("UTXN", utxnLog) + setSubLogger("BRAR", brarLog) + + addSubLogger("LNWL", lnwallet.UseLogger) + addSubLogger("DISC", discovery.UseLogger) + addSubLogger("NTFN", chainntnfs.UseLogger) + addSubLogger("CHDB", channeldb.UseLogger) + addSubLogger("HSWC", htlcswitch.UseLogger) + addSubLogger("CMGR", connmgr.UseLogger) + addSubLogger("GRSN", neutrino.UseLogger) + addSubLogger("CNCT", contractcourt.UseLogger) + addSubLogger("SPHX", sphinx.UseLogger) + addSubLogger("SWPR", sweep.UseLogger) + addSubLogger("SGNR", signrpc.UseLogger) + addSubLogger("WLKT", walletrpc.UseLogger) + addSubLogger("ARPC", autopilotrpc.UseLogger) + addSubLogger("INVC", invoices.UseLogger) + addSubLogger("NANN", netann.UseLogger) + addSubLogger("WTWR", watchtower.UseLogger) + addSubLogger("NTFR", chainrpc.UseLogger) + addSubLogger("IRPC", invoicesrpc.UseLogger) + addSubLogger("CHNF", channelnotifier.UseLogger) + addSubLogger("CHBU", chanbackup.UseLogger) 
+ addSubLogger("PROM", monitoring.UseLogger) + addSubLogger("WTCL", wtclient.UseLogger) + addSubLogger("PRNF", peernotifier.UseLogger) + addSubLogger("CHFD", chanfunding.UseLogger) + + addSubLogger(routing.Subsystem, routing.UseLogger, localchans.UseLogger) addSubLogger(routerrpc.Subsystem, routerrpc.UseLogger) addSubLogger(wtclientrpc.Subsystem, wtclientrpc.UseLogger) + addSubLogger(chanfitness.Subsystem, chanfitness.UseLogger) + addSubLogger(verrpc.Subsystem, verrpc.UseLogger) } -// addSubLogger is a helper method to conveniently register the logger of a sub -// system. -func addSubLogger(subsystem string, useLogger func(btclog.Logger)) { - logger := build.NewSubLogger(subsystem, backendLog.Logger) - useLogger(logger) - subsystemLoggers[subsystem] = logger +// addSubLogger is a helper method to conveniently create and register the +// logger of one or more sub systems. +func addSubLogger(subsystem string, useLoggers ...func(btclog.Logger)) { + // Create and register just a single logger to prevent them from + // overwriting each other internally. + logger := build.NewSubLogger(subsystem, logWriter.GenSubLogger) + setSubLogger(subsystem, logger, useLoggers...) } -// subsystemLoggers maps each subsystem identifier to its associated logger. 
-var subsystemLoggers = map[string]btclog.Logger{ - "LTND": ltndLog, - "LNWL": lnwlLog, - "PEER": peerLog, - "DISC": discLog, - "RPCS": rpcsLog, - "SRVR": srvrLog, - "NTFN": ntfnLog, - "CHDB": chdbLog, - "FNDG": fndgLog, - "HSWC": hswcLog, - "UTXN": utxnLog, - "BRAR": brarLog, - "CMGR": cmgrLog, - "CRTR": crtrLog, - "GRSN": btcnLog, - "ATPL": atplLog, - "CNCT": cnctLog, - "SPHX": sphxLog, - "SWPR": swprLog, - "SGNR": sgnrLog, - "WLKT": wlktLog, - "ARPC": arpcLog, - "INVC": invcLog, - "NANN": nannLog, - "WTWR": wtwrLog, - "NTFR": ntfrLog, - "IRPC": irpcLog, - "CHNF": chnfLog, - "CHBU": chbuLog, - "PROM": promLog, - "WTCL": wtclLog, - "PRNF": prnfLog, -} - -// initLogRotator initializes the logging rotator to write logs to logFile and -// create roll files in the same directory. It must be called before the -// package-global log rotator variables are used. -func initLogRotator(logFile string, MaxLogFileSize int, MaxLogFiles int) { - logDir, _ := filepath.Split(logFile) - err := os.MkdirAll(logDir, 0700) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create log directory: %v\n", err) - os.Exit(1) - } - r, err := rotator.New(logFile, int64(MaxLogFileSize*1024), false, MaxLogFiles) - if err != nil { - fmt.Fprintf(os.Stderr, "failed to create file rotator: %v\n", err) - os.Exit(1) - } - - pr, pw := io.Pipe() - go r.Run(pr) - - logWriter.RotatorPipe = pw - logRotator = r -} - -// setLogLevel sets the logging level for provided subsystem. Invalid -// subsystems are ignored. Uninitialized subsystems are dynamically created as -// needed. -func setLogLevel(subsystemID string, logLevel string) { - // Ignore invalid subsystems. - logger, ok := subsystemLoggers[subsystemID] - if !ok { - return - } - - // Defaults to info if the log level is invalid. - level, _ := btclog.LevelFromString(logLevel) - logger.SetLevel(level) -} +// setSubLogger is a helper method to conveniently register the logger of a sub +// system. 
+func setSubLogger(subsystem string, logger btclog.Logger, + useLoggers ...func(btclog.Logger)) { -// setLogLevels sets the log level for all subsystem loggers to the passed -// level. It also dynamically creates the subsystem loggers as needed, so it -// can be used to initialize the logging system. -func setLogLevels(logLevel string) { - // Configure all sub-systems with the new logging level. Dynamically - // create loggers as needed. - for subsystemID := range subsystemLoggers { - setLogLevel(subsystemID, logLevel) + logWriter.RegisterSubLogger(subsystem, logger) + for _, useLogger := range useLoggers { + useLogger(logger) } } diff --git a/macaroons/README.md b/macaroons/README.md index 6de7d8c154..67ef9ab48e 100644 --- a/macaroons/README.md +++ b/macaroons/README.md @@ -87,3 +87,30 @@ be found in `constraints.go`: * `IPLockConstraint`: Locks the macaroon to a specific IP address. This constraint can be set by adding the parameter `--macaroonip a.b.c.d` to the `lncli` command. + +## Bakery + +As of lnd `v0.9.0-beta` there is a macaroon bakery available through gRPC and +command line. +Users can create their own macaroons with custom permissions if the provided +default macaroons (`admin`, `invoice` and `readonly`) are not sufficient. + +For example, a macaroon that is only allowed to manage peers would be created +with the following command: + +`lncli bakemacaroon peers:read peers:write` + +A full and up-to-date list of available entity/action pairs can be found by +looking at the `rpcserver.go` in the root folder of the project. + +### Upgrading from v0.8.0-beta or earlier + +Users upgrading from a version prior to `v0.9.0-beta` might get a `permission +denied ` error when trying to use the `lncli bakemacaroon` command. +This is because the bakery requires a new permission (`macaroon/generate`) to +access. 
+Users can obtain a new `admin.macaroon` that contains this permission by +removing all three default macaroons (`admin.macaroon`, `invoice.macaroon` and +`readonly.macaroon`, **NOT** the `macaroons.db`!) from their +`data/chain///` directory inside the lnd data directory and +restarting lnd. diff --git a/macaroons/security.go b/macaroons/security.go new file mode 100644 index 0000000000..814b0d256f --- /dev/null +++ b/macaroons/security.go @@ -0,0 +1,13 @@ +// +build !rpctest + +package macaroons + +import "github.com/btcsuite/btcwallet/snacl" + +var ( + // Below are the default scrypt parameters that are used when creating + // the encryption key for the macaroon database with snacl.NewSecretKey. + scryptN = snacl.DefaultN + scryptR = snacl.DefaultR + scryptP = snacl.DefaultP +) diff --git a/macaroons/security_rpctest.go b/macaroons/security_rpctest.go new file mode 100644 index 0000000000..d49819e0f2 --- /dev/null +++ b/macaroons/security_rpctest.go @@ -0,0 +1,14 @@ +// +build rpctest + +package macaroons + +import "github.com/btcsuite/btcwallet/waddrmgr" + +var ( + // Below are the reduced scrypt parameters that are used when creating + // the encryption key for the macaroon database with snacl.NewSecretKey. + // We use very low values for our itest/rpctest to speed things up. + scryptN = waddrmgr.FastScryptOptions.N + scryptR = waddrmgr.FastScryptOptions.R + scryptP = waddrmgr.FastScryptOptions.P +) diff --git a/macaroons/security_test.go b/macaroons/security_test.go new file mode 100644 index 0000000000..48c18e313a --- /dev/null +++ b/macaroons/security_test.go @@ -0,0 +1,12 @@ +package macaroons + +import "github.com/btcsuite/btcwallet/waddrmgr" + +func init() { + // Below are the reduced scrypt parameters that are used when creating + // the encryption key for the macaroon database with snacl.NewSecretKey. + // We use very low values for our itest/rpctest to speed things up. 
+ scryptN = waddrmgr.FastScryptOptions.N + scryptR = waddrmgr.FastScryptOptions.R + scryptP = waddrmgr.FastScryptOptions.P +} diff --git a/macaroons/service.go b/macaroons/service.go index 3dddea6be4..825c643530 100644 --- a/macaroons/service.go +++ b/macaroons/service.go @@ -7,7 +7,7 @@ import ( "os" "path" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -48,8 +48,8 @@ func NewService(dir string, checks ...Checker) (*Service, error) { // Open the database that we'll use to store the primary macaroon key, // and all generated macaroons+caveats. - macaroonDB, err := bbolt.Open( - path.Join(dir, DBFilename), 0600, bbolt.DefaultOptions, + macaroonDB, err := kvdb.Create( + kvdb.BoltBackendName, path.Join(dir, DBFilename), true, ) if err != nil { return nil, err diff --git a/macaroons/service_test.go b/macaroons/service_test.go index 3cc01383cd..dd90ad5f8c 100644 --- a/macaroons/service_test.go +++ b/macaroons/service_test.go @@ -8,7 +8,7 @@ import ( "path" "testing" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/macaroons" "google.golang.org/grpc/metadata" "gopkg.in/macaroon-bakery.v2/bakery" @@ -33,8 +33,9 @@ func setupTestRootKeyStorage(t *testing.T) string { if err != nil { t.Fatalf("Error creating temp dir: %v", err) } - db, err := bbolt.Open(path.Join(tempDir, "macaroons.db"), 0600, - bbolt.DefaultOptions) + db, err := kvdb.Create( + kvdb.BoltBackendName, path.Join(tempDir, "macaroons.db"), true, + ) if err != nil { t.Fatalf("Error opening store DB: %v", err) } @@ -61,18 +62,19 @@ func TestNewService(t *testing.T) { // Second, create the new service instance, unlock it and pass in a // checker that we expect it to add to the bakery. 
service, err := macaroons.NewService(tempDir, macaroons.IPLockChecker) - defer service.Close() if err != nil { t.Fatalf("Error creating new service: %v", err) } + defer service.Close() err = service.CreateUnlock(&defaultPw) if err != nil { t.Fatalf("Error unlocking root key storage: %v", err) } // Third, check if the created service can bake macaroons. - macaroon, err := service.Oven.NewMacaroon(nil, bakery.LatestVersion, - nil, testOperation) + macaroon, err := service.Oven.NewMacaroon( + context.TODO(), bakery.LatestVersion, nil, testOperation, + ) if err != nil { t.Fatalf("Error creating macaroon from service: %v", err) } @@ -104,18 +106,20 @@ func TestValidateMacaroon(t *testing.T) { tempDir := setupTestRootKeyStorage(t) defer os.RemoveAll(tempDir) service, err := macaroons.NewService(tempDir, macaroons.IPLockChecker) - defer service.Close() if err != nil { t.Fatalf("Error creating new service: %v", err) } + defer service.Close() + err = service.CreateUnlock(&defaultPw) if err != nil { t.Fatalf("Error unlocking root key storage: %v", err) } // Then, create a new macaroon that we can serialize. - macaroon, err := service.Oven.NewMacaroon(nil, bakery.LatestVersion, - nil, testOperation) + macaroon, err := service.Oven.NewMacaroon( + context.TODO(), bakery.LatestVersion, nil, testOperation, + ) if err != nil { t.Fatalf("Error creating macaroon from service: %v", err) } diff --git a/macaroons/store.go b/macaroons/store.go index 7a38dd92e5..affb0c5067 100644 --- a/macaroons/store.go +++ b/macaroons/store.go @@ -5,8 +5,9 @@ import ( "crypto/rand" "fmt" "io" + "sync" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/btcsuite/btcwallet/snacl" ) @@ -45,17 +46,18 @@ var ( // RootKeyStorage implements the bakery.RootKeyStorage interface. type RootKeyStorage struct { - *bbolt.DB + kvdb.Backend - encKey *snacl.SecretKey + encKeyMtx sync.RWMutex + encKey *snacl.SecretKey } // NewRootKeyStorage creates a RootKeyStorage instance. 
// TODO(aakselrod): Add support for encryption of data with passphrase. -func NewRootKeyStorage(db *bbolt.DB) (*RootKeyStorage, error) { +func NewRootKeyStorage(db kvdb.Backend) (*RootKeyStorage, error) { // If the store's bucket doesn't exist, create it. - err := db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(rootKeyBucketName) + err := kvdb.Update(db, func(tx kvdb.RwTx) error { + _, err := tx.CreateTopLevelBucket(rootKeyBucketName) return err }) if err != nil { @@ -63,12 +65,15 @@ func NewRootKeyStorage(db *bbolt.DB) (*RootKeyStorage, error) { } // Return the DB wrapped in a RootKeyStorage object. - return &RootKeyStorage{db, nil}, nil + return &RootKeyStorage{Backend: db, encKey: nil}, nil } // CreateUnlock sets an encryption key if one is not already set, otherwise it // checks if the password is correct for the stored encryption key. func (r *RootKeyStorage) CreateUnlock(password *[]byte) error { + r.encKeyMtx.Lock() + defer r.encKeyMtx.Unlock() + // Check if we've already unlocked the store; return an error if so. if r.encKey != nil { return ErrAlreadyUnlocked @@ -79,8 +84,8 @@ func (r *RootKeyStorage) CreateUnlock(password *[]byte) error { return ErrPasswordRequired } - return r.Update(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(rootKeyBucketName) + return kvdb.Update(r, func(tx kvdb.RwTx) error { + bucket := tx.ReadWriteBucket(rootKeyBucketName) dbKey := bucket.Get(encryptedKeyID) if len(dbKey) > 0 { // We've already stored a key, so try to unlock with @@ -101,8 +106,9 @@ func (r *RootKeyStorage) CreateUnlock(password *[]byte) error { } // We haven't yet stored a key, so create a new one. 
- encKey, err := snacl.NewSecretKey(password, snacl.DefaultN, - snacl.DefaultR, snacl.DefaultP) + encKey, err := snacl.NewSecretKey( + password, scryptN, scryptR, scryptP, + ) if err != nil { return err } @@ -119,12 +125,15 @@ func (r *RootKeyStorage) CreateUnlock(password *[]byte) error { // Get implements the Get method for the bakery.RootKeyStorage interface. func (r *RootKeyStorage) Get(_ context.Context, id []byte) ([]byte, error) { + r.encKeyMtx.RLock() + defer r.encKeyMtx.RUnlock() + if r.encKey == nil { return nil, ErrStoreLocked } var rootKey []byte - err := r.View(func(tx *bbolt.Tx) error { - dbKey := tx.Bucket(rootKeyBucketName).Get(id) + err := kvdb.View(r, func(tx kvdb.ReadTx) error { + dbKey := tx.ReadBucket(rootKeyBucketName).Get(id) if len(dbKey) == 0 { return fmt.Errorf("root key with id %s doesn't exist", string(id)) @@ -150,13 +159,16 @@ func (r *RootKeyStorage) Get(_ context.Context, id []byte) ([]byte, error) { // interface. // TODO(aakselrod): Add support for key rotation. func (r *RootKeyStorage) RootKey(_ context.Context) ([]byte, []byte, error) { + r.encKeyMtx.RLock() + defer r.encKeyMtx.RUnlock() + if r.encKey == nil { return nil, nil, ErrStoreLocked } var rootKey []byte id := defaultRootKeyID - err := r.Update(func(tx *bbolt.Tx) error { - ns := tx.Bucket(rootKeyBucketName) + err := kvdb.Update(r, func(tx kvdb.RwTx) error { + ns := tx.ReadWriteBucket(rootKeyBucketName) dbKey := ns.Get(id) // If there's a root key stored in the bucket, decrypt it and @@ -195,8 +207,11 @@ func (r *RootKeyStorage) RootKey(_ context.Context) ([]byte, []byte, error) { // Close closes the underlying database and zeroes the encryption key stored // in memory. 
func (r *RootKeyStorage) Close() error { + r.encKeyMtx.Lock() + defer r.encKeyMtx.Unlock() + if r.encKey != nil { r.encKey.Zero() } - return r.DB.Close() + return r.Backend.Close() } diff --git a/macaroons/store_test.go b/macaroons/store_test.go index 4b3188acef..b6898df6e9 100644 --- a/macaroons/store_test.go +++ b/macaroons/store_test.go @@ -2,13 +2,13 @@ package macaroons_test import ( "bytes" + "context" "io/ioutil" "os" "path" "testing" - "github.com/coreos/bbolt" - + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/macaroons" "github.com/btcsuite/btcwallet/snacl" @@ -21,8 +21,9 @@ func TestStore(t *testing.T) { } defer os.RemoveAll(tempDir) - db, err := bbolt.Open(path.Join(tempDir, "weks.db"), 0600, - bbolt.DefaultOptions) + db, err := kvdb.Create( + kvdb.BoltBackendName, path.Join(tempDir, "weks.db"), true, + ) if err != nil { t.Fatalf("Error opening store DB: %v", err) } @@ -34,12 +35,12 @@ func TestStore(t *testing.T) { } defer store.Close() - key, id, err := store.RootKey(nil) + _, _, err = store.RootKey(context.TODO()) if err != macaroons.ErrStoreLocked { t.Fatalf("Received %v instead of ErrStoreLocked", err) } - key, err = store.Get(nil, nil) + _, err = store.Get(context.TODO(), nil) if err != macaroons.ErrStoreLocked { t.Fatalf("Received %v instead of ErrStoreLocked", err) } @@ -50,13 +51,13 @@ func TestStore(t *testing.T) { t.Fatalf("Error creating store encryption key: %v", err) } - key, id, err = store.RootKey(nil) + key, id, err := store.RootKey(context.TODO()) if err != nil { t.Fatalf("Error getting root key from store: %v", err) } rootID := id - key2, err := store.Get(nil, id) + key2, err := store.Get(context.TODO(), id) if err != nil { t.Fatalf("Error getting key with ID %s: %v", string(id), err) } @@ -72,11 +73,13 @@ func TestStore(t *testing.T) { } store.Close() + // Between here and the re-opening of the store, it's possible to get // a double-close, but that's not such a big deal since the tests will // fail 
anyway in that case. - db, err = bbolt.Open(path.Join(tempDir, "weks.db"), 0600, - bbolt.DefaultOptions) + db, err = kvdb.Create( + kvdb.BoltBackendName, path.Join(tempDir, "weks.db"), true, + ) if err != nil { t.Fatalf("Error opening store DB: %v", err) } @@ -97,12 +100,12 @@ func TestStore(t *testing.T) { t.Fatalf("Received %v instead of ErrPasswordRequired", err) } - key, id, err = store.RootKey(nil) + _, _, err = store.RootKey(context.TODO()) if err != macaroons.ErrStoreLocked { t.Fatalf("Received %v instead of ErrStoreLocked", err) } - key, err = store.Get(nil, nil) + _, err = store.Get(context.TODO(), nil) if err != macaroons.ErrStoreLocked { t.Fatalf("Received %v instead of ErrStoreLocked", err) } @@ -112,7 +115,7 @@ func TestStore(t *testing.T) { t.Fatalf("Error unlocking root key store: %v", err) } - key, err = store.Get(nil, rootID) + key, err = store.Get(context.TODO(), rootID) if err != nil { t.Fatalf("Error getting key with ID %s: %v", string(rootID), err) @@ -122,7 +125,7 @@ func TestStore(t *testing.T) { key2, key) } - key, id, err = store.RootKey(nil) + key, id, err = store.RootKey(context.TODO()) if err != nil { t.Fatalf("Error getting root key from store: %v", err) } diff --git a/make/release_flags.mk b/make/release_flags.mk new file mode 100644 index 0000000000..7b64d3fd0a --- /dev/null +++ b/make/release_flags.mk @@ -0,0 +1,55 @@ +VERSION_TAG = $(shell date +%Y%m%d)-01 +VERSION_CHECK = @$(call print, "Building master with date version tag") + +BUILD_SYSTEM = darwin-386 \ +darwin-amd64 \ +dragonfly-amd64 \ +freebsd-386 \ +freebsd-amd64 \ +freebsd-arm \ +illumos-amd64 \ +linux-386 \ +linux-amd64 \ +linux-armv6 \ +linux-armv7 \ +linux-arm64 \ +linux-ppc64 \ +linux-ppc64le \ +linux-mips \ +linux-mipsle \ +linux-mips64 \ +linux-mips64le \ +linux-s390x \ +netbsd-386 \ +netbsd-amd64 \ +netbsd-arm \ +netbsd-arm64 \ +openbsd-386 \ +openbsd-amd64 \ +openbsd-arm \ +openbsd-arm64 \ +solaris-amd64 \ +windows-386 \ +windows-amd64 \ +windows-arm + 
+RELEASE_TAGS = autopilotrpc signrpc walletrpc chainrpc invoicesrpc watchtowerrpc + +# One can either specify a git tag as the version suffix or one is generated +# from the current date. +ifneq ($(tag),) +VERSION_TAG = $(tag) +VERSION_CHECK = ./build/release/release.sh check-tag "$(VERSION_TAG)" +endif + +# By default we will build all systems. But with the 'sys' tag, a specific +# system can be specified. This is useful to release for a subset of +# systems/architectures. +ifneq ($(sys),) +BUILD_SYSTEM = $(sys) +endif + +# Use all build tags by default but allow them to be overwritten. +ifneq ($(tags),) +RELEASE_TAGS = $(tags) +endif diff --git a/make/testing_flags.mk b/make/testing_flags.mk index 13fbf81d02..797354d4cb 100644 --- a/make/testing_flags.mk +++ b/make/testing_flags.mk @@ -55,14 +55,12 @@ UNIT_RACE := $(UNIT) -race endif -# Construct the integration test command with the added build flags. -ITEST_TAGS := $(DEV_TAGS) rpctest chainrpc walletrpc signrpc invoicesrpc autopilotrpc routerrpc watchtowerrpc wtclientrpc - # Default to btcd backend if not set. -ifneq ($(backend),) -ITEST_TAGS += ${backend} -else -ITEST_TAGS += btcd +ifeq ($(backend),) +backend = btcd endif -ITEST := rm lntest/itest/*.log; date; $(GOTEST) ./lntest/itest -tags="$(ITEST_TAGS)" $(TEST_FLAGS) -logoutput -goroutinedump +# Construct the integration test command with the added build flags. 
+ITEST_TAGS := $(DEV_TAGS) rpctest chainrpc walletrpc signrpc invoicesrpc autopilotrpc watchtowerrpc $(backend) + +ITEST := rm lntest/itest/*.log; date; $(GOTEST) -v ./lntest/itest -tags="$(ITEST_TAGS)" $(TEST_FLAGS) -logoutput -goroutinedump diff --git a/mobile/README.md b/mobile/README.md index 219fe13843..3a2662323c 100644 --- a/mobile/README.md +++ b/mobile/README.md @@ -2,20 +2,22 @@ ### Prerequisites #### protoc -Install the dependencies for genarating protobuf definitions as stated in [lnrpc docs]( -../lnrpc/README.md#generate-protobuf-definitions) +Install the dependencies for generating protobuf definitions as stated in +[lnrpc docs]( ../lnrpc/README.md#generate-protobuf-definitions) #### gomobile -Follow [gomobile](https://github.com/golang/go/wiki/Mobile) in order to intall `gomobile` and dependencies. +Follow [gomobile](https://github.com/golang/go/wiki/Mobile) in order to install +`gomobile` and dependencies. Remember to run `gomobile init` (otherwise the `lnd` build might just hang). -Note that `gomobile` only supports building projects from `GOPATH` at this point. +Note that `gomobile` only supports building projects from `GOPATH` at this +point. #### falafel -Install [`falafel`](https://github.com/halseth/falafel): +Install [`falafel`](https://github.com/lightninglabs/falafel): ``` -go get -u -v github.com/halseth/falafel +go get -u -v github.com/lightninglabs/falafel ``` ### Building `lnd` for iOS @@ -31,24 +33,34 @@ make android `make mobile` will build both iOS and Android libs. ### Libraries -After the build has succeeded, the libraries will be found in `mobile/build/ios/Lndmobile.framework` and `mobile/build/android/Lndmobile.aar`. Reference your platforms' SDK documentation for how to add the library to your project. +After the build has succeeded, the libraries will be found in +`mobile/build/ios/Lndmobile.framework` and +`mobile/build/android/Lndmobile.aar`. 
Reference your platforms' SDK +documentation for how to add the library to your project. #### Generating proto definitions for your language. -In order to call the methods in the generated library, the serialized proto for the given RPC call must be provided. Similarly, the response will be a serialized proto. +In order to call the methods in the generated library, the serialized proto for +the given RPC call must be provided. Similarly, the response will be a +serialized proto. -In order to generate protobuf definitions for your language of choice, add the proto plugin to the `protoc` invocations found in [`gen_protos.sh`](../lnrpc/gen_protos.sh). For instance to generate protos for Swift, add `--swift_out=.` and run `make rpc`. +In order to generate protobuf definitions for your language of choice, add the +proto plugin to the `protoc` invocations found in +[`gen_protos.sh`](../lnrpc/gen_protos.sh). For instance to generate protos for +Swift, add `--swift_out=.` and run `make rpc`. ### Options -Similar to lnd, subservers can be conditionally compiled with the build by setting the tags argument: +Similar to lnd, subservers can be conditionally compiled with the build by +setting the tags argument: ``` -make ios tags="routerrpc" +make ios ``` -To support subservers that have APIs with name conflicts, pass the "prefix" flag. This will add the subserver name as a prefix to each method name: +To support subservers that have APIs with name conflicts, pass the "prefix" +flag. This will add the subserver name as a prefix to each method name: ``` -make ios tags="routerrpc" prefix=1 +make ios prefix=1 ``` ### API docs diff --git a/mobile/bindings.go b/mobile/bindings.go index 9fb792157f..8d669ca0e3 100644 --- a/mobile/bindings.go +++ b/mobile/bindings.go @@ -16,28 +16,52 @@ import ( // extraArgs can be used to pass command line arguments to lnd that will // override what is found in the config file. 
Example: // extraArgs = "--bitcoin.testnet --lnddir=\"/tmp/folder name/\" --profile=5050" -func Start(extraArgs string, callback Callback) { +// +// The unlockerReady callback is called when the WalletUnlocker service is +// ready, and rpcReady is called after the wallet has been unlocked and lnd is +// ready to accept RPC calls. +// +// NOTE: On mobile platforms the '--lnddir` argument should be set to the +// current app directory in order to ensure lnd has the permissions needed to +// write to it. +func Start(extraArgs string, unlockerReady, rpcReady Callback) { // Split the argument string on "--" to get separated command line // arguments. var splitArgs []string for _, a := range strings.Split(extraArgs, "--") { + // Trim any whitespace space, and ignore empty params. + a := strings.TrimSpace(a) if a == "" { continue } - // Finally we prefix any non-empty string with --, and trim - // whitespace to mimic the regular command line arguments. - splitArgs = append(splitArgs, strings.TrimSpace("--"+a)) + + // Finally we prefix any non-empty string with -- to mimic the + // regular command line arguments. + splitArgs = append(splitArgs, "--"+a) } // Add the extra arguments to os.Args, as that will be parsed during // startup. os.Args = append(os.Args, splitArgs...) + // Set up channels that will be notified when the RPC servers are ready + // to accept calls. + var ( + unlockerListening = make(chan struct{}) + rpcListening = make(chan struct{}) + ) + // We call the main method with the custom in-memory listeners called // by the mobile APIs, such that the grpc server will use these. 
cfg := lnd.ListenerCfg{ - WalletUnlocker: walletUnlockerLis, - RPCListener: lightningLis, + WalletUnlocker: &lnd.ListenerWithSignal{ + Listener: walletUnlockerLis, + Ready: unlockerListening, + }, + RPCListener: &lnd.ListenerWithSignal{ + Listener: lightningLis, + Ready: rpcListening, + }, } // Call the "real" main in a nested manner so the defers will properly @@ -53,9 +77,40 @@ func Start(extraArgs string, callback Callback) { } }() - // TODO(halseth): callback when RPC server is actually running. Since - // the RPC server might take a while to start up, the client might - // assume it is ready to accept calls when this callback is sent, while - // it's not. - callback.OnResponse([]byte("started")) + // Finally we start two go routines that will call the provided + // callbacks when the RPC servers are ready to accept calls. + go func() { + <-unlockerListening + + // We must set the TLS certificates in order to properly + // authenticate with the wallet unlocker service. + auth, err := lnd.WalletUnlockerAuthOptions() + if err != nil { + unlockerReady.OnError(err) + return + } + + // Add the auth options to the listener's dial options. + addWalletUnlockerLisDialOption(auth...) + + unlockerReady.OnResponse([]byte{}) + }() + + go func() { + <-rpcListening + + // Now that the RPC server is ready, we can get the needed + // authentication options, and add them to the global dial + // options. + auth, err := lnd.AdminAuthOptions() + if err != nil { + rpcReady.OnError(err) + return + } + + // Add the auth options to the listener's dial options. + addLightningLisDialOption(auth...) + + rpcReady.OnResponse([]byte{}) + }() } diff --git a/mobile/gen_bindings.sh b/mobile/gen_bindings.sh index 1a3488c5b7..8ed84460a4 100755 --- a/mobile/gen_bindings.sh +++ b/mobile/gen_bindings.sh @@ -3,7 +3,7 @@ mkdir -p build # Check falafel version. 
-falafelVersion="0.5" +falafelVersion="0.7" falafel=$(which falafel) if [ $falafel ] then @@ -19,11 +19,21 @@ else exit 1 fi +# Name of the package for the generated APIs. pkg="lndmobile" + +# The package where the protobuf definitions originally are found. target_pkg="github.com/lightningnetwork/lnd/lnrpc" -# Generate APIs by passing the parsed protos to the falafel plugin. -opts="package_name=$pkg,target_package=$target_pkg,listeners=lightning=lightningLis walletunlocker=walletUnlockerLis,mem_rpc=1" +# A mapping from grpc service to name of the custom listeners. The grpc server +# must be configured to listen on these. +listeners="lightning=lightningLis walletunlocker=walletUnlockerLis" + +# Set to 1 to create boiler plate grpc client code and listeners. If more than +# one proto file is being parsed, it should only be done once. +mem_rpc=1 + +opts="package_name=$pkg,target_package=$target_pkg,listeners=$listeners,mem_rpc=$mem_rpc" protoc -I/usr/local/include -I. \ -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \ --plugin=protoc-gen-custom=$falafel\ @@ -62,3 +72,6 @@ do --proto_path=${DIRECTORY} \ ${file} done + +# Run goimports to resolve any dependencies among the sub-servers. 
+goimports -w ./*_generated.go diff --git a/mobile/sample_lnd.conf b/mobile/sample_lnd.conf index a1b93647b9..c2c87fad45 100644 --- a/mobile/sample_lnd.conf +++ b/mobile/sample_lnd.conf @@ -1,6 +1,5 @@ [Application Options] debuglevel=info -no-macaroons=1 maxbackoff=2s nolisten=1 norest=1 diff --git a/mock.go b/mock.go index cbcba6463f..0b71a6dc38 100644 --- a/mock.go +++ b/mock.go @@ -1,6 +1,7 @@ package lnd import ( + "encoding/hex" "fmt" "sync" "sync/atomic" @@ -17,6 +18,11 @@ import ( "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" +) + +var ( + coinPkScript, _ = hex.DecodeString("001431df1bde03c074d0cf21ea2529427e1499b8f1de") ) // The block height returned by the mock BlockChainIO's GetBestBlock. @@ -27,7 +33,7 @@ type mockSigner struct { } func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, - signDesc *input.SignDescriptor) ([]byte, error) { + signDesc *input.SignDescriptor) (input.Signature, error) { amt := signDesc.Output.Value witnessScript := signDesc.WitnessScript privKey := m.key @@ -52,7 +58,7 @@ func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, return nil, err } - return sig[:len(sig)-1], nil + return btcec.ParseDERSignature(sig[:len(sig)-1], btcec.S256()) } func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx, @@ -271,13 +277,13 @@ func (*mockWalletController) IsOurAddress(a btcutil.Address) bool { } func (*mockWalletController) SendOutputs(outputs []*wire.TxOut, - _ lnwallet.SatPerKWeight) (*wire.MsgTx, error) { + _ chainfee.SatPerKWeight) (*wire.MsgTx, error) { return nil, nil } func (*mockWalletController) CreateSimpleTx(outputs []*wire.TxOut, - _ lnwallet.SatPerKWeight, _ bool) (*txauthor.AuthoredTx, error) { + _ chainfee.SatPerKWeight, _ bool) (*txauthor.AuthoredTx, error) { return nil, nil } @@ -296,7 +302,7 @@ func (m *mockWalletController) ListUnspentWitness(minconfirms, utxo := &lnwallet.Utxo{ AddressType: 
lnwallet.WitnessPubKey, Value: btcutil.Amount(10 * btcutil.SatoshiPerBitcoin), - PkScript: make([]byte, 22), + PkScript: coinPkScript, OutPoint: wire.OutPoint{ Hash: chainhash.Hash{}, Index: m.index, diff --git a/multimutex/hash_mutex.go b/multimutex/hash_mutex.go new file mode 100644 index 0000000000..4a65394d1c --- /dev/null +++ b/multimutex/hash_mutex.go @@ -0,0 +1,90 @@ +package multimutex + +import ( + "fmt" + "sync" + + "github.com/lightningnetwork/lnd/lntypes" +) + +// HashMutex is a struct that keeps track of a set of mutexes with a given hash. +// It can be used for making sure only one goroutine gets given the mutex per +// hash. +type HashMutex struct { + // mutexes is a map of hashes to a cntMutex. The cntMutex for + // a given hash will hold the mutex to be used by all + // callers requesting access for the hash, in addition to + // the count of callers. + mutexes map[lntypes.Hash]*cntMutex + + // mapMtx is used to give synchronize concurrent access + // to the mutexes map. + mapMtx sync.Mutex +} + +// NewHashMutex creates a new Mutex. +func NewHashMutex() *HashMutex { + return &HashMutex{ + mutexes: make(map[lntypes.Hash]*cntMutex), + } +} + +// Lock locks the mutex by the given hash. If the mutex is already +// locked by this hash, Lock blocks until the mutex is available. +func (c *HashMutex) Lock(hash lntypes.Hash) { + c.mapMtx.Lock() + mtx, ok := c.mutexes[hash] + if ok { + // If the mutex already existed in the map, we + // increment its counter, to indicate that there + // now is one more goroutine waiting for it. + mtx.cnt++ + } else { + // If it was not in the map, it means no other + // goroutine has locked the mutex for this hash, + // and we can create a new mutex with count 1 + // and add it to the map. + mtx = &cntMutex{ + cnt: 1, + } + c.mutexes[hash] = mtx + } + c.mapMtx.Unlock() + + // Acquire the mutex for this hash. + mtx.Lock() +} + +// Unlock unlocks the mutex by the given hash. 
It is a run-time +// error if the mutex is not locked by the hash on entry to Unlock. +func (c *HashMutex) Unlock(hash lntypes.Hash) { + // Since we are done with all the work for this + // update, we update the map to reflect that. + c.mapMtx.Lock() + + mtx, ok := c.mutexes[hash] + if !ok { + // The mutex not existing in the map means + // an unlock for an hash not currently locked + // was attempted. + panic(fmt.Sprintf("double unlock for hash %v", + hash)) + } + + // Decrement the counter. If the count goes to + // zero, it means this caller was the last one + // to wait for the mutex, and we can delete it + // from the map. We can do this safely since we + // are under the mapMtx, meaning that all other + // goroutines waiting for the mutex already + // have incremented it, or will create a new + // mutex when they get the mapMtx. + mtx.cnt-- + if mtx.cnt == 0 { + delete(c.mutexes, hash) + } + c.mapMtx.Unlock() + + // Unlock the mutex for this hash. + mtx.Unlock() +} diff --git a/netann/chan_status_manager.go b/netann/chan_status_manager.go index 146fef47fc..51797c87b3 100644 --- a/netann/chan_status_manager.go +++ b/netann/chan_status_manager.go @@ -527,7 +527,7 @@ func (m *ChanStatusManager) signAndSendNextUpdate(outpoint wire.OutPoint, err = SignChannelUpdate( m.cfg.MessageSigner, m.cfg.OurPubKey, chanUpdate, - ChannelUpdateSetDisable(disabled), + ChanUpdSetDisable(disabled), ChanUpdSetTimestamp, ) if err != nil { return err diff --git a/netann/channel_announcement.go b/netann/channel_announcement.go new file mode 100644 index 0000000000..99f909e260 --- /dev/null +++ b/netann/channel_announcement.go @@ -0,0 +1,81 @@ +package netann + +import ( + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/lnwire" +) + +// CreateChanAnnouncement is a helper function which creates all channel +// announcements given the necessary channel related database items. 
This +// function is used to transform out database structs into the corresponding wire +// structs for announcing new channels to other peers, or simply syncing up a +// peer's initial routing table upon connect. +func CreateChanAnnouncement(chanProof *channeldb.ChannelAuthProof, + chanInfo *channeldb.ChannelEdgeInfo, + e1, e2 *channeldb.ChannelEdgePolicy) (*lnwire.ChannelAnnouncement, + *lnwire.ChannelUpdate, *lnwire.ChannelUpdate, error) { + + // First, using the parameters of the channel, along with the channel + // authentication chanProof, we'll create re-create the original + // authenticated channel announcement. + chanID := lnwire.NewShortChanIDFromInt(chanInfo.ChannelID) + chanAnn := &lnwire.ChannelAnnouncement{ + ShortChannelID: chanID, + NodeID1: chanInfo.NodeKey1Bytes, + NodeID2: chanInfo.NodeKey2Bytes, + ChainHash: chanInfo.ChainHash, + BitcoinKey1: chanInfo.BitcoinKey1Bytes, + BitcoinKey2: chanInfo.BitcoinKey2Bytes, + Features: lnwire.NewRawFeatureVector(), + ExtraOpaqueData: chanInfo.ExtraOpaqueData, + } + + var err error + chanAnn.BitcoinSig1, err = lnwire.NewSigFromRawSignature( + chanProof.BitcoinSig1Bytes, + ) + if err != nil { + return nil, nil, nil, err + } + chanAnn.BitcoinSig2, err = lnwire.NewSigFromRawSignature( + chanProof.BitcoinSig2Bytes, + ) + if err != nil { + return nil, nil, nil, err + } + chanAnn.NodeSig1, err = lnwire.NewSigFromRawSignature( + chanProof.NodeSig1Bytes, + ) + if err != nil { + return nil, nil, nil, err + } + chanAnn.NodeSig2, err = lnwire.NewSigFromRawSignature( + chanProof.NodeSig2Bytes, + ) + if err != nil { + return nil, nil, nil, err + } + + // We'll unconditionally queue the channel's existence chanProof as it + // will need to be processed before either of the channel update + // networkMsgs. + + // Since it's up to a node's policy as to whether they advertise the + // edge in a direction, we don't create an advertisement if the edge is + // nil. 
+ var edge1Ann, edge2Ann *lnwire.ChannelUpdate + if e1 != nil { + edge1Ann, err = ChannelUpdateFromEdge(chanInfo, e1) + if err != nil { + return nil, nil, nil, err + } + } + if e2 != nil { + edge2Ann, err = ChannelUpdateFromEdge(chanInfo, e2) + if err != nil { + return nil, nil, nil, err + } + } + + return chanAnn, edge1Ann, edge2Ann, nil +} diff --git a/netann/channel_update.go b/netann/channel_update.go index c405a0c6a3..4423f4ec2a 100644 --- a/netann/channel_update.go +++ b/netann/channel_update.go @@ -15,9 +15,9 @@ import ( // lnwire.ChannelUpdate. type ChannelUpdateModifier func(*lnwire.ChannelUpdate) -// ChannelUpdateSetDisable sets the disabled channel flag if disabled is true, -// and clears the bit otherwise. -func ChannelUpdateSetDisable(disabled bool) ChannelUpdateModifier { +// ChanUpdSetDisable is a functional option that sets the disabled channel flag +// if disabled is true, and clears the bit otherwise. +func ChanUpdSetDisable(disabled bool) ChannelUpdateModifier { return func(update *lnwire.ChannelUpdate) { if disabled { // Set the bit responsible for marking a channel as @@ -31,6 +31,20 @@ func ChannelUpdateSetDisable(disabled bool) ChannelUpdateModifier { } } +// ChanUpdSetTimestamp is a functional option that sets the timestamp of the +// update to the current time, or increments it if the timestamp is already in +// the future. +func ChanUpdSetTimestamp(update *lnwire.ChannelUpdate) { + newTimestamp := uint32(time.Now().Unix()) + if newTimestamp <= update.Timestamp { + // Increment the prior value to ensure the timestamp + // monotonically increases, otherwise the update won't + // propagate. + newTimestamp = update.Timestamp + 1 + } + update.Timestamp = newTimestamp +} + // SignChannelUpdate applies the given modifiers to the passed // lnwire.ChannelUpdate, then signs the resulting update. 
The provided update // should be the most recent, valid update, otherwise the timestamp may not @@ -45,23 +59,8 @@ func SignChannelUpdate(signer lnwallet.MessageSigner, pubKey *btcec.PublicKey, modifier(update) } - // Update the message's timestamp to the current time. If the update's - // current time is already in the future, we increment the prior value - // to ensure the timestamps monotonically increase, otherwise the - // update won't propagate. - newTimestamp := uint32(time.Now().Unix()) - if newTimestamp <= update.Timestamp { - newTimestamp = update.Timestamp + 1 - } - update.Timestamp = newTimestamp - - chanUpdateMsg, err := update.DataToSign() - if err != nil { - return err - } - // Create the DER-encoded ECDSA signature over the message digest. - sig, err := signer.SignMessage(pubKey, chanUpdateMsg) + sig, err := SignAnnouncement(signer, pubKey, update) if err != nil { return err } @@ -112,12 +111,12 @@ func ExtractChannelUpdate(ownerPubKey []byte, info.ChannelPoint) } -// ChannelUpdateFromEdge reconstructs a signed ChannelUpdate from the given edge -// info and policy. -func ChannelUpdateFromEdge(info *channeldb.ChannelEdgeInfo, - policy *channeldb.ChannelEdgePolicy) (*lnwire.ChannelUpdate, error) { +// UnsignedChannelUpdateFromEdge reconstructs an unsigned ChannelUpdate from the +// given edge info and policy. +func UnsignedChannelUpdateFromEdge(info *channeldb.ChannelEdgeInfo, + policy *channeldb.ChannelEdgePolicy) *lnwire.ChannelUpdate { - update := &lnwire.ChannelUpdate{ + return &lnwire.ChannelUpdate{ ChainHash: info.ChainHash, ShortChannelID: lnwire.NewShortChanIDFromInt(policy.ChannelID), Timestamp: uint32(policy.LastUpdate.Unix()), @@ -130,6 +129,14 @@ func ChannelUpdateFromEdge(info *channeldb.ChannelEdgeInfo, FeeRate: uint32(policy.FeeProportionalMillionths), ExtraOpaqueData: policy.ExtraOpaqueData, } +} + +// ChannelUpdateFromEdge reconstructs a signed ChannelUpdate from the given edge +// info and policy. 
+func ChannelUpdateFromEdge(info *channeldb.ChannelEdgeInfo, + policy *channeldb.ChannelEdgePolicy) (*lnwire.ChannelUpdate, error) { + + update := UnsignedChannelUpdateFromEdge(info, policy) var err error update.Signature, err = lnwire.NewSigFromRawSignature(policy.SigBytes) diff --git a/netann/channel_update_test.go b/netann/channel_update_test.go index 4ed019a805..1fff1d5134 100644 --- a/netann/channel_update_test.go +++ b/netann/channel_update_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/btcsuite/btcd/btcec" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/netann" @@ -17,7 +18,7 @@ type mockSigner struct { } func (m *mockSigner) SignMessage(pk *btcec.PublicKey, - msg []byte) (*btcec.Signature, error) { + msg []byte) (input.Signature, error) { if m.err != nil { return nil, m.err @@ -103,6 +104,7 @@ func TestUpdateDisableFlag(t *testing.T) { t.Parallel() for _, tc := range updateDisableTests { + tc := tc t.Run(tc.name, func(t *testing.T) { // Create the initial update, the only fields we are // concerned with in this test are the timestamp and the @@ -127,7 +129,8 @@ func TestUpdateDisableFlag(t *testing.T) { // disabled or enabled as prescribed in the test case. err := netann.SignChannelUpdate( tc.signer, pubKey, newUpdate, - netann.ChannelUpdateSetDisable(tc.disable), + netann.ChanUpdSetDisable(tc.disable), + netann.ChanUpdSetTimestamp, ) var fail bool diff --git a/netann/node_announcement.go b/netann/node_announcement.go new file mode 100644 index 0000000000..84a261059f --- /dev/null +++ b/netann/node_announcement.go @@ -0,0 +1,60 @@ +package netann + +import ( + "net" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwire" +) + +// NodeAnnModifier is a closure that makes in-place modifications to an +// lnwire.NodeAnnouncement. 
+type NodeAnnModifier func(*lnwire.NodeAnnouncement) + +// NodeAnnSetAddrs is a functional option that allows updating the addresses of +// the given node announcement. +func NodeAnnSetAddrs(addrs []net.Addr) func(*lnwire.NodeAnnouncement) { + return func(nodeAnn *lnwire.NodeAnnouncement) { + nodeAnn.Addresses = addrs + } +} + +// NodeAnnSetTimestamp is a functional option that sets the timestamp of the +// announcement to the current time, or increments it if the timestamp is +// already in the future. +func NodeAnnSetTimestamp(nodeAnn *lnwire.NodeAnnouncement) { + newTimestamp := uint32(time.Now().Unix()) + if newTimestamp <= nodeAnn.Timestamp { + // Increment the prior value to ensure the timestamp + // monotonically increases, otherwise the announcement won't + // propagate. + newTimestamp = nodeAnn.Timestamp + 1 + } + nodeAnn.Timestamp = newTimestamp +} + +// SignNodeAnnouncement applies the given modifies to the passed +// lnwire.NodeAnnouncement, then signs the resulting announcement. The provided +// update should be the most recent, valid update, otherwise the timestamp may +// not monotonically increase from the prior. +func SignNodeAnnouncement(signer lnwallet.MessageSigner, + pubKey *btcec.PublicKey, nodeAnn *lnwire.NodeAnnouncement, + mods ...NodeAnnModifier) error { + + // Apply the requested changes to the node announcement. + for _, modifier := range mods { + modifier(nodeAnn) + } + + // Create the DER-encoded ECDSA signature over the message digest. + sig, err := SignAnnouncement(signer, pubKey, nodeAnn) + if err != nil { + return err + } + + // Parse the DER-encoded signature into a fixed-size 64-byte array. 
+ nodeAnn.Signature, err = lnwire.NewSigFromSignature(sig) + return err +} diff --git a/netann/node_signer.go b/netann/node_signer.go index 8946c2c6bb..2b97c9379a 100644 --- a/netann/node_signer.go +++ b/netann/node_signer.go @@ -5,6 +5,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwallet" ) @@ -31,7 +32,7 @@ func NewNodeSigner(key *btcec.PrivateKey) *NodeSigner { // resident node's private key. If the target public key is _not_ the node's // private key, then an error will be returned. func (n *NodeSigner) SignMessage(pubKey *btcec.PublicKey, - msg []byte) (*btcec.Signature, error) { + msg []byte) (input.Signature, error) { // If this isn't our identity public key, then we'll exit early with an // error as we can't sign with this key. @@ -41,12 +42,12 @@ func (n *NodeSigner) SignMessage(pubKey *btcec.PublicKey, // Otherwise, we'll sign the dsha256 of the target message. digest := chainhash.DoubleHashB(msg) - sign, err := n.privKey.Sign(digest) + sig, err := n.privKey.Sign(digest) if err != nil { return nil, fmt.Errorf("can't sign the message: %v", err) } - return sign, nil + return sig, nil } // SignCompact signs a double-sha256 digest of the msg parameter under the diff --git a/netann/sign.go b/netann/sign.go new file mode 100644 index 0000000000..6e0bce9814 --- /dev/null +++ b/netann/sign.go @@ -0,0 +1,37 @@ +package netann + +import ( + "fmt" + + "github.com/btcsuite/btcd/btcec" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwire" +) + +// SignAnnouncement signs any type of gossip message that is announced on the +// network. 
+func SignAnnouncement(signer lnwallet.MessageSigner, pubKey *btcec.PublicKey, + msg lnwire.Message) (input.Signature, error) { + + var ( + data []byte + err error + ) + + switch m := msg.(type) { + case *lnwire.ChannelAnnouncement: + data, err = m.DataToSign() + case *lnwire.ChannelUpdate: + data, err = m.DataToSign() + case *lnwire.NodeAnnouncement: + data, err = m.DataToSign() + default: + return nil, fmt.Errorf("can't sign %T message", m) + } + if err != nil { + return nil, fmt.Errorf("unable to get data to sign: %v", err) + } + + return signer.SignMessage(pubKey, data) +} diff --git a/nursery_store.go b/nursery_store.go index d7f30d0772..bc20d1bc2c 100644 --- a/nursery_store.go +++ b/nursery_store.go @@ -7,8 +7,8 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) // Overview of Nursery Store Storage Hierarchy @@ -263,7 +263,7 @@ func newNurseryStore(chainHash *chainhash.Hash, // CSV-delayed outputs (commitment and incoming HTLC's), commitment output and // a list of outgoing two-stage htlc outputs. func (ns *nurseryStore) Incubate(kids []kidOutput, babies []babyOutput) error { - return ns.db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(ns.db, func(tx kvdb.RwTx) error { // If we have any kid outputs to incubate, then we'll attempt // to add each of them to the nursery store. Any duplicate // outputs will be ignored. @@ -290,7 +290,7 @@ func (ns *nurseryStore) Incubate(kids []kidOutput, babies []babyOutput) error { // kindergarten bucket. The now mature kidOutput contained in the babyOutput // will be stored as it waits out the kidOutput's CSV delay. 
func (ns *nurseryStore) CribToKinder(bby *babyOutput) error { - return ns.db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(ns.db, func(tx kvdb.RwTx) error { // First, retrieve or create the channel bucket corresponding to // the baby output's origin channel point. @@ -374,7 +374,7 @@ func (ns *nurseryStore) CribToKinder(bby *babyOutput) error { func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput, lastGradHeight uint32) error { - return ns.db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(ns.db, func(tx kvdb.RwTx) error { // Create or retrieve the channel bucket corresponding to the // kid output's origin channel point. chanPoint := kid.OriginChanPoint() @@ -471,7 +471,7 @@ func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput, // the height and channel indexes. The height bucket will be opportunistically // pruned from the height index as outputs are removed. func (ns *nurseryStore) GraduateKinder(height uint32, kid *kidOutput) error { - return ns.db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(ns.db, func(tx kvdb.RwTx) error { hghtBucket := ns.getHeightBucket(tx, height) if hghtBucket == nil { @@ -501,8 +501,7 @@ func (ns *nurseryStore) GraduateKinder(height uint32, kid *kidOutput) error { return err } - chanBucket := ns.getChannelBucket(tx, - chanPoint) + chanBucket := ns.getChannelBucketWrite(tx, chanPoint) if chanBucket == nil { return ErrContractNotFound } @@ -540,7 +539,7 @@ func (ns *nurseryStore) FetchClass( // processed at the provided block height. var kids []kidOutput var babies []babyOutput - if err := ns.db.View(func(tx *bbolt.Tx) error { + if err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error { // Append each crib output to our list of babyOutputs. if err := ns.forEachHeightPrefix(tx, cribPrefix, height, func(buf []byte) error { @@ -594,16 +593,16 @@ func (ns *nurseryStore) FetchClass( // preschool bucket. 
func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) { var kids []kidOutput - if err := ns.db.View(func(tx *bbolt.Tx) error { + if err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error { // Retrieve the existing chain bucket for this nursery store. - chainBucket := tx.Bucket(ns.pfxChainKey) + chainBucket := tx.ReadBucket(ns.pfxChainKey) if chainBucket == nil { return nil } // Load the existing channel index from the chain bucket. - chanIndex := chainBucket.Bucket(channelIndexKey) + chanIndex := chainBucket.NestedReadBucket(channelIndexKey) if chanIndex == nil { return nil } @@ -626,7 +625,7 @@ func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) { for _, chanBytes := range activeChannels { // Retrieve the channel bucket associated with this // channel. - chanBucket := chanIndex.Bucket(chanBytes) + chanBucket := chanIndex.NestedReadBucket(chanBytes) if chanBucket == nil { continue } @@ -635,7 +634,7 @@ func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) { // "pscl" prefix. So, we will perform a prefix scan of // the channel bucket to efficiently enumerate all the // desired outputs. - c := chanBucket.Cursor() + c := chanBucket.ReadCursor() for k, v := c.Seek(psclPrefix); bytes.HasPrefix( k, psclPrefix); k, v = c.Next() { @@ -667,16 +666,16 @@ func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) { // index at or below the provided upper bound. func (ns *nurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) { var activeHeights []uint32 - err := ns.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error { // Ensure that the chain bucket for this nursery store exists. - chainBucket := tx.Bucket(ns.pfxChainKey) + chainBucket := tx.ReadBucket(ns.pfxChainKey) if chainBucket == nil { return nil } // Ensure that the height index has been properly initialized for this // chain. 
- hghtIndex := chainBucket.Bucket(heightIndexKey) + hghtIndex := chainBucket.NestedReadBucket(heightIndexKey) if hghtIndex == nil { return nil } @@ -686,7 +685,7 @@ func (ns *nurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) { var lower, upper [4]byte byteOrder.PutUint32(upper[:], height) - c := hghtIndex.Cursor() + c := hghtIndex.ReadCursor() for k, _ := c.Seek(lower[:]); bytes.Compare(k, upper[:]) <= 0 && len(k) == 4; k, _ = c.Next() { @@ -712,7 +711,7 @@ func (ns *nurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) { func (ns *nurseryStore) ForChanOutputs(chanPoint *wire.OutPoint, callback func([]byte, []byte) error) error { - return ns.db.View(func(tx *bbolt.Tx) error { + return kvdb.View(ns.db, func(tx kvdb.ReadTx) error { return ns.forChanOutputs(tx, chanPoint, callback) }) } @@ -720,15 +719,15 @@ func (ns *nurseryStore) ForChanOutputs(chanPoint *wire.OutPoint, // ListChannels returns all channels the nursery is currently tracking. func (ns *nurseryStore) ListChannels() ([]wire.OutPoint, error) { var activeChannels []wire.OutPoint - if err := ns.db.View(func(tx *bbolt.Tx) error { + if err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error { // Retrieve the existing chain bucket for this nursery store. - chainBucket := tx.Bucket(ns.pfxChainKey) + chainBucket := tx.ReadBucket(ns.pfxChainKey) if chainBucket == nil { return nil } // Retrieve the existing channel index. - chanIndex := chainBucket.Bucket(channelIndexKey) + chanIndex := chainBucket.NestedReadBucket(channelIndexKey) if chanIndex == nil { return nil } @@ -754,7 +753,7 @@ func (ns *nurseryStore) ListChannels() ([]wire.OutPoint, error) { // IsMatureChannel determines the whether or not all of the outputs in a // particular channel bucket have been marked as graduated. 
func (ns *nurseryStore) IsMatureChannel(chanPoint *wire.OutPoint) (bool, error) { - err := ns.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error { // Iterate over the contents of the channel bucket, computing // both total number of outputs, and those that have the grad // prefix. @@ -783,15 +782,15 @@ var ErrImmatureChannel = errors.New("cannot remove immature channel, " + // provided channel point. // NOTE: The channel's entries in the height index are assumed to be removed. func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error { - return ns.db.Update(func(tx *bbolt.Tx) error { + return kvdb.Update(ns.db, func(tx kvdb.RwTx) error { // Retrieve the existing chain bucket for this nursery store. - chainBucket := tx.Bucket(ns.pfxChainKey) + chainBucket := tx.ReadWriteBucket(ns.pfxChainKey) if chainBucket == nil { return nil } // Retrieve the channel index stored in the chain bucket. - chanIndex := chainBucket.Bucket(channelIndexKey) + chanIndex := chainBucket.NestedReadWriteBucket(channelIndexKey) if chanIndex == nil { return nil } @@ -824,7 +823,7 @@ func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error { maturityHeight := kid.ConfHeight() + kid.BlocksToMaturity() - hghtBucket := ns.getHeightBucket(tx, maturityHeight) + hghtBucket := ns.getHeightBucketWrite(tx, maturityHeight) if hghtBucket == nil { return nil } @@ -845,7 +844,7 @@ func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error { // its two-stage process of sweeping funds back to the user's wallet. These // outputs are persisted in the nursery store in the crib state, and will be // revisited after the first-stage output's CLTV has expired. -func (ns *nurseryStore) enterCrib(tx *bbolt.Tx, baby *babyOutput) error { +func (ns *nurseryStore) enterCrib(tx kvdb.RwTx, baby *babyOutput) error { // First, retrieve or create the channel bucket corresponding to the // baby output's origin channel point. 
chanPoint := baby.OriginChanPoint() @@ -902,7 +901,7 @@ func (ns *nurseryStore) enterCrib(tx *bbolt.Tx, baby *babyOutput) error { // through a single stage before sweeping. Outputs are stored in the preschool // bucket until the commitment transaction has been confirmed, at which point // they will be moved to the kindergarten bucket. -func (ns *nurseryStore) enterPreschool(tx *bbolt.Tx, kid *kidOutput) error { +func (ns *nurseryStore) enterPreschool(tx kvdb.RwTx, kid *kidOutput) error { // First, retrieve or create the channel bucket corresponding to the // baby output's origin channel point. chanPoint := kid.OriginChanPoint() @@ -935,11 +934,11 @@ func (ns *nurseryStore) enterPreschool(tx *bbolt.Tx, kid *kidOutput) error { // createChannelBucket creates or retrieves a channel bucket for the provided // channel point. -func (ns *nurseryStore) createChannelBucket(tx *bbolt.Tx, - chanPoint *wire.OutPoint) (*bbolt.Bucket, error) { +func (ns *nurseryStore) createChannelBucket(tx kvdb.RwTx, + chanPoint *wire.OutPoint) (kvdb.RwBucket, error) { // Ensure that the chain bucket for this nursery store exists. - chainBucket, err := tx.CreateBucketIfNotExists(ns.pfxChainKey) + chainBucket, err := tx.CreateTopLevelBucket(ns.pfxChainKey) if err != nil { return nil, err } @@ -966,17 +965,17 @@ func (ns *nurseryStore) createChannelBucket(tx *bbolt.Tx, // getChannelBucket retrieves an existing channel bucket from the nursery store, // using the given channel point. If the bucket does not exist, or any bucket // along its path does not exist, a nil value is returned. -func (ns *nurseryStore) getChannelBucket(tx *bbolt.Tx, - chanPoint *wire.OutPoint) *bbolt.Bucket { +func (ns *nurseryStore) getChannelBucket(tx kvdb.ReadTx, + chanPoint *wire.OutPoint) kvdb.ReadBucket { // Retrieve the existing chain bucket for this nursery store. 
- chainBucket := tx.Bucket(ns.pfxChainKey) + chainBucket := tx.ReadBucket(ns.pfxChainKey) if chainBucket == nil { return nil } // Retrieve the existing channel index. - chanIndex := chainBucket.Bucket(channelIndexKey) + chanIndex := chainBucket.NestedReadBucket(channelIndexKey) if chanIndex == nil { return nil } @@ -988,16 +987,44 @@ func (ns *nurseryStore) getChannelBucket(tx *bbolt.Tx, return nil } - return chanIndex.Bucket(chanBuffer.Bytes()) + return chanIndex.NestedReadBucket(chanBuffer.Bytes()) +} + +// getChannelBucketWrite retrieves an existing channel bucket from the nursery store, +// using the given channel point. If the bucket does not exist, or any bucket +// along its path does not exist, a nil value is returned. +func (ns *nurseryStore) getChannelBucketWrite(tx kvdb.RwTx, + chanPoint *wire.OutPoint) kvdb.RwBucket { + + // Retrieve the existing chain bucket for this nursery store. + chainBucket := tx.ReadWriteBucket(ns.pfxChainKey) + if chainBucket == nil { + return nil + } + + // Retrieve the existing channel index. + chanIndex := chainBucket.NestedReadWriteBucket(channelIndexKey) + if chanIndex == nil { + return nil + } + + // Serialize the provided channel point and return the bucket matching + // the serialized key. + var chanBuffer bytes.Buffer + if err := writeOutpoint(&chanBuffer, chanPoint); err != nil { + return nil + } + + return chanIndex.NestedReadWriteBucket(chanBuffer.Bytes()) } // createHeightBucket creates or retrieves an existing bucket from the height // index, corresponding to the provided height. -func (ns *nurseryStore) createHeightBucket(tx *bbolt.Tx, - height uint32) (*bbolt.Bucket, error) { +func (ns *nurseryStore) createHeightBucket(tx kvdb.RwTx, + height uint32) (kvdb.RwBucket, error) { // Ensure that the chain bucket for this nursery store exists. 
- chainBucket, err := tx.CreateBucketIfNotExists(ns.pfxChainKey) + chainBucket, err := tx.CreateTopLevelBucket(ns.pfxChainKey) if err != nil { return nil, err } @@ -1021,17 +1048,17 @@ func (ns *nurseryStore) createHeightBucket(tx *bbolt.Tx, // getHeightBucketPath retrieves an existing height bucket from the nursery // store, using the provided block height. If the bucket does not exist, or any // bucket along its path does not exist, a nil value is returned. -func (ns *nurseryStore) getHeightBucketPath(tx *bbolt.Tx, - height uint32) (*bbolt.Bucket, *bbolt.Bucket, *bbolt.Bucket) { +func (ns *nurseryStore) getHeightBucketPath(tx kvdb.ReadTx, + height uint32) (kvdb.ReadBucket, kvdb.ReadBucket, kvdb.ReadBucket) { // Retrieve the existing chain bucket for this nursery store. - chainBucket := tx.Bucket(ns.pfxChainKey) + chainBucket := tx.ReadBucket(ns.pfxChainKey) if chainBucket == nil { return nil, nil, nil } // Retrieve the existing channel index. - hghtIndex := chainBucket.Bucket(heightIndexKey) + hghtIndex := chainBucket.NestedReadBucket(heightIndexKey) if hghtIndex == nil { return nil, nil, nil } @@ -1041,24 +1068,63 @@ func (ns *nurseryStore) getHeightBucketPath(tx *bbolt.Tx, var heightBytes [4]byte byteOrder.PutUint32(heightBytes[:], height) - return chainBucket, hghtIndex, hghtIndex.Bucket(heightBytes[:]) + return chainBucket, hghtIndex, hghtIndex.NestedReadBucket(heightBytes[:]) +} + +// getHeightBucketPathWrite retrieves an existing height bucket from the nursery +// store, using the provided block height. If the bucket does not exist, or any +// bucket along its path does not exist, a nil value is returned. +func (ns *nurseryStore) getHeightBucketPathWrite(tx kvdb.RwTx, + height uint32) (kvdb.RwBucket, kvdb.RwBucket, kvdb.RwBucket) { + + // Retrieve the existing chain bucket for this nursery store. + chainBucket := tx.ReadWriteBucket(ns.pfxChainKey) + if chainBucket == nil { + return nil, nil, nil + } + + // Retrieve the existing channel index. 
+ hghtIndex := chainBucket.NestedReadWriteBucket(heightIndexKey) + if hghtIndex == nil { + return nil, nil, nil + } + + // Serialize the provided block height and return the bucket matching + // the serialized key. + var heightBytes [4]byte + byteOrder.PutUint32(heightBytes[:], height) + + return chainBucket, hghtIndex, hghtIndex.NestedReadWriteBucket( + heightBytes[:], + ) } // getHeightBucket retrieves an existing height bucket from the nursery store, // using the provided block height. If the bucket does not exist, or any bucket // along its path does not exist, a nil value is returned. -func (ns *nurseryStore) getHeightBucket(tx *bbolt.Tx, - height uint32) *bbolt.Bucket { +func (ns *nurseryStore) getHeightBucket(tx kvdb.ReadTx, + height uint32) kvdb.ReadBucket { _, _, hghtBucket := ns.getHeightBucketPath(tx, height) return hghtBucket } +// getHeightBucketWrite retrieves an existing height bucket from the nursery store, +// using the provided block height. If the bucket does not exist, or any bucket +// along its path does not exist, a nil value is returned. +func (ns *nurseryStore) getHeightBucketWrite(tx kvdb.RwTx, + height uint32) kvdb.RwBucket { + + _, _, hghtBucket := ns.getHeightBucketPathWrite(tx, height) + + return hghtBucket +} + // createHeightChanBucket creates or retrieves an existing height-channel bucket // for the provided block height and channel point. This method will attempt to // instantiate all buckets along the path if required. -func (ns *nurseryStore) createHeightChanBucket(tx *bbolt.Tx, - height uint32, chanPoint *wire.OutPoint) (*bbolt.Bucket, error) { +func (ns *nurseryStore) createHeightChanBucket(tx kvdb.RwTx, + height uint32, chanPoint *wire.OutPoint) (kvdb.RwBucket, error) { // Ensure that the height bucket for this nursery store exists. 
hghtBucket, err := ns.createHeightBucket(tx, height) @@ -1083,8 +1149,8 @@ func (ns *nurseryStore) createHeightChanBucket(tx *bbolt.Tx, // nursery store, using the provided block height and channel point. if the // bucket does not exist, or any bucket along its path does not exist, a nil // value is returned. -func (ns *nurseryStore) getHeightChanBucket(tx *bbolt.Tx, - height uint32, chanPoint *wire.OutPoint) *bbolt.Bucket { +func (ns *nurseryStore) getHeightChanBucket(tx kvdb.ReadTx, // nolint:unused + height uint32, chanPoint *wire.OutPoint) kvdb.ReadBucket { // Retrieve the existing height bucket from this nursery store. hghtBucket := ns.getHeightBucket(tx, height) @@ -1102,7 +1168,33 @@ func (ns *nurseryStore) getHeightChanBucket(tx *bbolt.Tx, // Finally, return the height bucket specified by the serialized channel // point. - return hghtBucket.Bucket(chanBytes) + return hghtBucket.NestedReadBucket(chanBytes) +} + +// getHeightChanBucketWrite retrieves an existing height-channel bucket from the +// nursery store, using the provided block height and channel point. if the +// bucket does not exist, or any bucket along its path does not exist, a nil +// value is returned. +func (ns *nurseryStore) getHeightChanBucketWrite(tx kvdb.RwTx, + height uint32, chanPoint *wire.OutPoint) kvdb.RwBucket { + + // Retrieve the existing height bucket from this nursery store. + hghtBucket := ns.getHeightBucketWrite(tx, height) + if hghtBucket == nil { + return nil + } + + // Serialize the provided channel point, which generates the key for + // looking up the proper height-channel bucket inside the height bucket. + var chanBuffer bytes.Buffer + if err := writeOutpoint(&chanBuffer, chanPoint); err != nil { + return nil + } + chanBytes := chanBuffer.Bytes() + + // Finally, return the height bucket specified by the serialized channel + // point. 
+ return hghtBucket.NestedReadWriteBucket(chanBytes) } // forEachHeightPrefix enumerates all outputs at the given height whose state @@ -1110,7 +1202,7 @@ func (ns *nurseryStore) getHeightChanBucket(tx *bbolt.Tx, // enumerate crib and kindergarten outputs at a particular height. The callback // is invoked with serialized bytes retrieved for each output of interest, // allowing the caller to deserialize them into the appropriate type. -func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte, +func (ns *nurseryStore) forEachHeightPrefix(tx kvdb.ReadTx, prefix []byte, height uint32, callback func([]byte) error) error { // Start by retrieving the height bucket corresponding to the provided @@ -1138,7 +1230,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte, // Additionally, grab the chain index, which we will facilitate queries // for each of the channel buckets of each of the channels in the list // we assembled above. - chanIndex := chainBucket.Bucket(channelIndexKey) + chanIndex := chainBucket.NestedReadBucket(channelIndexKey) if chanIndex == nil { return errors.New("unable to retrieve channel index") } @@ -1151,7 +1243,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte, for _, chanBytes := range channelsAtHeight { // Retrieve the height-channel bucket for this channel, which // holds a sub-bucket for all outputs maturing at this height. - hghtChanBucket := hghtBucket.Bucket(chanBytes) + hghtChanBucket := hghtBucket.NestedReadBucket(chanBytes) if hghtChanBucket == nil { return fmt.Errorf("unable to retrieve height-channel "+ "bucket at height %d for %x", height, chanBytes) @@ -1160,7 +1252,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte, // Load the appropriate channel bucket from the channel index, // this will allow us to retrieve the individual serialized // outputs. 
- chanBucket := chanIndex.Bucket(chanBytes) + chanBucket := chanIndex.NestedReadBucket(chanBytes) if chanBucket == nil { return fmt.Errorf("unable to retrieve channel "+ "bucket: '%x'", chanBytes) @@ -1170,7 +1262,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte, // prefix, we will perform a prefix scan of the buckets // contained in the height-channel bucket, efficiently // enumerating the desired outputs. - c := hghtChanBucket.Cursor() + c := hghtChanBucket.ReadCursor() for k, _ := c.Seek(prefix); bytes.HasPrefix( k, prefix); k, _ = c.Next() { @@ -1198,7 +1290,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte, // provided callback. The callback accepts a key-value pair of byte slices // corresponding to the prefixed-output key and the serialized output, // respectively. -func (ns *nurseryStore) forChanOutputs(tx *bbolt.Tx, chanPoint *wire.OutPoint, +func (ns *nurseryStore) forChanOutputs(tx kvdb.ReadTx, chanPoint *wire.OutPoint, callback func([]byte, []byte) error) error { chanBucket := ns.getChannelBucket(tx, chanPoint) @@ -1216,11 +1308,11 @@ var errBucketNotEmpty = errors.New("bucket is not empty, cannot be pruned") // removeOutputFromHeight will delete the given output from the specified // height-channel bucket, and attempt to prune the upstream directories if they // are empty. -func (ns *nurseryStore) removeOutputFromHeight(tx *bbolt.Tx, height uint32, +func (ns *nurseryStore) removeOutputFromHeight(tx kvdb.RwTx, height uint32, chanPoint *wire.OutPoint, pfxKey []byte) error { // Retrieve the height-channel bucket and delete the prefixed output. - hghtChanBucket := ns.getHeightChanBucket(tx, height, chanPoint) + hghtChanBucket := ns.getHeightChanBucketWrite(tx, height, chanPoint) if hghtChanBucket == nil { // Height-channel bucket already removed. 
return nil @@ -1233,7 +1325,7 @@ func (ns *nurseryStore) removeOutputFromHeight(tx *bbolt.Tx, height uint32, } // Retrieve the height bucket that contains the height-channel bucket. - hghtBucket := ns.getHeightBucket(tx, height) + hghtBucket := ns.getHeightBucketWrite(tx, height) if hghtBucket == nil { return errors.New("height bucket not found") } @@ -1268,9 +1360,9 @@ func (ns *nurseryStore) removeOutputFromHeight(tx *bbolt.Tx, height uint32, // all active outputs at this height have been removed from their respective // height-channel buckets. The returned boolean value indicated whether or not // this invocation successfully pruned the height bucket. -func (ns *nurseryStore) pruneHeight(tx *bbolt.Tx, height uint32) (bool, error) { +func (ns *nurseryStore) pruneHeight(tx kvdb.RwTx, height uint32) (bool, error) { // Fetch the existing height index and height bucket. - _, hghtIndex, hghtBucket := ns.getHeightBucketPath(tx, height) + _, hghtIndex, hghtBucket := ns.getHeightBucketPathWrite(tx, height) if hghtBucket == nil { return false, nil } @@ -1287,7 +1379,7 @@ func (ns *nurseryStore) pruneHeight(tx *bbolt.Tx, height uint32) (bool, error) { // Attempt to each height-channel bucket from the height bucket // located above. - hghtChanBucket := hghtBucket.Bucket(chanBytes) + hghtChanBucket := hghtBucket.NestedReadWriteBucket(chanBytes) if hghtChanBucket == nil { return errors.New("unable to find height-channel bucket") } @@ -1315,9 +1407,9 @@ func (ns *nurseryStore) pruneHeight(tx *bbolt.Tx, height uint32) (bool, error) { // removeBucketIfEmpty attempts to delete a bucket specified by name from the // provided parent bucket. -func removeBucketIfEmpty(parent *bbolt.Bucket, bktName []byte) error { +func removeBucketIfEmpty(parent kvdb.RwBucket, bktName []byte) error { // Attempt to fetch the named bucket from its parent. - bkt := parent.Bucket(bktName) + bkt := parent.NestedReadWriteBucket(bktName) if bkt == nil { // No bucket was found, already removed? 
return nil @@ -1328,25 +1420,25 @@ func removeBucketIfEmpty(parent *bbolt.Bucket, bktName []byte) error { return err } - return parent.DeleteBucket(bktName) + return parent.DeleteNestedBucket(bktName) } // removeBucketIfExists safely deletes the named bucket by first checking // that it exists in the parent bucket. -func removeBucketIfExists(parent *bbolt.Bucket, bktName []byte) error { +func removeBucketIfExists(parent kvdb.RwBucket, bktName []byte) error { // Attempt to fetch the named bucket from its parent. - bkt := parent.Bucket(bktName) + bkt := parent.NestedReadWriteBucket(bktName) if bkt == nil { // No bucket was found, already removed? return nil } - return parent.DeleteBucket(bktName) + return parent.DeleteNestedBucket(bktName) } // isBucketEmpty returns errBucketNotEmpty if the bucket has a non-zero number // of children. -func isBucketEmpty(parent *bbolt.Bucket) error { +func isBucketEmpty(parent kvdb.ReadBucket) error { return parent.ForEach(func(_, _ []byte) error { return errBucketNotEmpty }) diff --git a/peer.go b/peer.go index ade90b41b5..3ae9977781 100644 --- a/peer.go +++ b/peer.go @@ -21,12 +21,15 @@ import ( "github.com/lightningnetwork/lnd/buffer" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channelnotifier" "github.com/lightningnetwork/lnd/contractcourt" + "github.com/lightningnetwork/lnd/feature" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/lnpeer" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/pool" + "github.com/lightningnetwork/lnd/queue" "github.com/lightningnetwork/lnd/ticker" ) @@ -51,6 +54,9 @@ const ( // messages to be sent across the wire, requested by objects outside // this struct. outgoingQueueLen = 50 + + // errorBufferSize is the number of historic peer errors that we store. 
+	errorBufferSize = 10
 )
 
 // outgoingMsg packages an lnwire.Message to be sent out on the wire, along with
@@ -90,6 +96,13 @@ type channelCloseUpdate struct {
 	Success bool
 }
 
+// timestampedError is a timestamped error that is used to store the most recent
+// errors we have experienced with our peers.
+type timestampedError struct {
+	error     error
+	timestamp time.Time
+}
+
 // peer is an active peer on the Lightning Network. This struct is responsible
 // for managing any channel state related to this peer. To do so, it has
 // several helper goroutines to handle events such as HTLC timeouts, new
@@ -147,6 +160,13 @@ type peer struct {
 	// activeChannels is a map which stores the state machines of all
 	// active channels. Channels are indexed into the map by the txid of
 	// the funding transaction which opened the channel.
+	//
+	// NOTE: On startup, pending channels are stored as nil in this map.
+	// Confirmed channels have channel data populated in the map. This means
+	// that accesses to this map should nil-check the LightningChannel to
+	// see if this is a pending channel or not. The tradeoff here is either
+	// having two maps everywhere (one for pending, one for confirmed chans)
+	// or having an extra nil-check per access.
 	activeChannels map[lnwire.ChannelID]*lnwallet.LightningChannel
 
 	// addedChannels tracks any new channels opened during this peer's
@@ -190,29 +210,38 @@ type peer struct {
 
 	server *server
 
-	// localFeatures is the set of local features that we advertised to the
-	// remote node.
-	localFeatures *lnwire.RawFeatureVector
+	// features is the set of features that we advertised to the remote
+	// node.
+	features *lnwire.FeatureVector
+
+	// legacyFeatures is the set of features that we advertised to the remote
+	// node for backwards compatibility. Nodes that have not implemented
+	// flat features will still be able to read our feature bits from the
+	// legacy global field, but we will also advertise everything in the
+	// default features field.
+ legacyFeatures *lnwire.FeatureVector // outgoingCltvRejectDelta defines the number of blocks before expiry of // an htlc where we don't offer an htlc anymore. outgoingCltvRejectDelta uint32 - // remoteLocalFeatures is the local feature vector received from the - // peer during the connection handshake. - remoteLocalFeatures *lnwire.FeatureVector + // remoteFeatures is the feature vector received from the peer during + // the connection handshake. + remoteFeatures *lnwire.FeatureVector - // remoteGlobalFeatures is the global feature vector received from the - // peer during the connection handshake. - remoteGlobalFeatures *lnwire.FeatureVector + // resentChanSyncMsg is a set that keeps track of which channels we + // have re-sent channel reestablishment messages for. This is done to + // avoid getting into loop where both peers will respond to the other + // peer's chansync message with its own over and over again. + resentChanSyncMsg map[lnwire.ChannelID]struct{} - // failedChannels is a set that tracks channels we consider `failed`. - // This is a temporary measure until we have implemented real failure - // handling at the link level, to handle the case where we reconnect to - // a peer and try to re-sync a failed channel, triggering a disconnect - // loop. - // TODO(halseth): remove when link failure is properly handled. - failedChannels map[lnwire.ChannelID]struct{} + // errorBuffer stores a set of errors related to a peer. It contains + // error messages that our peer has recently sent us over the wire and + // records of unknown messages that were sent to us and, so that we can + // track a full record of the communication errors we have had with our + // peer. If we choose to disconnect from a peer, it also stores the + // reason we had for disconnecting. + errorBuffer *queue.CircularBuffer // writePool is the task pool to that manages reuse of write buffers. 
// Write tasks are submitted to the pool in order to conserve the total @@ -231,12 +260,15 @@ type peer struct { var _ lnpeer.Peer = (*peer)(nil) // newPeer creates a new peer from an establish connection object, and a -// pointer to the main server. +// pointer to the main server. It takes an error buffer which may contain errors +// from a previous connection with the peer if we have been connected to them +// before. func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server, addr *lnwire.NetAddress, inbound bool, - localFeatures *lnwire.RawFeatureVector, + features, legacyFeatures *lnwire.FeatureVector, chanActiveTimeout time.Duration, - outgoingCltvRejectDelta uint32) ( + outgoingCltvRejectDelta uint32, + errBuffer *queue.CircularBuffer) ( *peer, error) { nodePub := addr.IdentityKey @@ -252,7 +284,8 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server, server: server, - localFeatures: localFeatures, + features: features, + legacyFeatures: legacyFeatures, outgoingCltvRejectDelta: outgoingCltvRejectDelta, @@ -269,10 +302,12 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server, localCloseChanReqs: make(chan *htlcswitch.ChanClose), linkFailures: make(chan linkFailureReport), chanCloseMsgs: make(chan *closeMsg), - failedChannels: make(map[lnwire.ChannelID]struct{}), + resentChanSyncMsg: make(map[lnwire.ChannelID]struct{}), chanActiveTimeout: chanActiveTimeout, + errorBuffer: errBuffer, + writePool: server.writePool, readPool: server.readPool, @@ -335,6 +370,7 @@ func (p *peer) Start() error { msg := <-msgChan if msg, ok := msg.(*lnwire.Init); ok { if err := p.handleInitMsg(msg); err != nil { + p.storeError(err) return err } } else { @@ -389,6 +425,19 @@ func (p *peer) Start() error { } } + // Node announcements don't propagate very well throughout the network + // as there isn't a way to efficiently query for them through their + // timestamp, mostly affecting nodes that were offline during the time + // of broadcast. 
We'll resend our node announcement to the remote peer + // as a best-effort delivery such that it can also propagate to their + // peers. To ensure they can successfully process it in most cases, + // we'll only resend it as long as we have at least one confirmed + // advertised channel with the remote peer. + // + // TODO(wilmer): Remove this once we're able to query for node + // announcements through their timestamps. + p.maybeSendNodeAnn(activeChans) + return nil } @@ -399,7 +448,7 @@ func (p *peer) initGossipSync() { // If the remote peer knows of the new gossip queries feature, then // we'll create a new gossipSyncer in the AuthenticatedGossiper for it. - case p.remoteLocalFeatures.HasFeature(lnwire.GossipQueriesOptional): + case p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional): srvrLog.Infof("Negotiated chan series queries with %x", p.pubKeyBytes[:]) @@ -455,11 +504,9 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( // Skip adding any permanently irreconcilable channels to the // htlcswitch. switch { - case dbChan.HasChanStatus(channeldb.ChanStatusBorked): - fallthrough - case dbChan.HasChanStatus(channeldb.ChanStatusCommitBroadcasted): - fallthrough - case dbChan.HasChanStatus(channeldb.ChanStatusLocalDataLoss): + case !dbChan.HasChanStatus(channeldb.ChanStatusDefault) && + !dbChan.HasChanStatus(channeldb.ChanStatusRestored): + peerLog.Warnf("ChannelPoint(%v) has status %v, won't "+ "start.", chanPoint, dbChan.ChanStatus()) @@ -481,14 +528,6 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( continue } - // Also skip adding any channel marked as `failed` for this - // session. 
- if _, ok := p.failedChannels[chanID]; ok { - peerLog.Warnf("ChannelPoint(%v) is failed, won't "+ - "start.", chanPoint) - continue - } - _, currentHeight, err := p.server.cc.chainIO.GetBestBlock() if err != nil { return nil, err @@ -525,7 +564,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( var forwardingPolicy *htlcswitch.ForwardingPolicy if selfPolicy != nil { forwardingPolicy = &htlcswitch.ForwardingPolicy{ - MinHTLC: selfPolicy.MinHTLC, + MinHTLCOut: selfPolicy.MinHTLC, MaxHTLC: selfPolicy.MaxHTLC, BaseFee: selfPolicy.FeeBaseMSat, FeeRate: selfPolicy.FeeProportionalMillionths, @@ -541,9 +580,21 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( peerLog.Tracef("Using link policy of: %v", spew.Sdump(forwardingPolicy)) - // Register this new channel link with the HTLC Switch. This is - // necessary to properly route multi-hop payments, and forward - // new payments triggered by RPC clients. + // If the channel is pending, set the value to nil in the + // activeChannels map. This is done to signify that the channel is + // pending. We don't add the link to the switch here - it's the funding + // manager's responsibility to spin up pending channels. Adding them + // here would just be extra work as we'll tear them down when creating + // + adding the final link. + if lnChan.IsPending() { + p.activeChanMtx.Lock() + p.activeChannels[chanID] = nil + p.activeChanMtx.Unlock() + + continue + } + + // Subscribe to the set of on-chain events for this channel. chainEvents, err := p.server.chainArb.SubscribeChannelEvents( *chanPoint, ) @@ -551,7 +602,6 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) ( return nil, err } - // Create the link and add it to the switch. 
err = p.addLink( chanPoint, lnChan, forwardingPolicy, chainEvents, currentHeight, true, @@ -619,6 +669,7 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, SyncStates: syncStates, BatchTicker: ticker.New(50 * time.Millisecond), FwdPkgGCTicker: ticker.New(time.Minute), + PendingCommitTicker: ticker.New(time.Minute), BatchSize: 10, UnsafeReplay: cfg.UnsafeReplay, MinFeeUpdateTimeout: htlcswitch.DefaultMinLinkFeeUpdateTimeout, @@ -627,8 +678,10 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, TowerClient: p.server.towerClient, MaxOutgoingCltvExpiry: cfg.MaxOutgoingCltvExpiry, MaxFeeAllocation: cfg.MaxChannelFeeAllocation, + NotifyActiveLink: p.server.channelNotifier.NotifyActiveLinkEvent, NotifyActiveChannel: p.server.channelNotifier.NotifyActiveChannelEvent, NotifyInactiveChannel: p.server.channelNotifier.NotifyInactiveChannelEvent, + HtlcNotifier: p.server.htlcNotifier, } link := htlcswitch.NewChannelLink(linkCfg, lnChan) @@ -645,6 +698,37 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, return p.server.htlcSwitch.AddLink(link) } +// maybeSendNodeAnn sends our node announcement to the remote peer if at least +// one confirmed advertised channel exists with them. +func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) { + hasConfirmedPublicChan := false + for _, channel := range channels { + if channel.IsPending { + continue + } + if channel.ChannelFlags&lnwire.FFAnnounceChannel == 0 { + continue + } + + hasConfirmedPublicChan = true + break + } + if !hasConfirmedPublicChan { + return + } + + ourNodeAnn, err := p.server.genNodeAnnouncement(false) + if err != nil { + srvrLog.Debugf("Unable to retrieve node announcement: %v", err) + return + } + + if err := p.SendMessageLazy(false, &ourNodeAnn); err != nil { + srvrLog.Debugf("Unable to resend node announcement to %x: %v", + p.pubKeyBytes, err) + } +} + // WaitForDisconnect waits until the peer has disconnected. 
A peer may be // disconnected if the local or remote side terminating the connection, or an // irrecoverable protocol error has been encountered. This method will only @@ -670,7 +754,10 @@ func (p *peer) Disconnect(reason error) { return } - peerLog.Infof("Disconnecting %s, reason: %v", p, reason) + err := fmt.Errorf("disconnecting %s, reason: %v", p, reason) + p.storeError(err) + + peerLog.Infof(err.Error()) // Ensure that the TCP connection is properly closed before continuing. p.conn.Close() @@ -899,6 +986,69 @@ func (ms *msgStream) AddMsg(msg lnwire.Message) { ms.msgCond.Signal() } +// waitUntilLinkActive waits until the target link is active and returns a +// ChannelLink to pass messages to. It accomplishes this by subscribing to +// an ActiveLinkEvent which is emitted by the link when it first starts up. +func waitUntilLinkActive(p *peer, cid lnwire.ChannelID) htlcswitch.ChannelLink { + // Subscribe to receive channel events. + // + // NOTE: If the link is already active by SubscribeChannelEvents, then + // GetLink will retrieve the link and we can send messages. If the link + // becomes active between SubscribeChannelEvents and GetLink, then GetLink + // will retrieve the link. If the link becomes active after GetLink, then + // we will get an ActiveLinkEvent notification and retrieve the link. If + // the call to GetLink is before SubscribeChannelEvents, however, there + // will be a race condition. + sub, err := p.server.channelNotifier.SubscribeChannelEvents() + if err != nil { + // If we have a non-nil error, then the server is shutting down and we + // can exit here and return nil. This means no message will be delivered + // to the link. + return nil + } + defer sub.Cancel() + + // The link may already be active by this point, and we may have missed the + // ActiveLinkEvent. Check if the link exists. + link, _ := p.server.htlcSwitch.GetLink(cid) + if link != nil { + return link + } + + // If the link is nil, we must wait for it to be active. 
+ for { + select { + // A new event has been sent by the ChannelNotifier. We first check + // whether the event is an ActiveLinkEvent. If it is, we'll check + // that the event is for this channel. Otherwise, we discard the + // message. + case e := <-sub.Updates(): + event, ok := e.(channelnotifier.ActiveLinkEvent) + if !ok { + // Ignore this notification. + continue + } + + chanPoint := event.ChannelPoint + + // Check whether the retrieved chanPoint matches the target + // channel id. + if !cid.IsChanPoint(chanPoint) { + continue + } + + // The link shouldn't be nil as we received an + // ActiveLinkEvent. If it is nil, we return nil and the + // calling function should catch it. + link, _ = p.server.htlcSwitch.GetLink(cid) + return link + + case <-p.quit: + return nil + } + } +} + // newChanMsgStream is used to create a msgStream between the peer and // particular channel link in the htlcswitch. We utilize additional // synchronization with the fundingManager to ensure we don't attempt to @@ -914,51 +1064,17 @@ func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream { fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]), 1000, func(msg lnwire.Message) { - _, isChanSyncMsg := msg.(*lnwire.ChannelReestablish) - - // If this is the chanSync message, then we'll deliver - // it immediately to the active link. - if !isChanSyncMsg { - // We'll send a message to the funding manager - // and wait iff an active funding process for - // this channel hasn't yet completed. We do - // this in order to account for the following - // scenario: we send the funding locked message - // to the other side, they immediately send a - // channel update message, but we haven't yet - // sent the channel to the channelManager. - err := p.server.fundingMgr.waitUntilChannelOpen( - cid, p.quit, - ) - if err != nil { - // If we have a non-nil error, then the - // funding manager is shutting down, s - // we can exit here without attempting - // to deliver the message. 
- return - } - } - - // In order to avoid unnecessarily delivering message - // as the peer is exiting, we'll check quickly to see - // if we need to exit. - select { - case <-p.quit: - return - default: - } - - // Dispatch the commitment update message to the proper - // active goroutine dedicated to this channel. + // This check is fine because if the link no longer exists, it will + // be removed from the activeChannels map and subsequent messages + // shouldn't reach the chan msg stream. if chanLink == nil { - link, err := p.server.htlcSwitch.GetLink(cid) - if err != nil { - peerLog.Errorf("recv'd update for "+ - "unknown channel %v from %v: "+ - "%v", cid, p, err) + chanLink = waitUntilLinkActive(p, cid) + + // If the link is still not active and the calling function + // errored out, just return. + if chanLink == nil { return } - chanLink = link } // In order to avoid unnecessarily delivering message @@ -1028,12 +1144,17 @@ out: peerLog.Infof("unable to read message from %v: %v", p, err) - switch err.(type) { + // If we could not read our peer's message due to an + // unknown type or invalid alias, we continue processing + // as normal. We store unknown message and address + // types, as they may provide debugging insight. + switch e := err.(type) { // If this is just a message we don't yet recognize, // we'll continue processing as normal as this allows // us to introduce new messages in a forwards // compatible manner. case *lnwire.UnknownMessage: + p.storeError(e) idleTimer.Reset(idleTimeout) continue @@ -1042,12 +1163,15 @@ out: // simply continue parsing the remainder of their // messages. case *lnwire.ErrUnknownAddrType: + p.storeError(e) idleTimer.Reset(idleTimeout) continue // If the NodeAnnouncement has an invalid alias, then // we'll log that error above and continue so we can - // continue to read messges from the peer. + // continue to read messages from the peer. We do not + // store this error because it is of little debugging + // value. 
 	case *lnwire.ErrInvalidNodeAlias:
 		idleTimer.Reset(idleTimeout)
 		continue
@@ -1143,8 +1267,13 @@ out:
 			discStream.AddMsg(msg)
 
 		default:
-			peerLog.Errorf("unknown message %v received from peer "+
-				"%v", uint16(msg.MsgType()), p)
+			// If the message we received is unknown to us, store
+			// the type to track the failure.
+			err := fmt.Errorf("unknown message type %v received",
+				uint16(msg.MsgType()))
+			p.storeError(err)
+
+			peerLog.Errorf("peer: %v, %v", p, err)
 		}
 
 		if isLinkUpdate {
@@ -1183,24 +1312,56 @@ func (p *peer) isActiveChannel(chanID lnwire.ChannelID) bool {
 	return ok
 }
 
+// storeError stores an error in our peer's buffer of recent errors with the
+// current timestamp. Errors are only stored if we have at least one active
+// channel with the peer to mitigate dos attack vectors where a peer costlessly
+// connects to us and spams us with errors.
+func (p *peer) storeError(err error) {
+	var haveChannels bool
+
+	p.activeChanMtx.RLock()
+	for _, channel := range p.activeChannels {
+		// Pending channels will be nil in the activeChannels map.
+		if channel == nil {
+			continue
+		}
+
+		haveChannels = true
+		break
+	}
+	p.activeChanMtx.RUnlock()
+
+	// If we do not have any active channels with the peer, we do not store
+	// errors as a dos mitigation.
+	if !haveChannels {
+		peerLog.Tracef("no channels with peer: %v, not storing err", p)
+		return
+	}
+
+	p.errorBuffer.Add(
+		&timestampedError{timestamp: time.Now(), error: err},
+	)
+}
+
 // handleError processes an error message read from the remote peer. The boolean
 // returns indicates whether the message should be delivered to a targeted peer.
+// It stores the error we received from the peer in memory if we have a channel
+// open with the peer.
 //
 // NOTE: This method should only be called from within the readHandler.
 func (p *peer) handleError(msg *lnwire.Error) bool {
 	key := p.addr.IdentityKey
 
+	// Store the error we have received.
+ p.storeError(msg) + switch { // In the case of an all-zero channel ID we want to forward the error to // all channels with this peer. case msg.ChanID == lnwire.ConnectionWideID: - for chanID, chanStream := range p.activeMsgStreams { + for _, chanStream := range p.activeMsgStreams { chanStream.AddMsg(msg) - - // Also marked this channel as failed, so we won't try - // to restart it on reconnect with this peer. - p.failedChannels[chanID] = struct{}{} } return false @@ -1212,9 +1373,6 @@ func (p *peer) handleError(msg *lnwire.Error) bool { // If not we hand the error to the channel link for this channel. case p.isActiveChannel(msg.ChanID): - // Mark this channel as failed, so we won't try to restart it on - // reconnect with this peer. - p.failedChannels[msg.ChanID] = struct{}{} return true default: @@ -1329,8 +1487,10 @@ func messageSummary(msg lnwire.Message) string { msg.Complete) case *lnwire.ReplyChannelRange: - return fmt.Sprintf("complete=%v, encoding=%v, num_chans=%v", - msg.Complete, msg.EncodingType, len(msg.ShortChanIDs)) + return fmt.Sprintf("start_height=%v, end_height=%v, "+ + "num_chans=%v, encoding=%v", msg.FirstBlockHeight, + msg.LastBlockHeight(), len(msg.ShortChanIDs), + msg.EncodingType) case *lnwire.QueryShortChanIDs: return fmt.Sprintf("chain_hash=%v, encoding=%v, num_chans=%v", @@ -1338,8 +1498,8 @@ func messageSummary(msg lnwire.Message) string { case *lnwire.QueryChannelRange: return fmt.Sprintf("chain_hash=%v, start_height=%v, "+ - "num_blocks=%v", msg.ChainHash, msg.FirstBlockHeight, - msg.NumBlocks) + "end_height=%v", msg.ChainHash, msg.FirstBlockHeight, + msg.LastBlockHeight()) case *lnwire.GossipTimestampRange: return fmt.Sprintf("chain_hash=%v, first_stamp=%v, "+ @@ -1715,6 +1875,11 @@ func (p *peer) ChannelSnapshots() []*channeldb.ChannelSnapshot { snapshots := make([]*channeldb.ChannelSnapshot, 0, len(p.activeChannels)) for _, activeChan := range p.activeChannels { + // If the activeChan is nil, then we skip it as the channel is 
pending. + if activeChan == nil { + continue + } + // We'll only return a snapshot for channels that are // *immedately* available for routing payments over. if activeChan.RemoteNextRevocation() == nil { @@ -1767,9 +1932,12 @@ out: chanPoint := &newChan.FundingOutpoint chanID := lnwire.NewChanIDFromOutPoint(chanPoint) - // Make sure this channel is not already active. + // Only update RemoteNextRevocation if the channel is in the + // activeChannels map and if we added the link to the switch. + // Only active channels will be added to the switch. p.activeChanMtx.Lock() - if currentChan, ok := p.activeChannels[chanID]; ok { + currentChan, ok := p.activeChannels[chanID] + if ok && currentChan != nil { peerLog.Infof("Already have ChannelPoint(%v), "+ "ignoring.", chanPoint) @@ -1815,6 +1983,8 @@ out: continue } + // This refreshes the activeChannels entry if the link was not in + // the switch, also populates for new entries. p.activeChannels[chanID] = lnChan p.addedChannels[chanID] = struct{}{} p.activeChanMtx.Unlock() @@ -1856,17 +2026,24 @@ out: fwdMinHtlc := lnChan.FwdMinHtlc() defaultPolicy := p.server.cc.routingPolicy forwardingPolicy := &htlcswitch.ForwardingPolicy{ - MinHTLC: fwdMinHtlc, + MinHTLCOut: fwdMinHtlc, MaxHTLC: newChan.LocalChanCfg.MaxPendingAmount, BaseFee: defaultPolicy.BaseFee, FeeRate: defaultPolicy.FeeRate, TimeLockDelta: defaultPolicy.TimeLockDelta, } + // If we've reached this point, there are two possible scenarios. + // If the channel was in the active channels map as nil, then it + // was loaded from disk and we need to send reestablish. Else, + // it was not loaded from disk and we don't need to send + // reestablish as this is a fresh channel. + shouldReestablish := ok + // Create the link and add it to the switch. 
err = p.addLink( chanPoint, lnChan, forwardingPolicy, - chainEvents, currentHeight, false, + chainEvents, currentHeight, shouldReestablish, ) if err != nil { err := fmt.Errorf("can't register new channel "+ @@ -1985,6 +2162,11 @@ out: // our active channel back to their default state. p.activeChanMtx.Lock() for _, channel := range p.activeChannels { + // If the channel is nil, continue as it's a pending channel. + if channel == nil { + continue + } + channel.ResetState() } p.activeChanMtx.Unlock() @@ -2004,6 +2186,11 @@ func (p *peer) reenableActiveChannels() { var activePublicChans []wire.OutPoint p.activeChanMtx.RLock() for chanID, lnChan := range p.activeChannels { + // If the lnChan is nil, continue as this is a pending channel. + if lnChan == nil { + continue + } + dbChan := lnChan.State() isPublic := dbChan.ChannelFlags&lnwire.FFAnnounceChannel != 0 if !isPublic || dbChan.IsPending { @@ -2047,7 +2234,10 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, e p.activeChanMtx.RLock() channel, ok := p.activeChannels[chanID] p.activeChanMtx.RUnlock() - if !ok { + + // If the channel isn't in the map or the channel is nil, return + // ErrChannelNotFound as the channel is pending. + if !ok || channel == nil { return nil, ErrChannelNotFound } @@ -2064,13 +2254,18 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, e "channel w/ active htlcs") } - // We'll create a valid closing state machine in order to - // respond to the initiated cooperative channel closure. - deliveryAddr, err := p.genDeliveryScript() - if err != nil { - peerLog.Errorf("unable to gen delivery script: %v", err) - - return nil, fmt.Errorf("close addr unavailable") + // We'll create a valid closing state machine in order to respond to the + // initiated cooperative channel closure. First, we set the delivery + // script that our funds will be paid out to. If an upfront shutdown script + // was set, we will use it. 
Otherwise, we get a fresh delivery script. + deliveryScript := channel.LocalUpfrontShutdownScript() + if len(deliveryScript) == 0 { + var err error + deliveryScript, err = p.genDeliveryScript() + if err != nil { + peerLog.Errorf("unable to gen delivery script: %v", err) + return nil, fmt.Errorf("close addr unavailable") + } } // In order to begin fee negotiations, we'll first compute our @@ -2095,12 +2290,16 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, e unregisterChannel: p.server.htlcSwitch.RemoveLink, broadcastTx: p.server.cc.wallet.PublishTransaction, disableChannel: p.server.chanStatusMgr.RequestDisable, - quit: p.quit, + disconnect: func() error { + return p.server.DisconnectPeer(p.IdentityKey()) + }, + quit: p.quit, }, - deliveryAddr, + deliveryScript, feePerKw, uint32(startingHeight), nil, + false, ) p.activeChanCloses[chanID] = chanCloser } @@ -2108,6 +2307,37 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, e return chanCloser, nil } +// chooseDeliveryScript takes two optionally set shutdown scripts and returns +// a suitable script to close out to. This may be nil if neither script is +// set. If both scripts are set, this function will error if they do not match. +func chooseDeliveryScript(upfront, + requested lnwire.DeliveryAddress) (lnwire.DeliveryAddress, error) { + + // If no upfront upfront shutdown script was provided, return the user + // requested address (which may be nil). + if len(upfront) == 0 { + return requested, nil + } + + // If an upfront shutdown script was provided, and the user did not request + // a custom shutdown script, return the upfront address. + if len(requested) == 0 { + return upfront, nil + } + + // If both an upfront shutdown script and a custom close script were + // provided, error if the user provided shutdown script does not match + // the upfront shutdown script (because closing out to a different script + // would violate upfront shutdown). 
+ if !bytes.Equal(upfront, requested) { + return nil, errUpfrontShutdownScriptMismatch + } + + // The user requested script matches the upfront shutdown script, so we + // can return it without error. + return upfront, nil +} + // handleLocalCloseReq kicks-off the workflow to execute a cooperative or // forced unilateral closure of the channel initiated by a local subsystem. func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) { @@ -2116,7 +2346,10 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) { p.activeChanMtx.RLock() channel, ok := p.activeChannels[chanID] p.activeChanMtx.RUnlock() - if !ok { + + // Though this function can't be called for pending channels, we still + // check whether channel is nil for safety. + if !ok || channel == nil { err := fmt.Errorf("unable to close channel, ChannelID(%v) is "+ "unknown", chanID) peerLog.Errorf(err.Error()) @@ -2130,16 +2363,34 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) { // out this channel on-chain, so we execute the cooperative channel // closure workflow. case htlcswitch.CloseRegular: - // First, we'll fetch a fresh delivery address that we'll use - // to send the funds to in the case of a successful - // negotiation. - deliveryAddr, err := p.genDeliveryScript() + // First, we'll choose a delivery address that we'll use to send the + // funds to in the case of a successful negotiation. + + // An upfront shutdown and user provided script are both optional, + // but must be equal if both set (because we cannot serve a request + // to close out to a script which violates upfront shutdown). Get the + // appropriate address to close out to (which may be nil if neither + // are set) and error if they are both set and do not match. 
+ deliveryScript, err := chooseDeliveryScript( + channel.LocalUpfrontShutdownScript(), req.DeliveryScript, + ) if err != nil { - peerLog.Errorf(err.Error()) + peerLog.Errorf("cannot close channel %v: %v", req.ChanPoint, err) req.Err <- err return } + // If neither an upfront address or a user set address was + // provided, generate a fresh script. + if len(deliveryScript) == 0 { + deliveryScript, err = p.genDeliveryScript() + if err != nil { + peerLog.Errorf(err.Error()) + req.Err <- err + return + } + } + // Next, we'll create a new channel closer state machine to // handle the close negotiation. _, startingHeight, err := p.server.cc.chainIO.GetBestBlock() @@ -2155,12 +2406,16 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) { unregisterChannel: p.server.htlcSwitch.RemoveLink, broadcastTx: p.server.cc.wallet.PublishTransaction, disableChannel: p.server.chanStatusMgr.RequestDisable, - quit: p.quit, + disconnect: func() error { + return p.server.DisconnectPeer(p.IdentityKey()) + }, + quit: p.quit, }, - deliveryAddr, + deliveryScript, req.TargetFeePerKw, uint32(startingHeight), req, + true, ) p.activeChanCloses[chanID] = chanCloser @@ -2187,13 +2442,7 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) { // TODO(roasbeef): no longer need with newer beach logic? peerLog.Infof("ChannelPoint(%v) has been breached, wiping "+ "channel", req.ChanPoint) - if err := p.WipeChannel(req.ChanPoint); err != nil { - peerLog.Infof("Unable to wipe channel after detected "+ - "breach: %v", err) - req.Err <- err - return - } - return + p.WipeChannel(req.ChanPoint) } } @@ -2220,11 +2469,7 @@ func (p *peer) handleLinkFailure(failure linkFailureReport) { // link and cancel back any adds in its mailboxes such that we can // safely force close without the link being added again and updates // being applied. 
- if err := p.WipeChannel(&failure.chanPoint); err != nil { - peerLog.Errorf("Unable to wipe link for chanpoint=%v", - failure.chanPoint) - return - } + p.WipeChannel(&failure.chanPoint) // If the error encountered was severe enough, we'll now force close the // channel to prevent readding it to the switch in the future. @@ -2276,11 +2521,7 @@ func (p *peer) finalizeChanClosure(chanCloser *channelCloser) { // First, we'll clear all indexes related to the channel in question. chanPoint := chanCloser.cfg.channel.ChannelPoint() - if err := p.WipeChannel(chanPoint); err != nil { - if closeReq != nil { - closeReq.Err <- err - } - } + p.WipeChannel(chanPoint) // Next, we'll launch a goroutine which will request to be notified by // the ChainNotifier once the closure transaction obtains a single @@ -2370,7 +2611,7 @@ func waitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier, // WipeChannel removes the passed channel point from all indexes associated with // the peer, and the switch. -func (p *peer) WipeChannel(chanPoint *wire.OutPoint) error { +func (p *peer) WipeChannel(chanPoint *wire.OutPoint) { chanID := lnwire.NewChanIDFromOutPoint(chanPoint) p.activeChanMtx.Lock() @@ -2380,69 +2621,74 @@ func (p *peer) WipeChannel(chanPoint *wire.OutPoint) error { // Instruct the HtlcSwitch to close this link as the channel is no // longer active. p.server.htlcSwitch.RemoveLink(chanID) - - return nil } // handleInitMsg handles the incoming init message which contains global and // local features vectors. If feature vectors are incompatible then disconnect. func (p *peer) handleInitMsg(msg *lnwire.Init) error { - p.remoteLocalFeatures = lnwire.NewFeatureVector( - msg.LocalFeatures, lnwire.LocalFeatures, - ) - p.remoteGlobalFeatures = lnwire.NewFeatureVector( - msg.GlobalFeatures, lnwire.GlobalFeatures, + // First, merge any features from the legacy global features field into + // those presented in the local features fields. 
+ err := msg.Features.Merge(msg.GlobalFeatures) + if err != nil { + return fmt.Errorf("unable to merge legacy global features: %v", + err) + } + + // Then, finalize the remote feature vector providing the flatteneed + // feature bit namespace. + p.remoteFeatures = lnwire.NewFeatureVector( + msg.Features, lnwire.Features, ) // Now that we have their features loaded, we'll ensure that they // didn't set any required bits that we don't know of. - unknownLocalFeatures := p.remoteLocalFeatures.UnknownRequiredFeatures() - if len(unknownLocalFeatures) > 0 { - err := fmt.Errorf("Peer set unknown local feature bits: %v", - unknownLocalFeatures) - return err + err = feature.ValidateRequired(p.remoteFeatures) + if err != nil { + return fmt.Errorf("invalid remote features: %v", err) } - unknownGlobalFeatures := p.remoteGlobalFeatures.UnknownRequiredFeatures() - if len(unknownGlobalFeatures) > 0 { - err := fmt.Errorf("Peer set unknown global feature bits: %v", - unknownGlobalFeatures) - return err + + // Ensure the remote party's feature vector contains all transistive + // dependencies. We know ours are are correct since they are validated + // during the feature manager's instantiation. + err = feature.ValidateDeps(p.remoteFeatures) + if err != nil { + return fmt.Errorf("invalid remote features: %v", err) } // Now that we know we understand their requirements, we'll check to // see if they don't support anything that we deem to be mandatory. switch { - case !p.remoteLocalFeatures.HasFeature(lnwire.DataLossProtectRequired): + case !p.remoteFeatures.HasFeature(lnwire.DataLossProtectRequired): return fmt.Errorf("data loss protection required") } return nil } -// LocalGlobalFeatures returns the set of global features that has been -// advertised by the local node. This allows sub-systems that use this -// interface to gate their behavior off the set of negotiated feature bits. +// LocalFeatures returns the set of global features that has been advertised by +// the local node. 
This allows sub-systems that use this interface to gate their +// behavior off the set of negotiated feature bits. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) LocalGlobalFeatures() *lnwire.FeatureVector { - return p.server.globalFeatures +func (p *peer) LocalFeatures() *lnwire.FeatureVector { + return p.features } -// RemoteGlobalFeatures returns the set of global features that has been -// advertised by the remote node. This allows sub-systems that use this -// interface to gate their behavior off the set of negotiated feature bits. +// RemoteFeatures returns the set of global features that has been advertised by +// the remote node. This allows sub-systems that use this interface to gate +// their behavior off the set of negotiated feature bits. // // NOTE: Part of the lnpeer.Peer interface. -func (p *peer) RemoteGlobalFeatures() *lnwire.FeatureVector { - return p.remoteGlobalFeatures +func (p *peer) RemoteFeatures() *lnwire.FeatureVector { + return p.remoteFeatures } // sendInitMsg sends init message to remote peer which contains our currently // supported local and global features. func (p *peer) sendInitMsg() error { msg := lnwire.NewInitMessage( - p.server.globalFeatures.RawFeatureVector, - p.localFeatures, + p.legacyFeatures.RawFeatureVector, + p.features.RawFeatureVector, ) return p.writeMessage(msg) @@ -2451,6 +2697,12 @@ func (p *peer) sendInitMsg() error { // resendChanSyncMsg will attempt to find a channel sync message for the closed // channel and resend it to our peer. func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error { + // If we already re-sent the mssage for this channel, we won't do it + // again. + if _, ok := p.resentChanSyncMsg[cid]; ok { + return nil + } + // Check if we have any channel sync messages stored for this channel. 
c, err := p.server.chanDB.FetchClosedChannelForID(cid) if err != nil { @@ -2479,6 +2731,10 @@ func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error { peerLog.Debugf("Re-sent channel sync message for channel %v to peer "+ "%v", cid, p) + // Note down that we sent the message, so we won't resend it again for + // this connection. + p.resentChanSyncMsg[cid] = struct{}{} + return nil } diff --git a/peer_test.go b/peer_test.go index 9db7e7f36a..4669168be0 100644 --- a/peer_test.go +++ b/peer_test.go @@ -3,17 +3,32 @@ package lnd import ( + "bytes" "testing" "time" + "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/htlcswitch" - "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" ) +var ( + // p2SHAddress is a valid pay to script hash address. + p2SHAddress = "2NBFNJTktNa7GZusGbDbGKRZTxdK9VVez3n" + + // p2wshAddress is a valid pay to witness script hash address. + p2wshAddress = "bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3" + + // timeout is a timeout value to use for tests which need ot wait for + // a return value on a channel. + timeout = time.Second * 5 +) + // TestPeerChannelClosureAcceptFeeResponder tests the shutdown responder's // behavior if we can agree on the fee immediately. 
func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { @@ -25,7 +40,8 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { broadcastTxChan := make(chan *wire.MsgTx) responder, responderChan, initiatorChan, cleanUp, err := createTestPeer( - notifier, broadcastTxChan) + notifier, broadcastTxChan, noUpdate, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -45,7 +61,7 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive shutdown message") } @@ -61,7 +77,7 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive ClosingSigned message") } @@ -80,7 +96,7 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err := lnwire.NewSigFromRawSignature(initiatorSig) + parsedSig, err := lnwire.NewSigFromSignature(initiatorSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } @@ -94,7 +110,7 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) { // the closing transaction. 
select { case <-broadcastTxChan: - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("closing tx not broadcast") } @@ -113,7 +129,8 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { broadcastTxChan := make(chan *wire.MsgTx) initiator, initiatorChan, responderChan, cleanUp, err := createTestPeer( - notifier, broadcastTxChan) + notifier, broadcastTxChan, noUpdate, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -136,7 +153,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { select { case outMsg := <-initiator.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive shutdown request") } @@ -156,7 +173,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { dummyDeliveryScript), } - estimator := lnwallet.NewStaticFeeEstimator(12500, 0) + estimator := chainfee.NewStaticEstimator(12500, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { t.Fatalf("unable to query fee estimator: %v", err) @@ -167,7 +184,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { if err != nil { t.Fatalf("unable to create close proposal: %v", err) } - parsedSig, err := lnwire.NewSigFromRawSignature(closeSig) + parsedSig, err := lnwire.NewSigFromSignature(closeSig) if err != nil { t.Fatalf("unable to parse signature: %v", err) } @@ -184,7 +201,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { select { case outMsg := <-initiator.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive closing signed message") } @@ -202,7 +219,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) { // the closing transaction. 
select { case <-broadcastTxChan: - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("closing tx not broadcast") } @@ -222,7 +239,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { broadcastTxChan := make(chan *wire.MsgTx) responder, responderChan, initiatorChan, cleanUp, err := createTestPeer( - notifier, broadcastTxChan, + notifier, broadcastTxChan, noUpdate, ) if err != nil { t.Fatalf("unable to create test channels: %v", err) @@ -244,7 +261,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive shutdown message") } @@ -260,7 +277,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive closing signed message") } @@ -279,7 +296,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err := lnwire.NewSigFromRawSignature(initiatorSig) + parsedSig, err := lnwire.NewSigFromSignature(initiatorSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } @@ -296,7 +313,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive closing signed message") } @@ -323,7 +340,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err = lnwire.NewSigFromRawSignature(initiatorSig) + parsedSig, err = lnwire.NewSigFromSignature(initiatorSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } @@ -338,7 +355,7 @@ func 
TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { select { case outMsg := <-responder.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive closing signed message") } @@ -368,7 +385,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err = lnwire.NewSigFromRawSignature(initiatorSig) + parsedSig, err = lnwire.NewSigFromSignature(initiatorSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } @@ -382,7 +399,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) { // the closing transaction. select { case <-broadcastTxChan: - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("closing tx not broadcast") } @@ -402,7 +419,8 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { broadcastTxChan := make(chan *wire.MsgTx) initiator, initiatorChan, responderChan, cleanUp, err := createTestPeer( - notifier, broadcastTxChan) + notifier, broadcastTxChan, noUpdate, + ) if err != nil { t.Fatalf("unable to create test channels: %v", err) } @@ -426,7 +444,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { select { case outMsg := <-initiator.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive shutdown request") } @@ -446,7 +464,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { msg: respShutdown, } - estimator := lnwallet.NewStaticFeeEstimator(12500, 0) + estimator := chainfee.NewStaticEstimator(12500, 0) initiatorIdealFeeRate, err := estimator.EstimateFeePerKW(1) if err != nil { t.Fatalf("unable to query fee estimator: %v", err) @@ -459,7 +477,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { if err != nil { t.Fatalf("unable to create close proposal: %v", err) } - parsedSig, err := 
lnwire.NewSigFromRawSignature(closeSig) + parsedSig, err := lnwire.NewSigFromSignature(closeSig) if err != nil { t.Fatalf("unable to parse signature: %v", err) } @@ -477,7 +495,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { select { case outMsg := <-initiator.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive closing signed") } closingSignedMsg, ok := msg.(*lnwire.ClosingSigned) @@ -495,7 +513,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { select { case outMsg := <-initiator.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive closing signed") } closingSignedMsg, ok = msg.(*lnwire.ClosingSigned) @@ -525,7 +543,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err = lnwire.NewSigFromRawSignature(responderSig) + parsedSig, err = lnwire.NewSigFromSignature(responderSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } @@ -541,7 +559,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { select { case outMsg := <-initiator.outgoingQueue: msg = outMsg.msg - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("did not receive closing signed") } @@ -571,7 +589,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { t.Fatalf("error creating close proposal: %v", err) } - parsedSig, err = lnwire.NewSigFromRawSignature(responderSig) + parsedSig, err = lnwire.NewSigFromSignature(responderSig) if err != nil { t.Fatalf("error parsing signature: %v", err) } @@ -584,7 +602,233 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) { // Wait for closing tx to be broadcasted. 
select { case <-broadcastTxChan: - case <-time.After(time.Second * 5): + case <-time.After(timeout): t.Fatalf("closing tx not broadcast") } } + +// TestChooseDeliveryScript tests that chooseDeliveryScript correctly errors +// when upfront and user set scripts that do not match are provided, allows +// matching values and returns appropriate values in the case where one or none +// are set. +func TestChooseDeliveryScript(t *testing.T) { + // generate non-zero scripts for testing. + script1 := genScript(t, p2SHAddress) + script2 := genScript(t, p2wshAddress) + + tests := []struct { + name string + userScript lnwire.DeliveryAddress + shutdownScript lnwire.DeliveryAddress + expectedScript lnwire.DeliveryAddress + expectedError error + }{ + { + name: "Neither set", + userScript: nil, + shutdownScript: nil, + expectedScript: nil, + expectedError: nil, + }, + { + name: "Both set and equal", + userScript: script1, + shutdownScript: script1, + expectedScript: script1, + expectedError: nil, + }, + { + name: "Both set and not equal", + userScript: script1, + shutdownScript: script2, + expectedScript: nil, + expectedError: errUpfrontShutdownScriptMismatch, + }, + { + name: "Only upfront script", + userScript: nil, + shutdownScript: script1, + expectedScript: script1, + expectedError: nil, + }, + { + name: "Only user script", + userScript: script2, + shutdownScript: nil, + expectedScript: script2, + expectedError: nil, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + script, err := chooseDeliveryScript( + test.shutdownScript, test.userScript, + ) + if err != test.expectedError { + t.Fatalf("Expected: %v, got: %v", test.expectedError, err) + } + + if !bytes.Equal(script, test.expectedScript) { + t.Fatalf("Expected: %x, got: %x", test.expectedScript, script) + } + }) + } +} + +// TestCustomShutdownScript tests that the delivery script of a shutdown +// message can be set to a specified address. 
It checks that setting a close +// script fails for channels which have an upfront shutdown script already set. +func TestCustomShutdownScript(t *testing.T) { + script := genScript(t, p2SHAddress) + + // setShutdown is a function which sets the upfront shutdown address for + // the local channel. + setShutdown := func(a, b *channeldb.OpenChannel) { + a.LocalShutdownScript = script + b.RemoteShutdownScript = script + } + + tests := []struct { + name string + + // update is a function used to set values on the channel set up for the + // test. It is used to set values for upfront shutdown addresses. + update func(a, b *channeldb.OpenChannel) + + // userCloseScript is the address specified by the user. + userCloseScript lnwire.DeliveryAddress + + // expectedScript is the address we expect to be set on the shutdown + // message. + expectedScript lnwire.DeliveryAddress + + // expectedError is the error we expect, if any. + expectedError error + }{ + { + name: "User set script", + update: noUpdate, + userCloseScript: script, + expectedScript: script, + }, + { + name: "No user set script", + update: noUpdate, + }, + { + name: "Shutdown set, no user script", + update: setShutdown, + expectedScript: script, + }, + { + name: "Shutdown set, user script matches", + update: setShutdown, + userCloseScript: script, + expectedScript: script, + }, + { + name: "Shutdown set, user script different", + update: setShutdown, + userCloseScript: []byte("different addr"), + expectedError: errUpfrontShutdownScriptMismatch, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + notifier := &mockNotfier{ + confChannel: make(chan *chainntnfs.TxConfirmation), + } + broadcastTxChan := make(chan *wire.MsgTx) + + // Open a channel. 
+ initiator, initiatorChan, _, cleanUp, err := createTestPeer( + notifier, broadcastTxChan, test.update, + ) + if err != nil { + t.Fatalf("unable to create test channels: %v", err) + } + defer cleanUp() + + // Request initiator to cooperatively close the channel, with + // a specified delivery address. + updateChan := make(chan interface{}, 1) + errChan := make(chan error, 1) + chanPoint := initiatorChan.ChannelPoint() + closeCommand := htlcswitch.ChanClose{ + CloseType: htlcswitch.CloseRegular, + ChanPoint: chanPoint, + Updates: updateChan, + TargetFeePerKw: 12500, + DeliveryScript: test.userCloseScript, + Err: errChan, + } + + // Send the close command for the correct channel and check that a + // shutdown message is sent. + initiator.localCloseChanReqs <- &closeCommand + + var msg lnwire.Message + select { + case outMsg := <-initiator.outgoingQueue: + msg = outMsg.msg + case <-time.After(timeout): + t.Fatalf("did not receive shutdown message") + case err := <-errChan: + // Fail if we do not expect an error. + if err != test.expectedError { + t.Fatalf("error closing channel: %v", err) + } + + // Terminate the test early if have received an error, no + // further action is expected. + return + } + + // Check that we have received a shutdown message. + shutdownMsg, ok := msg.(*lnwire.Shutdown) + if !ok { + t.Fatalf("expected shutdown message, got %T", msg) + } + + // If the test has not specified an expected address, do not check + // whether the shutdown address matches. This covers the case where + // we epect shutdown to a random address and cannot match it. + if len(test.expectedScript) == 0 { + return + } + + // Check that the Shutdown message includes the expected delivery + // script. + if !bytes.Equal(test.expectedScript, shutdownMsg.Address) { + t.Fatalf("expected delivery script: %x, got: %x", + test.expectedScript, shutdownMsg.Address) + } + }) + } +} + +// genScript creates a script paying out to the address provided, which must +// be a valid address. 
+func genScript(t *testing.T, address string) lnwire.DeliveryAddress { + // Generate an address which can be used for testing. + deliveryAddr, err := btcutil.DecodeAddress( + address, + activeNetParams.Params, + ) + if err != nil { + t.Fatalf("invalid delivery address: %v", err) + } + + script, err := txscript.PayToAddrScript(deliveryAddr) + if err != nil { + t.Fatalf("cannot create script: %v", err) + } + + return script +} diff --git a/pilot.go b/pilot.go index b783c5b65b..923c7865a9 100644 --- a/pilot.go +++ b/pilot.go @@ -31,7 +31,7 @@ func validateAtplCfg(cfg *autoPilotConfig) ([]*autopilot.WeightedHeuristic, for _, a := range autopilot.AvailableHeuristics { heuristicsStr += fmt.Sprintf(" '%v' ", a.Name()) } - availStr := fmt.Sprintf("Avaiblable heuristcs are: [%v]", heuristicsStr) + availStr := fmt.Sprintf("Available heuristics are: [%v]", heuristicsStr) // We'll go through the config and make sure all the heuristics exists, // and that the sum of their weights is 1.0. @@ -70,10 +70,11 @@ func validateAtplCfg(cfg *autoPilotConfig) ([]*autopilot.WeightedHeuristic, // chanController is an implementation of the autopilot.ChannelController // interface that's backed by a running lnd instance. type chanController struct { - server *server - private bool - minConfs int32 - confTarget uint32 + server *server + private bool + minConfs int32 + confTarget uint32 + chanMinHtlcIn lnwire.MilliSatoshi } // OpenChannel opens a channel to a target peer, with a capacity of the @@ -91,9 +92,6 @@ func (c *chanController) OpenChannel(target *btcec.PublicKey, return err } - // TODO(halseth): make configurable? - minHtlc := lnwire.NewMSatFromSatoshis(1) - // Construct the open channel request and send it to the server to begin // the funding workflow. 
req := &openChanReq{ @@ -102,7 +100,7 @@ func (c *chanController) OpenChannel(target *btcec.PublicKey, subtractFees: true, localFundingAmt: amt, pushAmt: 0, - minHtlc: minHtlc, + minHtlcIn: c.chanMinHtlcIn, fundingFeePerKw: feePerKw, private: c.private, remoteCsvDelay: 0, @@ -136,14 +134,17 @@ func (c *chanController) SpliceOut(chanPoint *wire.OutPoint, // autopilot.ChannelController interface. var _ autopilot.ChannelController = (*chanController)(nil) -// initAutoPilot initializes a new autopilot.ManagerCfg to manage an -// autopilot.Agent instance based on the passed configuration struct. The agent -// and all interfaces needed to drive it won't be launched before the Manager's +// initAutoPilot initializes a new autopilot.ManagerCfg to manage an autopilot. +// Agent instance based on the passed configuration structs. The agent and all +// interfaces needed to drive it won't be launched before the Manager's // StartAgent method is called. -func initAutoPilot(svr *server, cfg *autoPilotConfig) (*autopilot.ManagerCfg, error) { - atplLog.Infof("Instantiating autopilot with max_channels=%d, allocation=%f, "+ - "min_chan_size=%d, max_chan_size=%d, private=%t, min_confs=%d, "+ - "conf_target=%d", cfg.MaxChannels, cfg.Allocation, cfg.MinChannelSize, +func initAutoPilot(svr *server, cfg *autoPilotConfig, chainCfg *chainConfig) ( + *autopilot.ManagerCfg, error) { + + atplLog.Infof("Instantiating autopilot with active=%v, "+ + "max_channels=%d, allocation=%f, min_chan_size=%d, "+ + "max_chan_size=%d, private=%t, min_confs=%d, conf_target=%d", + cfg.Active, cfg.MaxChannels, cfg.Allocation, cfg.MinChannelSize, cfg.MaxChannelSize, cfg.Private, cfg.MinConfs, cfg.ConfTarget) // Set up the constraints the autopilot heuristics must adhere to. 
@@ -173,10 +174,11 @@ func initAutoPilot(svr *server, cfg *autoPilotConfig) (*autopilot.ManagerCfg, er Self: self, Heuristic: weightedAttachment, ChanController: &chanController{ - server: svr, - private: cfg.Private, - minConfs: cfg.MinConfs, - confTarget: cfg.ConfTarget, + server: svr, + private: cfg.Private, + minConfs: cfg.MinConfs, + confTarget: cfg.ConfTarget, + chanMinHtlcIn: chainCfg.MinHTLCIn, }, WalletBalance: func() (btcutil.Amount, error) { return svr.cc.wallet.ConfirmedBalance(cfg.MinConfs) diff --git a/queue/circular_buf.go b/queue/circular_buf.go new file mode 100644 index 0000000000..5e319f004c --- /dev/null +++ b/queue/circular_buf.go @@ -0,0 +1,116 @@ +package queue + +import ( + "errors" +) + +// errInvalidSize is returned when an invalid size for a buffer is provided. +var errInvalidSize = errors.New("buffer size must be > 0") + +// CircularBuffer is a buffer which retains a set of values in memory, and +// overwrites the oldest item in the buffer when a new item needs to be +// written. +type CircularBuffer struct { + // total is the total number of items that have been added to the + // buffer. + total int + + // items is the set of buffered items. + items []interface{} +} + +// NewCircularBuffer returns a new circular buffer with the size provided. It +// will fail if a zero or negative size parameter is provided. +func NewCircularBuffer(size int) (*CircularBuffer, error) { + if size <= 0 { + return nil, errInvalidSize + } + + return &CircularBuffer{ + total: 0, + + // Create a slice with length and capacity equal to the size of + // the buffer so that we do not need to resize the underlying + // array when we add items. + items: make([]interface{}, size), + }, nil +} + +// index returns the index that should be written to next. +func (c *CircularBuffer) index() int { + return c.total % len(c.items) +} + +// Add adds an item to the buffer, overwriting the oldest item if the buffer +// is full. 
+func (c *CircularBuffer) Add(item interface{}) { + // Set the item in the next free index in the items array. + c.items[c.index()] = item + + // Increment the total number of items that we have stored. + c.total++ +} + +// List returns a copy of the items in the buffer ordered from the oldest to +// newest item. +func (c *CircularBuffer) List() []interface{} { + size := cap(c.items) + index := c.index() + + switch { + // If no items have been stored yet, we can just return a nil list. + case c.total == 0: + return nil + + // If we have added fewer items than the buffer size, we can simply + // return the total number of items from the beginning of the list + // to the index. This special case is added because the oldest item + // is at the beginning of the underlying array, not at the index when + // we have not filled the array yet. + case c.total < size: + resp := make([]interface{}, c.total) + copy(resp, c.items[:c.index()]) + return resp + } + + resp := make([]interface{}, size) + + // Get the items in the underlying array from index to end, the first + // item in this slice will be the oldest item in the list. + firstHalf := c.items[index:] + + // Copy the first set into our response slice from index 0, so that + // the response returned is from oldest to newest. + copy(resp, firstHalf) + + // Get the items in the underlying array from beginning until the write + // index, the last item in this slice will be the newest item in the + // list. + secondHalf := c.items[:index] + + // Copy the second set of items into the response slice offset by the + // length of the first set of items so that we return a response which + // is ordered from oldest to newest entry. + copy(resp[len(firstHalf):], secondHalf) + + return resp +} + +// Total returns the total number of items that have been added to the buffer. +func (c *CircularBuffer) Total() int { + return c.total +} + +// Latest returns the item that was most recently added to the buffer. 
+func (c *CircularBuffer) Latest() interface{} { + // If no items have been added yet, return nil. + if c.total == 0 { + return nil + } + + // The latest item is one before our total, mod by length. + latest := (c.total - 1) % len(c.items) + + // Return the latest item added. + return c.items[latest] +} diff --git a/queue/circular_buf_test.go b/queue/circular_buf_test.go new file mode 100644 index 0000000000..4f2c029c90 --- /dev/null +++ b/queue/circular_buf_test.go @@ -0,0 +1,198 @@ +package queue + +import ( + "reflect" + "testing" +) + +// TestNewCircularBuffer tests the size parameter check when creating a circular +// buffer. +func TestNewCircularBuffer(t *testing.T) { + tests := []struct { + name string + size int + expectedError error + }{ + { + name: "zero size", + size: 0, + expectedError: errInvalidSize, + }, + { + name: "negative size", + size: -1, + expectedError: errInvalidSize, + }, + { + name: "ok size", + size: 1, + expectedError: nil, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + _, err := NewCircularBuffer(test.size) + if err != test.expectedError { + t.Fatalf("expected: %v, got: %v", + test.expectedError, err) + } + }) + } +} + +// TestCircularBuffer tests the adding and listing of items in a circular +// buffer. +func TestCircularBuffer(t *testing.T) { + tests := []struct { + name string + size int + itemCount int + expectedItems []interface{} + }{ + { + name: "no elements", + size: 5, + itemCount: 0, + expectedItems: nil, + }, + { + name: "single element", + size: 5, + itemCount: 1, + expectedItems: []interface{}{ + 0, + }, + }, + { + name: "no wrap, not full", + size: 5, + itemCount: 4, + expectedItems: []interface{}{ + 0, 1, 2, 3, + }, + }, + { + name: "no wrap, exactly full", + size: 5, + itemCount: 5, + expectedItems: []interface{}{ + 0, 1, 2, 3, 4, + }, + }, + { + // The underlying array should contain {5, 1, 2, 3, 4}. 
+ name: "wrap, one over", + size: 5, + itemCount: 6, + expectedItems: []interface{}{ + 1, 2, 3, 4, 5, + }, + }, + { + // The underlying array should contain {5, 6, 2, 3, 4}. + name: "wrap, two over", + size: 5, + itemCount: 7, + expectedItems: []interface{}{ + 2, 3, 4, 5, 6, + }, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + buffer, err := NewCircularBuffer(test.size) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + for i := 0; i < test.itemCount; i++ { + buffer.Add(i) + } + + // List the items in the buffer and check that the list + // is as expected. + list := buffer.List() + if !reflect.DeepEqual(test.expectedItems, list) { + t.Fatalf("expected %v, got: %v", + test.expectedItems, list) + } + }) + } +} + +// TestLatest tests fetching of the last item added to a circular buffer. +func TestLatest(t *testing.T) { + tests := []struct { + name string + size int + + // items is the number of items to add to the buffer. + items int + + // expectedItem is the value we expect from Latest(). 
+		expectedItem interface{}
+	}{
+		{
+			name:         "no items",
+			size:         3,
+			items:        0,
+			expectedItem: nil,
+		},
+		{
+			name:         "one item",
+			size:         3,
+			items:        1,
+			expectedItem: 0,
+		},
+		{
+			name:         "exactly full",
+			size:         3,
+			items:        3,
+			expectedItem: 2,
+		},
+		{
+			name:         "overflow to index 0",
+			size:         3,
+			items:        4,
+			expectedItem: 3,
+		},
+		{
+			name:         "overflow twice to index 0",
+			size:         3,
+			items:        7,
+			expectedItem: 6,
+		},
+	}
+
+	for _, test := range tests {
+		test := test
+
+		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
+
+			buffer, err := NewCircularBuffer(test.size)
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			for i := 0; i < test.items; i++ {
+				buffer.Add(i)
+			}
+
+			latest := buffer.Latest()
+
+			if !reflect.DeepEqual(latest, test.expectedItem) {
+				t.Fatalf("expected: %v, got: %v",
+					test.expectedItem, latest)
+			}
+		})
+	}
+}
diff --git a/queue/priority_queue.go b/queue/priority_queue.go
new file mode 100644
index 0000000000..06485e5316
--- /dev/null
+++ b/queue/priority_queue.go
@@ -0,0 +1,76 @@
+package queue
+
+import (
+	"container/heap"
+)
+
+// PriorityQueueItem is an interface that represents items in a PriorityQueue.
+// Users of PriorityQueue will need to define a Less function such that
+// PriorityQueue will be able to use that to build and restore an underlying
+// heap.
+type PriorityQueueItem interface {
+	// Less must return true if this item is ordered before other and false
+	// otherwise.
+	Less(other PriorityQueueItem) bool
+}
+
+type priorityQueue []PriorityQueueItem
+
+// Len returns the length of the priorityQueue.
+func (pq priorityQueue) Len() int { return len(pq) }
+
+// Less is used to order PriorityQueueItem items in the queue.
+func (pq priorityQueue) Less(i, j int) bool {
+	return pq[i].Less(pq[j])
+}
+
+// Swap swaps two items in the priorityQueue. Swap is used by heap.Interface.
+func (pq priorityQueue) Swap(i, j int) {
+	pq[i], pq[j] = pq[j], pq[i]
+}
+
+// Push adds a new item to the priorityQueue.
+func (pq *priorityQueue) Push(x interface{}) { + item := x.(PriorityQueueItem) + *pq = append(*pq, item) +} + +// Pop removes the top item from the priorityQueue. +func (pq *priorityQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + old[n-1] = nil + *pq = old[0 : n-1] + return item +} + +// PriorityQueue wraps a standard heap into a self contained class. +type PriorityQueue struct { + queue priorityQueue +} + +// Len returns the length of the queue. +func (pq *PriorityQueue) Len() int { + return len(pq.queue) +} + +// Empty returns true if the queue is empty. +func (pq *PriorityQueue) Empty() bool { + return len(pq.queue) == 0 +} + +// Push adds an item to the priority queue. +func (pq *PriorityQueue) Push(item PriorityQueueItem) { + heap.Push(&pq.queue, item) +} + +// Pop removes the top most item from the queue. +func (pq *PriorityQueue) Pop() PriorityQueueItem { + return heap.Pop(&pq.queue).(PriorityQueueItem) +} + +// Top returns the top most item from the queue without removing it. +func (pq *PriorityQueue) Top() PriorityQueueItem { + return pq.queue[0] +} diff --git a/queue/priority_queue_test.go b/queue/priority_queue_test.go new file mode 100644 index 0000000000..be1209e066 --- /dev/null +++ b/queue/priority_queue_test.go @@ -0,0 +1,67 @@ +package queue + +import ( + "math/rand" + "testing" + "time" +) + +type testQueueItem struct { + Value int + Expiry time.Time +} + +func (e testQueueItem) Less(other PriorityQueueItem) bool { + return e.Expiry.Before(other.(*testQueueItem).Expiry) +} + +func TestExpiryQueue(t *testing.T) { + // The number of elements we push to the queue. + count := 100 + // Generate a random permutation of a range [0, count) + array := rand.Perm(count) + // t0 holds a reference time point. 
+ t0 := time.Date(1975, time.April, 5, 12, 0, 0, 0, time.UTC) + + var testQueue PriorityQueue + + if testQueue.Len() != 0 && !testQueue.Empty() { + t.Fatal("Expected the queue to be empty") + } + + // Create elements with expiry of t0 + value * second. + for _, value := range array { + testQueue.Push(&testQueueItem{ + Value: value, + Expiry: t0.Add(time.Duration(value) * time.Second), + }) + } + + // Now expect that we can retrieve elements in order of their expiry. + for i := 0; i < count; i++ { + expectedQueueLen := count - i + if testQueue.Len() != expectedQueueLen { + t.Fatalf("Expected the queue len %v, got %v", + expectedQueueLen, testQueue.Len()) + } + + if testQueue.Empty() { + t.Fatalf("Did not expect the queue to be empty") + } + + top := testQueue.Top().(*testQueueItem) + if top.Value != i { + t.Fatalf("Expected queue top %v, got %v", i, top.Value) + } + + popped := testQueue.Pop().(*testQueueItem) + if popped != top { + t.Fatalf("Expected queue top %v equal to popped: %v", + top, popped) + } + } + + if testQueue.Len() != 0 || !testQueue.Empty() { + t.Fatalf("Expected the queue to be empty") + } +} diff --git a/queue/queue.go b/queue/queue.go index e3b01b2656..3c0702052e 100644 --- a/queue/queue.go +++ b/queue/queue.go @@ -58,6 +58,7 @@ func (cq *ConcurrentQueue) start() { go func() { defer cq.wg.Done() + readLoop: for { nextElement := cq.overflow.Front() if nextElement == nil { @@ -65,7 +66,10 @@ func (cq *ConcurrentQueue) start() { // directly to the output channel. If output channel is full // though, push to overflow. select { - case item := <-cq.chanIn: + case item, ok := <-cq.chanIn: + if !ok { + break readLoop + } select { case cq.chanOut <- item: // Optimistically push directly to chanOut @@ -79,7 +83,10 @@ func (cq *ConcurrentQueue) start() { // Overflow queue is not empty, so any new items get pushed to // the back to preserve order. 
select { - case item := <-cq.chanIn: + case item, ok := <-cq.chanIn: + if !ok { + break readLoop + } cq.overflow.PushBack(item) case cq.chanOut <- nextElement.Value: cq.overflow.Remove(nextElement) @@ -88,6 +95,22 @@ func (cq *ConcurrentQueue) start() { } } } + + // Incoming channel has been closed. Empty overflow queue into + // the outgoing channel. + nextElement := cq.overflow.Front() + for nextElement != nil { + select { + case cq.chanOut <- nextElement.Value: + cq.overflow.Remove(nextElement) + case <-cq.quit: + return + } + nextElement = cq.overflow.Front() + } + + // Close outgoing channel. + close(cq.chanOut) }() } diff --git a/queue/queue_test.go b/queue/queue_test.go index 9aee0cfb73..bd74dcc025 100644 --- a/queue/queue_test.go +++ b/queue/queue_test.go @@ -63,3 +63,25 @@ func TestConcurrentQueueIdempotentStop(t *testing.T) { testQueueAddDrain(t, 100, 1, 10, 1000, 1000) } + +// TestQueueCloseIncoming tests that the queue properly handles an incoming +// channel that is closed. +func TestQueueCloseIncoming(t *testing.T) { + t.Parallel() + + queue := queue.NewConcurrentQueue(10) + queue.Start() + + queue.ChanIn() <- 1 + close(queue.ChanIn()) + + item := <-queue.ChanOut() + if item.(int) != 1 { + t.Fatalf("unexpected item") + } + + _, ok := <-queue.ChanOut() + if ok { + t.Fatalf("expected outgoing channel being closed") + } +} diff --git a/record/amp.go b/record/amp.go new file mode 100644 index 0000000000..72b4cbf427 --- /dev/null +++ b/record/amp.go @@ -0,0 +1,107 @@ +package record + +import ( + "fmt" + "io" + + "github.com/lightningnetwork/lnd/tlv" +) + +// AMPOnionType is the type used in the onion to reference the AMP fields: +// root_share, set_id, and child_index. +const AMPOnionType tlv.Type = 10 + +// AMP is a record that encodes the fields necessary for atomic multi-path +// payments. 
+type AMP struct {
+	rootShare  [32]byte
+	setID      [32]byte
+	childIndex uint16
+}
+
+// NewAMP generates a new AMP record with the given root_share, set_id, and
+// child_index.
+func NewAMP(rootShare, setID [32]byte, childIndex uint16) *AMP {
+	return &AMP{
+		rootShare:  rootShare,
+		setID:      setID,
+		childIndex: childIndex,
+	}
+}
+
+// RootShare returns the root share contained in the AMP record.
+func (a *AMP) RootShare() [32]byte {
+	return a.rootShare
+}
+
+// SetID returns the set id contained in the AMP record.
+func (a *AMP) SetID() [32]byte {
+	return a.setID
+}
+
+// ChildIndex returns the child index contained in the AMP record.
+func (a *AMP) ChildIndex() uint16 {
+	return a.childIndex
+}
+
+// AMPEncoder writes the AMP record to the provided io.Writer.
+func AMPEncoder(w io.Writer, val interface{}, buf *[8]byte) error {
+	if v, ok := val.(*AMP); ok {
+		if err := tlv.EBytes32(w, &v.rootShare, buf); err != nil {
+			return err
+		}
+
+		if err := tlv.EBytes32(w, &v.setID, buf); err != nil {
+			return err
+		}
+
+		return tlv.ETUint16T(w, v.childIndex, buf)
+	}
+	return tlv.NewTypeForEncodingErr(val, "AMP")
+}
+
+const (
+	// minAMPLength is the minimum length of a serialized AMP TLV record,
+	// which occurs when the truncated encoding of child_index takes 0
+	// bytes, leaving only the root_share and set_id.
+	minAMPLength = 64
+
+	// maxAMPLength is the maximum length of a serialized AMP TLV record,
+	// which occurs when the truncated encoding of a child_index takes 2
+	// bytes.
+	maxAMPLength = 66
+)
+
+// AMPDecoder reads the AMP record from the provided io.Reader.
+func AMPDecoder(r io.Reader, val interface{}, buf *[8]byte, l uint64) error { + if v, ok := val.(*AMP); ok && minAMPLength <= l && l <= maxAMPLength { + if err := tlv.DBytes32(r, &v.rootShare, buf, 32); err != nil { + return err + } + + if err := tlv.DBytes32(r, &v.setID, buf, 32); err != nil { + return err + } + + return tlv.DTUint16(r, &v.childIndex, buf, l-64) + } + return tlv.NewTypeForDecodingErr(val, "AMP", l, maxAMPLength) +} + +// Record returns a tlv.Record that can be used to encode or decode this record. +func (a *AMP) Record() tlv.Record { + return tlv.MakeDynamicRecord( + AMPOnionType, a, a.PayloadSize, AMPEncoder, AMPDecoder, + ) +} + +// PayloadSize returns the size this record takes up in encoded form. +func (a *AMP) PayloadSize() uint64 { + return 32 + 32 + tlv.SizeTUint16(a.childIndex) +} + +// String returns a human-readble description of the amp payload fields. +func (a *AMP) String() string { + return fmt.Sprintf("root_share=%x set_id=%x child_index=%d", + a.rootShare, a.setID, a.childIndex) +} diff --git a/record/custom_records.go b/record/custom_records.go new file mode 100644 index 0000000000..f9e4f3426f --- /dev/null +++ b/record/custom_records.go @@ -0,0 +1,24 @@ +package record + +import "fmt" + +const ( + // CustomTypeStart is the start of the custom tlv type range as defined + // in BOLT 01. + CustomTypeStart = 65536 +) + +// CustomSet stores a set of custom key/value pairs. +type CustomSet map[uint64][]byte + +// Validate checks that all custom records are in the custom type range. 
+func (c CustomSet) Validate() error { + for key := range c { + if key < CustomTypeStart { + return fmt.Errorf("no custom records with types "+ + "below %v allowed", CustomTypeStart) + } + } + + return nil +} diff --git a/record/experimental.go b/record/experimental.go new file mode 100644 index 0000000000..3aff0ff264 --- /dev/null +++ b/record/experimental.go @@ -0,0 +1,6 @@ +package record + +const ( + // KeySendType is the custom record identifier for keysend preimages. + KeySendType uint64 = 5482373484 +) diff --git a/record/mpp.go b/record/mpp.go new file mode 100644 index 0000000000..6e260d5451 --- /dev/null +++ b/record/mpp.go @@ -0,0 +1,109 @@ +package record + +import ( + "fmt" + "io" + + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/tlv" +) + +// MPPOnionType is the type used in the onion to reference the MPP fields: +// total_amt and payment_addr. +const MPPOnionType tlv.Type = 8 + +// MPP is a record that encodes the fields necessary for multi-path payments. +type MPP struct { + // paymentAddr is a random, receiver-generated value used to avoid + // collisions with concurrent payers. + paymentAddr [32]byte + + // totalMsat is the total value of the payment, potentially spread + // across more than one HTLC. + totalMsat lnwire.MilliSatoshi +} + +// NewMPP generates a new MPP record with the given total and payment address. +func NewMPP(total lnwire.MilliSatoshi, addr [32]byte) *MPP { + return &MPP{ + paymentAddr: addr, + totalMsat: total, + } +} + +// PaymentAddr returns the payment address contained in the MPP record. +func (r *MPP) PaymentAddr() [32]byte { + return r.paymentAddr +} + +// TotalMsat returns the total value of an MPP payment in msats. +func (r *MPP) TotalMsat() lnwire.MilliSatoshi { + return r.totalMsat +} + +// MPPEncoder writes the MPP record to the provided io.Writer. 
+func MPPEncoder(w io.Writer, val interface{}, buf *[8]byte) error { + if v, ok := val.(*MPP); ok { + err := tlv.EBytes32(w, &v.paymentAddr, buf) + if err != nil { + return err + } + + return tlv.ETUint64T(w, uint64(v.totalMsat), buf) + } + return tlv.NewTypeForEncodingErr(val, "MPP") +} + +const ( + // minMPPLength is the minimum length of a serialized MPP TLV record, + // which occurs when the truncated encoding of total_amt_msat takes 0 + // bytes, leaving only the payment_addr. + minMPPLength = 32 + + // maxMPPLength is the maximum length of a serialized MPP TLV record, + // which occurs when the truncated encoding of total_amt_msat takes 8 + // bytes. + maxMPPLength = 40 +) + +// MPPDecoder reads the MPP record to the provided io.Reader. +func MPPDecoder(r io.Reader, val interface{}, buf *[8]byte, l uint64) error { + if v, ok := val.(*MPP); ok && minMPPLength <= l && l <= maxMPPLength { + if err := tlv.DBytes32(r, &v.paymentAddr, buf, 32); err != nil { + return err + } + + var total uint64 + if err := tlv.DTUint64(r, &total, buf, l-32); err != nil { + return err + } + v.totalMsat = lnwire.MilliSatoshi(total) + + return nil + + } + return tlv.NewTypeForDecodingErr(val, "MPP", l, maxMPPLength) +} + +// Record returns a tlv.Record that can be used to encode or decode this record. +func (r *MPP) Record() tlv.Record { + // Fixed-size, 32 byte payment address followed by truncated 64-bit + // total msat. + size := func() uint64 { + return 32 + tlv.SizeTUint64(uint64(r.totalMsat)) + } + + return tlv.MakeDynamicRecord( + MPPOnionType, r, size, MPPEncoder, MPPDecoder, + ) +} + +// PayloadSize returns the size this record takes up in encoded form. +func (r *MPP) PayloadSize() uint64 { + return 32 + tlv.SizeTUint64(uint64(r.totalMsat)) +} + +// String returns a human-readable representation of the mpp payload field. 
+func (r *MPP) String() string { + return fmt.Sprintf("total=%v, addr=%x", r.totalMsat, r.paymentAddr) +} diff --git a/record/record_test.go b/record/record_test.go new file mode 100644 index 0000000000..8c39790ed8 --- /dev/null +++ b/record/record_test.go @@ -0,0 +1,99 @@ +package record_test + +import ( + "bytes" + "testing" + + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" + "github.com/lightningnetwork/lnd/tlv" +) + +type recordEncDecTest struct { + name string + encRecord func() tlv.RecordProducer + decRecord func() tlv.RecordProducer + assert func(*testing.T, interface{}) +} + +var ( + testTotal = lnwire.MilliSatoshi(45) + testAddr = [32]byte{0x01, 0x02} + testShare = [32]byte{0x03, 0x04} + testSetID = [32]byte{0x05, 0x06} + testChildIndex = uint16(17) +) + +var recordEncDecTests = []recordEncDecTest{ + { + name: "mpp", + encRecord: func() tlv.RecordProducer { + return record.NewMPP(testTotal, testAddr) + }, + decRecord: func() tlv.RecordProducer { + return new(record.MPP) + }, + assert: func(t *testing.T, r interface{}) { + mpp := r.(*record.MPP) + if mpp.TotalMsat() != testTotal { + t.Fatal("incorrect total msat") + } + if mpp.PaymentAddr() != testAddr { + t.Fatal("incorrect payment addr") + } + }, + }, + { + name: "amp", + encRecord: func() tlv.RecordProducer { + return record.NewAMP( + testShare, testSetID, testChildIndex, + ) + }, + decRecord: func() tlv.RecordProducer { + return new(record.AMP) + }, + assert: func(t *testing.T, r interface{}) { + amp := r.(*record.AMP) + if amp.RootShare() != testShare { + t.Fatal("incorrect root share") + } + if amp.SetID() != testSetID { + t.Fatal("incorrect set id") + } + if amp.ChildIndex() != testChildIndex { + t.Fatal("incorrect child index") + } + }, + }, +} + +// TestRecordEncodeDecode is a generic test framework for custom TLV records. It +// asserts that records can encode and decode themselves, and that the value of +// the original record matches the decoded record. 
+func TestRecordEncodeDecode(t *testing.T) { + for _, test := range recordEncDecTests { + test := test + t.Run(test.name, func(t *testing.T) { + r := test.encRecord() + r2 := test.decRecord() + encStream := tlv.MustNewStream(r.Record()) + decStream := tlv.MustNewStream(r2.Record()) + + test.assert(t, r) + + var b bytes.Buffer + err := encStream.Encode(&b) + if err != nil { + t.Fatalf("unable to encode record: %v", err) + } + + err = decStream.Decode(bytes.NewReader(b.Bytes())) + if err != nil { + t.Fatalf("unable to decode record: %v", err) + } + + test.assert(t, r2) + }) + } +} diff --git a/routing/control_tower.go b/routing/control_tower.go index 0bf084769f..be23c4a904 100644 --- a/routing/control_tower.go +++ b/routing/control_tower.go @@ -1,12 +1,12 @@ package routing import ( - "errors" "sync" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lntypes" - "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/multimutex" + "github.com/lightningnetwork/lnd/queue" ) // ControlTower tracks all outgoing payments made, whose primary purpose is to @@ -15,54 +15,79 @@ import ( // restarts. Payments are transitioned through various payment states, and the // ControlTower interface provides access to driving the state transitions. type ControlTower interface { - // InitPayment atomically moves the payment into the InFlight state. // This method checks that no suceeded payment exist for this payment // hash. InitPayment(lntypes.Hash, *channeldb.PaymentCreationInfo) error - // RegisterAttempt atomically records the provided PaymentAttemptInfo. - RegisterAttempt(lntypes.Hash, *channeldb.PaymentAttemptInfo) error + // RegisterAttempt atomically records the provided HTLCAttemptInfo. + RegisterAttempt(lntypes.Hash, *channeldb.HTLCAttemptInfo) error - // Success transitions a payment into the Succeeded state. 
After - // invoking this method, InitPayment should always return an error to - // prevent us from making duplicate payments to the same payment hash. - // The provided preimage is atomically saved to the DB for record - // keeping. - Success(lntypes.Hash, lntypes.Preimage) error + // SettleAttempt marks the given attempt settled with the preimage. If + // this is a multi shard payment, this might implicitly mean the the + // full payment succeeded. + // + // After invoking this method, InitPayment should always return an + // error to prevent us from making duplicate payments to the same + // payment hash. The provided preimage is atomically saved to the DB + // for record keeping. + SettleAttempt(lntypes.Hash, uint64, *channeldb.HTLCSettleInfo) error + + // FailAttempt marks the given payment attempt failed. + FailAttempt(lntypes.Hash, uint64, *channeldb.HTLCFailInfo) error + + // FetchPayment fetches the payment corresponding to the given payment + // hash. + FetchPayment(paymentHash lntypes.Hash) (*channeldb.MPPayment, error) // Fail transitions a payment into the Failed state, and records the - // reason the payment failed. After invoking this method, InitPayment - // should return nil on its next call for this payment hash, allowing - // the switch to make a subsequent payment. + // ultimate reason the payment failed. Note that this should only be + // called when all active active attempts are already failed. After + // invoking this method, InitPayment should return nil on its next call + // for this payment hash, allowing the user to make a subsequent + // payment. Fail(lntypes.Hash, channeldb.FailureReason) error // FetchInFlightPayments returns all payments with status InFlight. FetchInFlightPayments() ([]*channeldb.InFlightPayment, error) // SubscribePayment subscribes to updates for the payment with the given - // hash. 
It returns a boolean indicating whether the payment is still in - // flight and a channel that provides the final outcome of the payment. - SubscribePayment(paymentHash lntypes.Hash) (bool, chan PaymentResult, + // hash. A first update with the current state of the payment is always + // sent out immediately. + SubscribePayment(paymentHash lntypes.Hash) (*ControlTowerSubscriber, error) } -// PaymentResult is the struct describing the events received by payment -// subscribers. -type PaymentResult struct { - // Success indicates whether the payment was successful. - Success bool +// ControlTowerSubscriber contains the state for a payment update subscriber. +type ControlTowerSubscriber struct { + // Updates is the channel over which *channeldb.MPPayment updates can be + // received. + Updates <-chan interface{} + + queue *queue.ConcurrentQueue + quit chan struct{} +} + +// newControlTowerSubscriber instantiates a new subscriber state object. +func newControlTowerSubscriber() *ControlTowerSubscriber { + // Create a queue for payment updates. + queue := queue.NewConcurrentQueue(20) + queue.Start() - // Route is the (last) route attempted to send the HTLC. It is only set - // for successful payments. - Route *route.Route + return &ControlTowerSubscriber{ + Updates: queue.ChanOut(), + queue: queue, + quit: make(chan struct{}), + } +} - // PaymentPreimage is the preimage of a successful payment. This serves - // as a proof of payment. It is only set for successful payments. - Preimage lntypes.Preimage +// Close signals that the subscriber is no longer interested in updates. +func (s *ControlTowerSubscriber) Close() { + // Close quit channel so that any pending writes to the queue are + // cancelled. + close(s.quit) - // Failure is a failure reason code indicating the reason the payment - // failed. It is only set for failed payments. - FailureReason channeldb.FailureReason + // Stop the queue goroutine so that it won't leak. 
+ s.queue.Stop() } // controlTower is persistent implementation of ControlTower to restrict @@ -70,15 +95,21 @@ type PaymentResult struct { type controlTower struct { db *channeldb.PaymentControl - subscribers map[lntypes.Hash][]chan PaymentResult + subscribers map[lntypes.Hash][]*ControlTowerSubscriber subscribersMtx sync.Mutex + + // paymentsMtx provides synchronization on the payment level to ensure + // that no race conditions occur in between updating the database and + // sending a notification. + paymentsMtx *multimutex.HashMutex } // NewControlTower creates a new instance of the controlTower. func NewControlTower(db *channeldb.PaymentControl) ControlTower { return &controlTower{ db: db, - subscribers: make(map[lntypes.Hash][]chan PaymentResult), + subscribers: make(map[lntypes.Hash][]*ControlTowerSubscriber), + paymentsMtx: multimutex.NewHashMutex(), } } @@ -92,61 +123,68 @@ func (p *controlTower) InitPayment(paymentHash lntypes.Hash, return p.db.InitPayment(paymentHash, info) } -// RegisterAttempt atomically records the provided PaymentAttemptInfo to the +// RegisterAttempt atomically records the provided HTLCAttemptInfo to the // DB. func (p *controlTower) RegisterAttempt(paymentHash lntypes.Hash, - attempt *channeldb.PaymentAttemptInfo) error { + attempt *channeldb.HTLCAttemptInfo) error { + + p.paymentsMtx.Lock(paymentHash) + defer p.paymentsMtx.Unlock(paymentHash) - return p.db.RegisterAttempt(paymentHash, attempt) + payment, err := p.db.RegisterAttempt(paymentHash, attempt) + if err != nil { + return err + } + + // Notify subscribers of the attempt registration. + p.notifySubscribers(paymentHash, payment) + + return nil } -// Success transitions a payment into the Succeeded state. After invoking this -// method, InitPayment should always return an error to prevent us from making -// duplicate payments to the same payment hash. The provided preimage is -// atomically saved to the DB for record keeping. 
-func (p *controlTower) Success(paymentHash lntypes.Hash, - preimage lntypes.Preimage) error { +// SettleAttempt marks the given attempt settled with the preimage. If +// this is a multi shard payment, this might implicitly mean the the +// full payment succeeded. +func (p *controlTower) SettleAttempt(paymentHash lntypes.Hash, + attemptID uint64, settleInfo *channeldb.HTLCSettleInfo) error { + + p.paymentsMtx.Lock(paymentHash) + defer p.paymentsMtx.Unlock(paymentHash) - route, err := p.db.Success(paymentHash, preimage) + payment, err := p.db.SettleAttempt(paymentHash, attemptID, settleInfo) if err != nil { return err } // Notify subscribers of success event. - p.notifyFinalEvent( - paymentHash, createSuccessResult(route, preimage), - ) + p.notifySubscribers(paymentHash, payment) return nil } -// createSuccessResult creates a success result to send to subscribers. -func createSuccessResult(rt *route.Route, - preimage lntypes.Preimage) *PaymentResult { +// FailAttempt marks the given payment attempt failed. +func (p *controlTower) FailAttempt(paymentHash lntypes.Hash, + attemptID uint64, failInfo *channeldb.HTLCFailInfo) error { + + p.paymentsMtx.Lock(paymentHash) + defer p.paymentsMtx.Unlock(paymentHash) - return &PaymentResult{ - Success: true, - Preimage: preimage, - Route: rt, + payment, err := p.db.FailAttempt(paymentHash, attemptID, failInfo) + if err != nil { + return err } -} -// createFailResult creates a failed result to send to subscribers. -func createFailedResult(rt *route.Route, - reason channeldb.FailureReason) *PaymentResult { + // Notify subscribers of failed attempt. + p.notifySubscribers(paymentHash, payment) - result := &PaymentResult{ - Success: false, - FailureReason: reason, - } + return nil +} - // In case of incorrect payment details, set the route. This can be used - // for probing and to extract a fee estimate from the route. 
- if reason == channeldb.FailureReasonIncorrectPaymentDetails { - result.Route = rt - } +// FetchPayment fetches the payment corresponding to the given payment hash. +func (p *controlTower) FetchPayment(paymentHash lntypes.Hash) ( + *channeldb.MPPayment, error) { - return result + return p.db.FetchPayment(paymentHash) } // Fail transitions a payment into the Failed state, and records the reason the @@ -156,15 +194,16 @@ func createFailedResult(rt *route.Route, func (p *controlTower) Fail(paymentHash lntypes.Hash, reason channeldb.FailureReason) error { - route, err := p.db.Fail(paymentHash, reason) + p.paymentsMtx.Lock(paymentHash) + defer p.paymentsMtx.Unlock(paymentHash) + + payment, err := p.db.Fail(paymentHash, reason) if err != nil { return err } // Notify subscribers of fail event. - p.notifyFinalEvent( - paymentHash, createFailedResult(route, reason), - ) + p.notifySubscribers(paymentHash, payment) return nil } @@ -174,92 +213,81 @@ func (p *controlTower) FetchInFlightPayments() ([]*channeldb.InFlightPayment, er return p.db.FetchInFlightPayments() } -// SubscribePayment subscribes to updates for the payment with the given hash. -// It returns a boolean indicating whether the payment is still in flight and a -// channel that provides the final outcome of the payment. +// SubscribePayment subscribes to updates for the payment with the given hash. A +// first update with the current state of the payment is always sent out +// immediately. func (p *controlTower) SubscribePayment(paymentHash lntypes.Hash) ( - bool, chan PaymentResult, error) { - - // Create a channel with buffer size 1. For every payment there will be - // exactly one event sent. - c := make(chan PaymentResult, 1) + *ControlTowerSubscriber, error) { - // Take lock before querying the db to prevent this scenario: - // FetchPayment returns us an in-flight state -> payment succeeds, but - // there is no subscriber to notify yet -> we add ourselves as a - // subscriber -> ... 
we will never receive a notification. - p.subscribersMtx.Lock() - defer p.subscribersMtx.Unlock() + // Take lock before querying the db to prevent missing or duplicating an + // update. + p.paymentsMtx.Lock(paymentHash) + defer p.paymentsMtx.Unlock(paymentHash) payment, err := p.db.FetchPayment(paymentHash) if err != nil { - return false, nil, err + return nil, err } - var event PaymentResult + subscriber := newControlTowerSubscriber() - switch payment.Status { + // Always write current payment state to the channel. + subscriber.queue.ChanIn() <- payment - // Payment is currently in flight. Register this subscriber and - // return without writing a result to the channel yet. - case channeldb.StatusInFlight: + // Payment is currently in flight. Register this subscriber for further + // updates. Otherwise this update is the final update and the incoming + // channel can be closed. This will close the queue's outgoing channel + // when all updates have been written. + if payment.Status == channeldb.StatusInFlight { + p.subscribersMtx.Lock() p.subscribers[paymentHash] = append( - p.subscribers[paymentHash], c, - ) - - return true, c, nil - - // Payment already succeeded. It is not necessary to register as - // a subscriber, because we can send the result on the channel - // immediately. - case channeldb.StatusSucceeded: - event = *createSuccessResult( - &payment.Attempt.Route, *payment.PaymentPreimage, + p.subscribers[paymentHash], subscriber, ) - - // Payment already failed. It is not necessary to register as a - // subscriber, because we can send the result on the channel - // immediately. - case channeldb.StatusFailed: - var route *route.Route - if payment.Attempt != nil { - route = &payment.Attempt.Route - } - event = *createFailedResult( - route, *payment.Failure, - ) - - default: - return false, nil, errors.New("unknown payment status") + p.subscribersMtx.Unlock() + } else { + close(subscriber.queue.ChanIn()) } - // Write immediate result to the channel. 
-	c <- event
-	close(c)
-
-	return false, c, nil
+	return subscriber, nil
 }
 
-// notifyFinalEvent sends a final payment event to all subscribers of this
-// payment. The channel will be closed after this.
-func (p *controlTower) notifyFinalEvent(paymentHash lntypes.Hash,
-	event *PaymentResult) {
+// notifySubscribers sends a final payment event to all subscribers of this
+// payment. The channel will be closed after this. Note that this function must
+// be executed atomically (by means of a lock) with the database update to
+// guarantee consistency of the notifications.
+func (p *controlTower) notifySubscribers(paymentHash lntypes.Hash,
+	event *channeldb.MPPayment) {
 
-	// Get all subscribers for this hash. As there is only a single outcome,
-	// the subscriber list can be cleared.
+	// Get all subscribers for this payment.
 	p.subscribersMtx.Lock()
 	list, ok := p.subscribers[paymentHash]
 	if !ok {
 		p.subscribersMtx.Unlock()
 		return
 	}
-	delete(p.subscribers, paymentHash)
+
+	// If the payment reached a terminal state, the subscriber list can be
+	// cleared. There won't be any more updates.
+	terminal := event.Status != channeldb.StatusInFlight
+	if terminal {
+		delete(p.subscribers, paymentHash)
+	}
 	p.subscribersMtx.Unlock()
 
-	// Notify all subscribers of the event. The subscriber channel is
-	// buffered, so it cannot block here.
+	// Notify all subscribers of the event.
 	for _, subscriber := range list {
-		subscriber <- *event
-		close(subscriber)
+		select {
+		case subscriber.queue.ChanIn() <- event:
+			// If this event is the last, close the incoming channel
+			// of the queue. This will signal the subscriber that
+			// there won't be any more updates.
+			if terminal {
+				close(subscriber.queue.ChanIn())
+			}
+
+		// If subscriber disappeared, skip notification. For further
+		// notifications, we'll keep skipping over this subscriber.
+ case <-subscriber.quit: + } } } diff --git a/routing/control_tower_test.go b/routing/control_tower_test.go index 49cc6d436e..7907e37e2c 100644 --- a/routing/control_tower_test.go +++ b/routing/control_tower_test.go @@ -13,9 +13,8 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/channeldb" - "github.com/lightningnetwork/lnd/routing/route" - "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/routing/route" ) var ( @@ -56,7 +55,7 @@ func TestControlTowerSubscribeUnknown(t *testing.T) { pControl := NewControlTower(channeldb.NewPaymentControl(db)) // Subscription should fail when the payment is not known. - _, _, err = pControl.SubscribePayment(lntypes.Hash{1}) + _, err = pControl.SubscribePayment(lntypes.Hash{1}) if err != channeldb.ErrPaymentNotInitiated { t.Fatal("expected subscribe to fail for unknown payment") } @@ -87,13 +86,10 @@ func TestControlTowerSubscribeSuccess(t *testing.T) { // Subscription should succeed and immediately report the InFlight // status. - inFlight, subscriber1, err := pControl.SubscribePayment(info.PaymentHash) + subscriber1, err := pControl.SubscribePayment(info.PaymentHash) if err != nil { t.Fatalf("expected subscribe to succeed, but got: %v", err) } - if !inFlight { - t.Fatalf("unexpected payment to be in flight") - } // Register an attempt. err = pControl.RegisterAttempt(info.PaymentHash, attempt) @@ -102,58 +98,65 @@ func TestControlTowerSubscribeSuccess(t *testing.T) { } // Register a second subscriber after the first attempt has started. - inFlight, subscriber2, err := pControl.SubscribePayment(info.PaymentHash) + subscriber2, err := pControl.SubscribePayment(info.PaymentHash) if err != nil { t.Fatalf("expected subscribe to succeed, but got: %v", err) } - if !inFlight { - t.Fatalf("unexpected payment to be in flight") - } // Mark the payment as successful. 
- if err := pControl.Success(info.PaymentHash, preimg); err != nil { + err = pControl.SettleAttempt( + info.PaymentHash, attempt.AttemptID, + &channeldb.HTLCSettleInfo{ + Preimage: preimg, + }, + ) + if err != nil { t.Fatal(err) } // Register a third subscriber after the payment succeeded. - inFlight, subscriber3, err := pControl.SubscribePayment(info.PaymentHash) + subscriber3, err := pControl.SubscribePayment(info.PaymentHash) if err != nil { t.Fatalf("expected subscribe to succeed, but got: %v", err) } - if inFlight { - t.Fatalf("expected payment to be finished") - } // We expect all subscribers to now report the final outcome followed by // no other events. - subscribers := []chan PaymentResult{ + subscribers := []*ControlTowerSubscriber{ subscriber1, subscriber2, subscriber3, } for _, s := range subscribers { - var result PaymentResult - select { - case result = <-s: - case <-time.After(testTimeout): - t.Fatal("timeout waiting for payment result") + var result *channeldb.MPPayment + for result == nil || result.Status == channeldb.StatusInFlight { + select { + case item := <-s.Updates: + result = item.(*channeldb.MPPayment) + case <-time.After(testTimeout): + t.Fatal("timeout waiting for payment result") + } } - if !result.Success { + if result.Status != channeldb.StatusSucceeded { t.Fatal("unexpected payment state") } - if result.Preimage != preimg { + settle, _ := result.TerminalInfo() + if settle.Preimage != preimg { t.Fatal("unexpected preimage") } - - if !reflect.DeepEqual(result.Route, &attempt.Route) { - t.Fatalf("unexpected route: %v vs %v", - spew.Sdump(result.Route), + if len(result.HTLCs) != 1 { + t.Fatalf("expected one htlc, got %d", len(result.HTLCs)) + } + htlc := result.HTLCs[0] + if !reflect.DeepEqual(htlc.Route, attempt.Route) { + t.Fatalf("unexpected htlc route: %v vs %v", + spew.Sdump(htlc.Route), spew.Sdump(attempt.Route)) } // After the final event, we expect the channel to be closed. 
select { - case _, ok := <-s: + case _, ok := <-s.Updates: if ok { t.Fatal("expected channel to be closed") } @@ -168,6 +171,15 @@ func TestControlTowerSubscribeSuccess(t *testing.T) { func TestPaymentControlSubscribeFail(t *testing.T) { t.Parallel() + t.Run("register attempt", func(t *testing.T) { + testPaymentControlSubscribeFail(t, true) + }) + t.Run("no register attempt", func(t *testing.T) { + testPaymentControlSubscribeFail(t, false) + }) +} + +func testPaymentControlSubscribeFail(t *testing.T, registerAttempt bool) { db, err := initDB() if err != nil { t.Fatalf("unable to init db: %v", err) @@ -176,7 +188,7 @@ func TestPaymentControlSubscribeFail(t *testing.T) { pControl := NewControlTower(channeldb.NewPaymentControl(db)) // Initiate a payment. - info, _, _, err := genInfo() + info, attempt, _, err := genInfo() if err != nil { t.Fatal(err) } @@ -187,52 +199,91 @@ func TestPaymentControlSubscribeFail(t *testing.T) { } // Subscription should succeed. - _, subscriber1, err := pControl.SubscribePayment(info.PaymentHash) + subscriber1, err := pControl.SubscribePayment(info.PaymentHash) if err != nil { t.Fatalf("expected subscribe to succeed, but got: %v", err) } + // Conditionally register the attempt based on the test type. This + // allows us to simulate failing after attempting with an htlc or before + // making any attempts at all. + if registerAttempt { + // Register an attempt. + err = pControl.RegisterAttempt(info.PaymentHash, attempt) + if err != nil { + t.Fatal(err) + } + + // Fail the payment attempt. + err := pControl.FailAttempt( + info.PaymentHash, attempt.AttemptID, + &channeldb.HTLCFailInfo{}, + ) + if err != nil { + t.Fatalf("unable to fail htlc: %v", err) + } + } + // Mark the payment as failed. if err := pControl.Fail(info.PaymentHash, channeldb.FailureReasonTimeout); err != nil { t.Fatal(err) } // Register a second subscriber after the payment failed. 
- inFlight, subscriber2, err := pControl.SubscribePayment(info.PaymentHash) + subscriber2, err := pControl.SubscribePayment(info.PaymentHash) if err != nil { t.Fatalf("expected subscribe to succeed, but got: %v", err) } - if inFlight { - t.Fatalf("expected payment to be finished") - } // We expect all subscribers to now report the final outcome followed by // no other events. - subscribers := []chan PaymentResult{ + subscribers := []*ControlTowerSubscriber{ subscriber1, subscriber2, } for _, s := range subscribers { - var result PaymentResult - select { - case result = <-s: - case <-time.After(testTimeout): - t.Fatal("timeout waiting for payment result") + var result *channeldb.MPPayment + for result == nil || result.Status == channeldb.StatusInFlight { + select { + case item := <-s.Updates: + result = item.(*channeldb.MPPayment) + case <-time.After(testTimeout): + t.Fatal("timeout waiting for payment result") + } } - if result.Success { + if result.Status == channeldb.StatusSucceeded { t.Fatal("unexpected payment state") } - if result.Route != nil { - t.Fatal("expected no route") + + // There will either be one or zero htlcs depending on whether + // or not the attempt was registered. Assert the correct number + // is present, and the route taken if the attempt was + // registered. + if registerAttempt { + if len(result.HTLCs) != 1 { + t.Fatalf("expected 1 htlc, got: %d", + len(result.HTLCs)) + } + + htlc := result.HTLCs[0] + if !reflect.DeepEqual(htlc.Route, testRoute) { + t.Fatalf("unexpected htlc route: %v vs %v", + spew.Sdump(htlc.Route), + spew.Sdump(testRoute)) + } + } else if len(result.HTLCs) != 0 { + t.Fatalf("expected 0 htlcs, got: %d", + len(result.HTLCs)) } - if result.FailureReason != channeldb.FailureReasonTimeout { + + if *result.FailureReason != channeldb.FailureReasonTimeout { t.Fatal("unexpected failure reason") } // After the final event, we expect the channel to be closed. 
select { - case _, ok := <-s: + case _, ok := <-s.Updates: if ok { t.Fatal("expected channel to be closed") } @@ -256,7 +307,7 @@ func initDB() (*channeldb.DB, error) { return db, err } -func genInfo() (*channeldb.PaymentCreationInfo, *channeldb.PaymentAttemptInfo, +func genInfo() (*channeldb.PaymentCreationInfo, *channeldb.HTLCAttemptInfo, lntypes.Preimage, error) { preimage, err := genPreimage() @@ -268,12 +319,12 @@ func genInfo() (*channeldb.PaymentCreationInfo, *channeldb.PaymentAttemptInfo, rhash := sha256.Sum256(preimage[:]) return &channeldb.PaymentCreationInfo{ PaymentHash: rhash, - Value: 1, - CreationDate: time.Unix(time.Now().Unix(), 0), + Value: testRoute.ReceiverAmt(), + CreationTime: time.Unix(time.Now().Unix(), 0), PaymentRequest: []byte("hola"), }, - &channeldb.PaymentAttemptInfo{ - PaymentID: 1, + &channeldb.HTLCAttemptInfo{ + AttemptID: 1, SessionKey: priv, Route: testRoute, }, preimage, nil diff --git a/routing/errors.go b/routing/errors.go index 0eb6cb37b8..3beac229af 100644 --- a/routing/errors.go +++ b/routing/errors.go @@ -7,50 +7,14 @@ import "github.com/go-errors/errors" type errorCode uint8 const ( - // ErrNoPathFound is returned when a path to the target destination - // does not exist in the graph. - ErrNoPathFound errorCode = iota - - // ErrNoRouteFound is returned when the router is unable to find a - // valid route to the target destination after fees and time-lock - // limitations are factored in. - ErrNoRouteFound - - // ErrInsufficientCapacity is returned when a path if found, yet the - // capacity of one of the channels in the path is insufficient to carry - // the payment. - ErrInsufficientCapacity - - // ErrMaxHopsExceeded is returned when a candidate path is found, but - // the length of that path exceeds HopLimit. - ErrMaxHopsExceeded - - // ErrTargetNotInNetwork is returned when the target of a path-finding - // or payment attempt isn't known to be within the current version of - // the channel graph. 
- ErrTargetNotInNetwork - // ErrOutdated is returned when the routing update already have // been applied, or a newer update is already known. - ErrOutdated + ErrOutdated errorCode = iota // ErrIgnored is returned when the update have been ignored because // this update can't bring us something new, or because a node // announcement was given for node not found in any channel. ErrIgnored - - // ErrRejected is returned if the update is for a channel ID that was - // previously added to the reject cache because of an invalid update - // was attempted to be processed. - ErrRejected - - // ErrPaymentAttemptTimeout is an error that indicates that a payment - // attempt timed out before we were able to successfully route an HTLC. - ErrPaymentAttemptTimeout - - // ErrFeeLimitExceeded is returned when the total fees of a route exceed - // the user-specified fee limit. - ErrFeeLimitExceeded ) // routerError is a structure that represent the error inside the routing package, diff --git a/routing/graph.go b/routing/graph.go index f3dfa121da..c1a68bae99 100644 --- a/routing/graph.go +++ b/routing/graph.go @@ -1,4 +1,104 @@ package routing -// TODO(roasbeef): abstract out graph to interface -// * add in-memory version of graph for tests +import ( + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +// routingGraph is an abstract interface that provides information about nodes +// and edges to pathfinding. +type routingGraph interface { + // forEachNodeChannel calls the callback for every channel of the given node. + forEachNodeChannel(nodePub route.Vertex, + cb func(*channeldb.ChannelEdgeInfo, *channeldb.ChannelEdgePolicy, + *channeldb.ChannelEdgePolicy) error) error + + // sourceNode returns the source node of the graph. + sourceNode() route.Vertex + + // fetchNodeFeatures returns the features of the given node. 
+	fetchNodeFeatures(nodePub route.Vertex) (*lnwire.FeatureVector, error)
+}
+
+// dbRoutingTx is a routingGraph implementation that retrieves from the
+// database.
+type dbRoutingTx struct {
+	graph  *channeldb.ChannelGraph
+	tx     kvdb.ReadTx
+	source route.Vertex
+}
+
+// newDbRoutingTx instantiates a new db-connected routing graph. It implicitly
+// instantiates a new read transaction.
+func newDbRoutingTx(graph *channeldb.ChannelGraph) (*dbRoutingTx, error) {
+	sourceNode, err := graph.SourceNode()
+	if err != nil {
+		return nil, err
+	}
+
+	tx, err := graph.Database().BeginReadTx()
+	if err != nil {
+		return nil, err
+	}
+
+	return &dbRoutingTx{
+		graph:  graph,
+		tx:     tx,
+		source: sourceNode.PubKeyBytes,
+	}, nil
+}
+
+// close closes the underlying db transaction.
+func (g *dbRoutingTx) close() error {
+	return g.tx.Rollback()
+}
+
+// forEachNodeChannel calls the callback for every channel of the given node.
+//
+// NOTE: Part of the routingGraph interface.
+func (g *dbRoutingTx) forEachNodeChannel(nodePub route.Vertex,
+	cb func(*channeldb.ChannelEdgeInfo, *channeldb.ChannelEdgePolicy,
+		*channeldb.ChannelEdgePolicy) error) error {
+
+	txCb := func(_ kvdb.ReadTx, info *channeldb.ChannelEdgeInfo,
+		p1, p2 *channeldb.ChannelEdgePolicy) error {
+
+		return cb(info, p1, p2)
+	}
+
+	return g.graph.ForEachNodeChannel(g.tx, nodePub[:], txCb)
+}
+
+// sourceNode returns the source node of the graph.
+//
+// NOTE: Part of the routingGraph interface.
+func (g *dbRoutingTx) sourceNode() route.Vertex {
+	return g.source
+}
+
+// fetchNodeFeatures returns the features of the given node. If the node is
+// unknown, assume no additional features are supported.
+//
+// NOTE: Part of the routingGraph interface.
+func (g *dbRoutingTx) fetchNodeFeatures(nodePub route.Vertex) (
+	*lnwire.FeatureVector, error) {
+
+	targetNode, err := g.graph.FetchLightningNode(g.tx, nodePub)
+	switch err {
+
+	// If the node exists and has features, return them directly.
+ case nil: + return targetNode.Features, nil + + // If we couldn't find a node announcement, populate a blank feature + // vector. + case channeldb.ErrGraphNodeNotFound: + return lnwire.EmptyFeatureVector(), nil + + // Otherwise bubble the error up. + default: + return nil, err + } +} diff --git a/routing/heap.go b/routing/heap.go index 1cbbc5e537..f6869663cd 100644 --- a/routing/heap.go +++ b/routing/heap.go @@ -3,6 +3,7 @@ package routing import ( "container/heap" + "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/route" ) @@ -23,9 +24,10 @@ type nodeWithDist struct { // amount that includes also the fees for subsequent hops. amountToReceive lnwire.MilliSatoshi - // incomingCltv is the expected cltv value for the incoming htlc of this - // node. This value does not include the final cltv. - incomingCltv uint32 + // incomingCltv is the expected absolute expiry height for the incoming + // htlc of this node. This value should already include the final cltv + // delta. + incomingCltv int32 // probability is the probability that from this node onward the route // is successful. @@ -35,12 +37,19 @@ type nodeWithDist struct { // Includes the routing fees and a virtual cost factor to account for // time locks. weight int64 + + // nextHop is the edge this route comes from. + nextHop *channeldb.ChannelEdgePolicy + + // routingInfoSize is the total size requirement for the payloads field + // in the onion packet from this hop towards the final destination. + routingInfoSize uint64 } // distanceHeap is a min-distance heap that's used within our path finding // algorithm to keep track of the "closest" node to our source node. type distanceHeap struct { - nodes []nodeWithDist + nodes []*nodeWithDist // pubkeyIndices maps public keys of nodes to their respective index in // the heap. 
This is used as a way to avoid db lookups by using heap.Fix @@ -50,9 +59,10 @@ type distanceHeap struct { // newDistanceHeap initializes a new distance heap. This is required because // we must initialize the pubkeyIndices map for path-finding optimizations. -func newDistanceHeap() distanceHeap { +func newDistanceHeap(numNodes int) distanceHeap { distHeap := distanceHeap{ - pubkeyIndices: make(map[route.Vertex]int), + pubkeyIndices: make(map[route.Vertex]int, numNodes), + nodes: make([]*nodeWithDist, 0, numNodes), } return distHeap @@ -68,6 +78,11 @@ func (d *distanceHeap) Len() int { return len(d.nodes) } // // NOTE: This is part of the heap.Interface implementation. func (d *distanceHeap) Less(i, j int) bool { + // If distances are equal, tie break on probability. + if d.nodes[i].dist == d.nodes[j].dist { + return d.nodes[i].probability > d.nodes[j].probability + } + return d.nodes[i].dist < d.nodes[j].dist } @@ -84,7 +99,7 @@ func (d *distanceHeap) Swap(i, j int) { // // NOTE: This is part of the heap.Interface implementation. func (d *distanceHeap) Push(x interface{}) { - n := x.(nodeWithDist) + n := x.(*nodeWithDist) d.nodes = append(d.nodes, n) d.pubkeyIndices[n.node] = len(d.nodes) - 1 } @@ -96,6 +111,7 @@ func (d *distanceHeap) Push(x interface{}) { func (d *distanceHeap) Pop() interface{} { n := len(d.nodes) x := d.nodes[n-1] + d.nodes[n-1] = nil d.nodes = d.nodes[0 : n-1] delete(d.pubkeyIndices, x.node) return x @@ -106,7 +122,7 @@ func (d *distanceHeap) Pop() interface{} { // modify its position and reorder the heap. If the vertex does not already // exist in the heap, then it is pushed onto the heap. Otherwise, we will end // up performing more db lookups on the same node in the pathfinding algorithm. 
-func (d *distanceHeap) PushOrFix(dist nodeWithDist) { +func (d *distanceHeap) PushOrFix(dist *nodeWithDist) { index, ok := d.pubkeyIndices[dist.node] if !ok { heap.Push(d, dist) diff --git a/routing/heap_test.go b/routing/heap_test.go index 4214e965b0..659653f708 100644 --- a/routing/heap_test.go +++ b/routing/heap_test.go @@ -17,19 +17,19 @@ func TestHeapOrdering(t *testing.T) { // First, create a blank heap, we'll use this to push on randomly // generated items. - nodeHeap := newDistanceHeap() + nodeHeap := newDistanceHeap(0) prand.Seed(1) // Create 100 random entries adding them to the heap created above, but // also a list that we'll sort with the entries. const numEntries = 100 - sortedEntries := make([]nodeWithDist, 0, numEntries) + sortedEntries := make([]*nodeWithDist, 0, numEntries) for i := 0; i < numEntries; i++ { var pubKey [33]byte prand.Read(pubKey[:]) - entry := nodeWithDist{ + entry := &nodeWithDist{ node: route.Vertex(pubKey), dist: prand.Int63(), } @@ -55,9 +55,9 @@ func TestHeapOrdering(t *testing.T) { // One by one, pop of all the entries from the heap, they should come // out in sorted order. - var poppedEntries []nodeWithDist + var poppedEntries []*nodeWithDist for nodeHeap.Len() != 0 { - e := heap.Pop(&nodeHeap).(nodeWithDist) + e := heap.Pop(&nodeHeap).(*nodeWithDist) poppedEntries = append(poppedEntries, e) } diff --git a/routing/integrated_routing_context_test.go b/routing/integrated_routing_context_test.go new file mode 100644 index 0000000000..b223668e63 --- /dev/null +++ b/routing/integrated_routing_context_test.go @@ -0,0 +1,250 @@ +package routing + +import ( + "fmt" + "io/ioutil" + "math" + "os" + "testing" + "time" + + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +const ( + sourceNodeID = 1 + targetNodeID = 2 +) + +// integratedRoutingContext defines the context in which integrated routing +// tests run. 
+type integratedRoutingContext struct { + graph *mockGraph + t *testing.T + + source *mockNode + target *mockNode + + amt lnwire.MilliSatoshi + finalExpiry int32 + + mcCfg MissionControlConfig + pathFindingCfg PathFindingConfig +} + +// newIntegratedRoutingContext instantiates a new integrated routing test +// context with a source and a target node. +func newIntegratedRoutingContext(t *testing.T) *integratedRoutingContext { + // Instantiate a mock graph. + source := newMockNode(sourceNodeID) + target := newMockNode(targetNodeID) + + graph := newMockGraph(t) + graph.addNode(source) + graph.addNode(target) + graph.source = source + + // Initiate the test context with a set of default configuration values. + // We don't use the lnd defaults here, because otherwise changing the + // defaults would break the unit tests. The actual values picked aren't + // critical to excite certain behavior, but do need to be aligned with + // the test case assertions. + ctx := integratedRoutingContext{ + t: t, + graph: graph, + amt: 100000, + finalExpiry: 40, + + mcCfg: MissionControlConfig{ + PenaltyHalfLife: 30 * time.Minute, + AprioriHopProbability: 0.6, + AprioriWeight: 0.5, + SelfNode: source.pubkey, + }, + + pathFindingCfg: PathFindingConfig{ + PaymentAttemptPenalty: 1000, + MinProbability: 0.01, + }, + + source: source, + target: target, + } + + return &ctx +} + +// htlcAttempt records the route and outcome of an attempted htlc. +type htlcAttempt struct { + route *route.Route + success bool +} + +func (h htlcAttempt) String() string { + return fmt.Sprintf("success=%v, route=%v", h.success, h.route) +} + +// testPayment launches a test payment and asserts that it is completed after +// the expected number of attempts. +func (c *integratedRoutingContext) testPayment(maxParts uint32) ([]htlcAttempt, + error) { + + var ( + nextPid uint64 + attempts []htlcAttempt + ) + + // Create temporary database for mission control. 
+ file, err := ioutil.TempFile("", "*.db") + if err != nil { + c.t.Fatal(err) + } + + dbPath := file.Name() + defer os.Remove(dbPath) + + db, err := kvdb.Open(kvdb.BoltBackendName, dbPath, true) + if err != nil { + c.t.Fatal(err) + } + defer db.Close() + + // Instantiate a new mission control with the current configuration + // values. + mc, err := NewMissionControl(db, &c.mcCfg) + if err != nil { + c.t.Fatal(err) + } + + getBandwidthHints := func() (map[uint64]lnwire.MilliSatoshi, error) { + // Create bandwidth hints based on local channel balances. + bandwidthHints := map[uint64]lnwire.MilliSatoshi{} + for _, ch := range c.graph.nodes[c.source.pubkey].channels { + bandwidthHints[ch.id] = ch.balance + } + + return bandwidthHints, nil + } + + var paymentAddr [32]byte + payment := LightningPayment{ + FinalCLTVDelta: uint16(c.finalExpiry), + FeeLimit: lnwire.MaxMilliSatoshi, + Target: c.target.pubkey, + PaymentAddr: &paymentAddr, + DestFeatures: lnwire.NewFeatureVector(mppFeatures, nil), + Amount: c.amt, + CltvLimit: math.MaxUint32, + MaxParts: maxParts, + } + + session, err := newPaymentSession( + &payment, getBandwidthHints, + func() (routingGraph, func(), error) { + return c.graph, func() {}, nil + }, + mc, c.pathFindingCfg, + ) + if err != nil { + c.t.Fatal(err) + } + + // Override default minimum shard amount. + session.minShardAmt = lnwire.NewMSatFromSatoshis(5000) + + // Now the payment control loop starts. It will keep trying routes until + // the payment succeeds. + var ( + amtRemaining = payment.Amount + inFlightHtlcs uint32 + ) + for { + // Create bandwidth hints based on local channel balances. + bandwidthHints := map[uint64]lnwire.MilliSatoshi{} + for _, ch := range c.graph.nodes[c.source.pubkey].channels { + bandwidthHints[ch.id] = ch.balance + } + + // Find a route. + route, err := session.RequestRoute( + amtRemaining, lnwire.MaxMilliSatoshi, inFlightHtlcs, 0, + ) + if err != nil { + return attempts, err + } + + // Send out the htlc on the mock graph. 
+ pid := nextPid + nextPid++ + htlcResult, err := c.graph.sendHtlc(route) + if err != nil { + c.t.Fatal(err) + } + + success := htlcResult.failure == nil + attempts = append(attempts, htlcAttempt{ + route: route, + success: success, + }) + + // Process the result. In normal Lightning operations, the + // sender doesn't get an acknowledgement from the recipient that + // the htlc arrived. In integrated routing tests, this + // acknowledgement is available. It is a simplification of + // reality that still allows certain classes of tests to be + // performed. + if success { + inFlightHtlcs++ + + err := mc.ReportPaymentSuccess(pid, route) + if err != nil { + c.t.Fatal(err) + } + + amtRemaining -= route.ReceiverAmt() + + // If the full amount has been paid, the payment is + // successful and the control loop can be terminated. + if amtRemaining == 0 { + break + } + + // Otherwise try to send the remaining amount. + continue + } + + // Failure, update mission control and retry. + finalResult, err := mc.ReportPaymentFail( + pid, route, + getNodeIndex(route, htlcResult.failureSource), + htlcResult.failure, + ) + if err != nil { + c.t.Fatal(err) + } + + if finalResult != nil { + break + } + } + + return attempts, nil +} + +// getNodeIndex returns the zero-based index of the given node in the route. 
+func getNodeIndex(route *route.Route, failureSource route.Vertex) *int { + if failureSource == route.SourcePubKey { + idx := 0 + return &idx + } + + for i, h := range route.Hops { + if h.PubKeyBytes == failureSource { + idx := i + 1 + return &idx + } + } + return nil +} diff --git a/routing/integrated_routing_test.go b/routing/integrated_routing_test.go new file mode 100644 index 0000000000..4f044d8e00 --- /dev/null +++ b/routing/integrated_routing_test.go @@ -0,0 +1,300 @@ +package routing + +import ( + "testing" + + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/lnwire" +) + +// TestProbabilityExtrapolation tests that probabilities for tried channels are +// extrapolated to untried channels. This is a way to improve pathfinding +// success by steering away from bad nodes. +func TestProbabilityExtrapolation(t *testing.T) { + ctx := newIntegratedRoutingContext(t) + + // Create the following network of nodes: + // source -> expensiveNode (charges routing fee) -> target + // source -> intermediate1 (free routing) -> intermediate(1-10) (free routing) -> target + g := ctx.graph + + const expensiveNodeID = 3 + expensiveNode := newMockNode(expensiveNodeID) + expensiveNode.baseFee = 10000 + g.addNode(expensiveNode) + + g.addChannel(100, sourceNodeID, expensiveNodeID, 100000) + g.addChannel(101, targetNodeID, expensiveNodeID, 100000) + + const intermediate1NodeID = 4 + intermediate1 := newMockNode(intermediate1NodeID) + g.addNode(intermediate1) + g.addChannel(102, sourceNodeID, intermediate1NodeID, 100000) + + for i := 0; i < 10; i++ { + imNodeID := byte(10 + i) + imNode := newMockNode(imNodeID) + g.addNode(imNode) + g.addChannel(uint64(200+i), imNodeID, targetNodeID, 100000) + g.addChannel(uint64(300+i), imNodeID, intermediate1NodeID, 100000) + + // The channels from intermediate1 all have insufficient balance. 
+ g.nodes[intermediate1.pubkey].channels[imNode.pubkey].balance = 0 + } + + // It is expected that pathfinding will try to explore the routes via + // intermediate1 first, because those are free. But as failures happen, + // the node probability of intermediate1 will go down in favor of the + // paid route via expensiveNode. + // + // The exact number of attempts required is dependent on mission control + // config. For this test, it would have been enough to only assert that + // we are not trying all routes via intermediate1. However, we do assert + // a specific number of attempts to safe-guard against accidental + // modifications anywhere in the chain of components that is involved in + // this test. + attempts, err := ctx.testPayment(1) + if err != nil { + t.Fatalf("payment failed: %v", err) + } + if len(attempts) != 5 { + t.Fatalf("expected 5 attempts, but needed %v", len(attempts)) + } + + // If we use a static value for the node probability (no extrapolation + // of data from other channels), all ten bad channels will be tried + // first before switching to the paid channel. + ctx.mcCfg.AprioriWeight = 1 + attempts, err = ctx.testPayment(1) + if err != nil { + t.Fatalf("payment failed: %v", err) + } + if len(attempts) != 11 { + t.Fatalf("expected 11 attempts, but needed %v", len(attempts)) + } +} + +type mppSendTestCase struct { + name string + amt btcutil.Amount + expectedAttempts int + + // expectedSuccesses is a list of htlcs that made it to the receiver, + // regardless of whether the final set became complete or not. 
+ expectedSuccesses []expectedHtlcSuccess + + graph func(g *mockGraph) + expectedFailure bool + maxParts uint32 +} + +const ( + chanSourceIm1 = 13 + chanIm1Target = 32 + chanSourceIm2 = 14 + chanIm2Target = 42 +) + +func onePathGraph(g *mockGraph) { + // Create the following network of nodes: + // source -> intermediate1 -> target + + const im1NodeID = 3 + intermediate1 := newMockNode(im1NodeID) + g.addNode(intermediate1) + + g.addChannel(chanSourceIm1, sourceNodeID, im1NodeID, 200000) + g.addChannel(chanIm1Target, targetNodeID, im1NodeID, 100000) +} + +func twoPathGraph(g *mockGraph, capacityOut, capacityIn btcutil.Amount) { + // Create the following network of nodes: + // source -> intermediate1 -> target + // source -> intermediate2 -> target + + const im1NodeID = 3 + intermediate1 := newMockNode(im1NodeID) + g.addNode(intermediate1) + + const im2NodeID = 4 + intermediate2 := newMockNode(im2NodeID) + g.addNode(intermediate2) + + g.addChannel(chanSourceIm1, sourceNodeID, im1NodeID, capacityOut) + g.addChannel(chanSourceIm2, sourceNodeID, im2NodeID, capacityOut) + g.addChannel(chanIm1Target, targetNodeID, im1NodeID, capacityIn) + g.addChannel(chanIm2Target, targetNodeID, im2NodeID, capacityIn) +} + +var mppTestCases = []mppSendTestCase{ + // Test a two-path graph with sufficient liquidity. It is expected that + // pathfinding will try first try to send the full amount via the two + // available routes. When that fails, it will half the amount to 35k sat + // and retry. That attempt reaches the target successfully. Then the + // same route is tried again. Because the channel only had 50k sat, it + // will fail. Finally the second route is tried for 35k and it succeeds + // too. Mpp payment complete. 
+ { + + name: "sufficient inbound", + graph: func(g *mockGraph) { + twoPathGraph(g, 200000, 100000) + }, + amt: 70000, + expectedAttempts: 5, + expectedSuccesses: []expectedHtlcSuccess{ + { + amt: 35000, + chans: []uint64{chanSourceIm1, chanIm1Target}, + }, + { + amt: 35000, + chans: []uint64{chanSourceIm2, chanIm2Target}, + }, + }, + maxParts: 1000, + }, + + // Test that a cap on the max htlcs makes it impossible to pay. + { + name: "no splitting", + graph: func(g *mockGraph) { + twoPathGraph(g, 200000, 100000) + }, + amt: 70000, + expectedAttempts: 2, + expectedSuccesses: []expectedHtlcSuccess{}, + expectedFailure: true, + maxParts: 1, + }, + + // Test that an attempt is made to split the payment in multiple parts + // that all use the same route if the full amount cannot be sent in a + // single htlc. The sender is effectively probing the receiver's + // incoming channel to see if it has sufficient balance. In this test + // case, the endeavour fails. + { + + name: "one path split", + graph: onePathGraph, + amt: 70000, + expectedAttempts: 7, + expectedSuccesses: []expectedHtlcSuccess{ + { + amt: 35000, + chans: []uint64{chanSourceIm1, chanIm1Target}, + }, + { + amt: 8750, + chans: []uint64{chanSourceIm1, chanIm1Target}, + }, + }, + expectedFailure: true, + maxParts: 1000, + }, + + // Test that no attempts are made if the total local balance is + // insufficient. + { + name: "insufficient total balance", + graph: func(g *mockGraph) { + twoPathGraph(g, 100000, 500000) + }, + amt: 300000, + expectedAttempts: 0, + expectedFailure: true, + maxParts: 10, + }, +} + +// TestMppSend tests that a payment can be completed using multiple shards. 
+func TestMppSend(t *testing.T) { + for _, testCase := range mppTestCases { + testCase := testCase + + t.Run(testCase.name, func(t *testing.T) { + testMppSend(t, &testCase) + }) + } +} + +func testMppSend(t *testing.T, testCase *mppSendTestCase) { + ctx := newIntegratedRoutingContext(t) + + g := ctx.graph + testCase.graph(g) + + ctx.amt = lnwire.NewMSatFromSatoshis(testCase.amt) + + attempts, err := ctx.testPayment(testCase.maxParts) + switch { + case err == nil && testCase.expectedFailure: + t.Fatal("expected payment to fail") + case err != nil && !testCase.expectedFailure: + t.Fatal("expected payment to succeed") + } + + if len(attempts) != testCase.expectedAttempts { + t.Fatalf("expected %v attempts, but needed %v", + testCase.expectedAttempts, len(attempts), + ) + } + + assertSuccessAttempts(t, attempts, testCase.expectedSuccesses) +} + +// expectedHtlcSuccess describes an expected successful htlc attempt. +type expectedHtlcSuccess struct { + amt btcutil.Amount + chans []uint64 +} + +// equals matches the expectation with an actual attempt. +func (e *expectedHtlcSuccess) equals(a htlcAttempt) bool { + if a.route.TotalAmount != + lnwire.NewMSatFromSatoshis(e.amt) { + + return false + } + + if len(a.route.Hops) != len(e.chans) { + return false + } + + for i, h := range a.route.Hops { + if h.ChannelID != e.chans[i] { + return false + } + } + + return true +} + +// assertSuccessAttempts asserts that the set of successful htlc attempts +// matches the given expectation. 
+func assertSuccessAttempts(t *testing.T, attempts []htlcAttempt, + expected []expectedHtlcSuccess) { + + successCount := 0 +loop: + for _, a := range attempts { + if !a.success { + continue + } + + successCount++ + + for _, exp := range expected { + if exp.equals(a) { + continue loop + } + } + + t.Fatalf("htlc success %v not found", a) + } + + if successCount != len(expected) { + t.Fatalf("expected %v successful htlcs, but got %v", + expected, successCount) + } +} diff --git a/routing/localchans/log.go b/routing/localchans/log.go new file mode 100644 index 0000000000..9ebcf4d0ff --- /dev/null +++ b/routing/localchans/log.go @@ -0,0 +1,16 @@ +package localchans + +import ( + "github.com/btcsuite/btclog" +) + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log btclog.Logger + +// UseLogger uses a specified Logger to output package logging info. This +// function is called from the parent package htlcswitch logger initialization. +func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/routing/localchans/manager.go b/routing/localchans/manager.go index 9d1c3e49df..413f864335 100644 --- a/routing/localchans/manager.go +++ b/routing/localchans/manager.go @@ -79,6 +79,9 @@ func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy, // Apply the new policy to the edge. 
err := r.updateEdge(info.ChannelPoint, edge, newSchema) if err != nil { + log.Warnf("Cannot update policy for %v: %v\n", + info.ChannelPoint, err, + ) return nil } @@ -93,7 +96,7 @@ func (r *Manager) UpdatePolicy(newSchema routing.ChannelPolicy, BaseFee: edge.FeeBaseMSat, FeeRate: edge.FeeProportionalMillionths, TimeLockDelta: uint32(edge.TimeLockDelta), - MinHTLC: edge.MinHTLC, + MinHTLCOut: edge.MinHTLC, MaxHTLC: edge.MaxHTLC, } @@ -157,6 +160,11 @@ func (r *Manager) updateEdge(chanPoint wire.OutPoint, edge.MaxHTLC = amtMax } + // If a new min htlc is specified, update the edge. + if newSchema.MinHTLC != nil { + edge.MinHTLC = *newSchema.MinHTLC + } + // If the MaxHtlc flag wasn't already set, we can set it now. edge.MessageFlags |= lnwire.ChanUpdateOptionMaxHtlc diff --git a/routing/log.go b/routing/log.go index ce550812a2..e5c2e7a078 100644 --- a/routing/log.go +++ b/routing/log.go @@ -11,9 +11,11 @@ import ( // it. var log btclog.Logger +const Subsystem = "CRTR" + // The default amount of logging is none. func init() { - UseLogger(build.NewSubLogger("CRTR", nil)) + UseLogger(build.NewSubLogger(Subsystem, nil)) } // DisableLog disables all library log output. Logging output is disabled by diff --git a/routing/missioncontrol.go b/routing/missioncontrol.go index cebb7ea98e..40e2ab4f1b 100644 --- a/routing/missioncontrol.go +++ b/routing/missioncontrol.go @@ -1,12 +1,11 @@ package routing import ( - "math" "sync" "time" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/route" ) @@ -47,8 +46,20 @@ const ( // prevSuccessProbability is the assumed probability for node pairs that // successfully relayed the previous attempt. prevSuccessProbability = 0.95 + + // DefaultAprioriWeight is the default a priori weight. See + // MissionControlConfig for further explanation. 
+ DefaultAprioriWeight = 0.5 + + // DefaultMinFailureRelaxInterval is the default minimum time that must + // have passed since the previously recorded failure before the failure + // amount may be raised. + DefaultMinFailureRelaxInterval = time.Minute ) +// NodeResults contains previous results from a node to its peers. +type NodeResults map[route.Vertex]TimedPairResult + // MissionControl contains state which summarizes the past attempts of HTLC // routing by external callers when sending payments throughout the network. It // acts as a shared memory during routing attempts with the goal to optimize the @@ -59,15 +70,9 @@ const ( // since the last failure is used to estimate a success probability that is fed // into the path finding process for subsequent payment attempts. type MissionControl struct { - // lastPairResult tracks the last payment result per node pair. - lastPairResult map[DirectedNodePair]timedPairResult - - // lastNodeFailure tracks the last node level failure per node. - lastNodeFailure map[route.Vertex]time.Time - - // lastSecondChance tracks the last time a second chance was granted for - // a directed node pair. - lastSecondChance map[DirectedNodePair]time.Time + // state is the internal mission control state that is input for + // probability estimation. + state *missionControlState // now is expected to return the current time. It is supplied as an // external function to enable deterministic unit tests. @@ -77,6 +82,10 @@ type MissionControl struct { store *missionControlStore + // estimator is the probability estimator that is used with the payment + // results that mission control collects. + estimator *probabilityEstimator + sync.Mutex // TODO(roasbeef): further counters, if vertex continually unavailable, @@ -99,60 +108,61 @@ type MissionControlConfig struct { // MaxMcHistory defines the maximum number of payment results that are // held on disk. 
MaxMcHistory int + + // AprioriWeight is a value in the range [0, 1] that defines to what + // extent historical results should be extrapolated to untried + // connections. Setting it to one will completely ignore historical + // results and always assume the configured a priori probability for + // untried connections. A value of zero will ignore the a priori + // probability completely and only base the probability on historical + // results, unless there are none available. + AprioriWeight float64 + + // MinFailureRelaxInterval is the minimum time that must have passed + // since the previously recorded failure before the failure amount may + // be raised. + MinFailureRelaxInterval time.Duration + + // SelfNode is our own pubkey. + SelfNode route.Vertex } -// timedPairResult describes a timestamped pair result. -type timedPairResult struct { - // timestamp is the time when this result was obtained. - timestamp time.Time +// TimedPairResult describes a timestamped pair result. +type TimedPairResult struct { + // FailTime is the time of the last failure. + FailTime time.Time - pairResult + // FailAmt is the amount of the last failure. This amount may be pushed + // up if a later success is higher than the last failed amount. + FailAmt lnwire.MilliSatoshi + + // SuccessTime is the time of the last success. + SuccessTime time.Time + + // SuccessAmt is the highest amount that successfully forwarded. This + // isn't necessarily the last success amount. The value of this field + // may also be pushed down if a later failure is lower than the highest + // success amount. Because of this, SuccessAmt may not match + // SuccessTime. + SuccessAmt lnwire.MilliSatoshi } // MissionControlSnapshot contains a snapshot of the current state of mission // control. type MissionControlSnapshot struct { - // Nodes contains the per node information of this snapshot. - Nodes []MissionControlNodeSnapshot - // Pairs is a list of channels for which specific information is // logged. 
Pairs []MissionControlPairSnapshot } -// MissionControlNodeSnapshot contains a snapshot of the current node state in -// mission control. -type MissionControlNodeSnapshot struct { - // Node pubkey. - Node route.Vertex - - // LastFail is the time of last failure. - LastFail time.Time - - // OtherSuccessProb is the success probability for pairs not in - // the Pairs slice. - OtherSuccessProb float64 -} - // MissionControlPairSnapshot contains a snapshot of the current node pair // state in mission control. type MissionControlPairSnapshot struct { // Pair is the node pair of which the state is described. Pair DirectedNodePair - // Timestamp is the time of last result. - Timestamp time.Time - - // MinPenalizeAmt is the minimum amount for which the channel will be - // penalized. - MinPenalizeAmt lnwire.MilliSatoshi - - // SuccessProb is the success probability estimation for this channel. - SuccessProb float64 - - // LastAttemptSuccessful indicates whether the last payment attempt - // through this pair was successful. - LastAttemptSuccessful bool + // TimedPairResult contains the data for this pair. + TimedPairResult } // paymentResult is the information that becomes available when a payment @@ -167,25 +177,32 @@ type paymentResult struct { } // NewMissionControl returns a new instance of missionControl. 
-func NewMissionControl(db *bbolt.DB, cfg *MissionControlConfig) ( +func NewMissionControl(db kvdb.Backend, cfg *MissionControlConfig) ( *MissionControl, error) { log.Debugf("Instantiating mission control with config: "+ - "PenaltyHalfLife=%v, AprioriHopProbability=%v", - cfg.PenaltyHalfLife, cfg.AprioriHopProbability) + "PenaltyHalfLife=%v, AprioriHopProbability=%v, "+ + "AprioriWeight=%v", cfg.PenaltyHalfLife, + cfg.AprioriHopProbability, cfg.AprioriWeight) store, err := newMissionControlStore(db, cfg.MaxMcHistory) if err != nil { return nil, err } + estimator := &probabilityEstimator{ + aprioriHopProbability: cfg.AprioriHopProbability, + aprioriWeight: cfg.AprioriWeight, + penaltyHalfLife: cfg.PenaltyHalfLife, + prevSuccessProbability: prevSuccessProbability, + } + mc := &MissionControl{ - lastPairResult: make(map[DirectedNodePair]timedPairResult), - lastNodeFailure: make(map[route.Vertex]time.Time), - lastSecondChance: make(map[DirectedNodePair]time.Time), - now: time.Now, - cfg: cfg, - store: store, + state: newMissionControlState(cfg.MinFailureRelaxInterval), + now: time.Now, + cfg: cfg, + store: store, + estimator: estimator, } if err := mc.init(); err != nil { @@ -226,9 +243,7 @@ func (m *MissionControl) ResetHistory() error { return err } - m.lastPairResult = make(map[DirectedNodePair]timedPairResult) - m.lastNodeFailure = make(map[route.Vertex]time.Time) - m.lastSecondChance = make(map[DirectedNodePair]time.Time) + m.state.resetHistory() log.Debugf("Mission control history cleared") @@ -243,93 +258,15 @@ func (m *MissionControl) GetProbability(fromNode, toNode route.Vertex, m.Lock() defer m.Unlock() - return m.getPairProbability(fromNode, toNode, amt) -} - -// getProbAfterFail returns a probability estimate based on a last failure time. 
-func (m *MissionControl) getProbAfterFail(lastFailure time.Time) float64 { - if lastFailure.IsZero() { - return m.cfg.AprioriHopProbability - } - - timeSinceLastFailure := m.now().Sub(lastFailure) - - // Calculate success probability. It is an exponential curve that brings - // the probability down to zero when a failure occurs. From there it - // recovers asymptotically back to the a priori probability. The rate at - // which this happens is controlled by the penaltyHalfLife parameter. - exp := -timeSinceLastFailure.Hours() / m.cfg.PenaltyHalfLife.Hours() - probability := m.cfg.AprioriHopProbability * (1 - math.Pow(2, exp)) - - return probability -} - -// getPairProbability estimates the probability of successfully -// traversing from fromNode to toNode based on historical payment outcomes. -func (m *MissionControl) getPairProbability(fromNode, - toNode route.Vertex, amt lnwire.MilliSatoshi) float64 { - - // Start by getting the last node level failure. A node failure is - // considered a failure that would have affected every edge. Therefore - // we insert a node level failure into the history of every channel. If - // there is none, lastFail will be zero. - lastFail := m.lastNodeFailure[fromNode] - - // Retrieve the last pair outcome. - pair := NewDirectedNodePair(fromNode, toNode) - lastPairResult, ok := m.lastPairResult[pair] - - // Only look at the last pair outcome if it happened after the last node - // level failure. Otherwise the node level failure is the most recent - // and used as the basis for calculation of the probability. - if ok && lastPairResult.timestamp.After(lastFail) { - if lastPairResult.success { - return prevSuccessProbability - } + now := m.now() + results, _ := m.state.getLastPairResult(fromNode) - // Take into account a minimum penalize amount. For balance - // errors, a failure may be reported with such a minimum to - // prevent too aggresive penalization. 
We only take into account - // a previous failure if the amount that we currently get the - // probability for is greater or equal than the minPenalizeAmt - // of the previous failure. - if amt >= lastPairResult.minPenalizeAmt { - lastFail = lastPairResult.timestamp - } + // Use a distinct probability estimation function for local channels. + if fromNode == m.cfg.SelfNode { + return m.estimator.getLocalPairProbability(now, results, toNode) } - return m.getProbAfterFail(lastFail) -} - -// requestSecondChance checks whether the node fromNode can have a second chance -// at providing a channel update for its channel with toNode. -func (m *MissionControl) requestSecondChance(timestamp time.Time, - fromNode, toNode route.Vertex) bool { - - // Look up previous second chance time. - pair := DirectedNodePair{ - From: fromNode, - To: toNode, - } - lastSecondChance, ok := m.lastSecondChance[pair] - - // If the channel hasn't already be given a second chance or its last - // second chance was long ago, we give it another chance. - if !ok || timestamp.Sub(lastSecondChance) > minSecondChanceInterval { - m.lastSecondChance[pair] = timestamp - - log.Debugf("Second chance granted for %v->%v", fromNode, toNode) - - return true - } - - // Otherwise penalize the channel, because we don't allow channel - // updates that are that frequent. This is to prevent nodes from keeping - // us busy by continuously sending new channel updates. 
- log.Debugf("Second chance denied for %v->%v, remaining interval: %v", - fromNode, toNode, timestamp.Sub(lastSecondChance)) - - return false + return m.estimator.getPairProbability(now, results, toNode, amt) } // GetHistorySnapshot takes a snapshot from the current mission control state @@ -338,45 +275,29 @@ func (m *MissionControl) GetHistorySnapshot() *MissionControlSnapshot { m.Lock() defer m.Unlock() - log.Debugf("Requesting history snapshot from mission control: "+ - "node_failure_count=%v, pair_result_count=%v", - len(m.lastNodeFailure), len(m.lastPairResult)) - - nodes := make([]MissionControlNodeSnapshot, 0, len(m.lastNodeFailure)) - for v, h := range m.lastNodeFailure { - otherProb := m.getPairProbability(v, route.Vertex{}, 0) - - nodes = append(nodes, MissionControlNodeSnapshot{ - Node: v, - LastFail: h, - OtherSuccessProb: otherProb, - }) - } + log.Debugf("Requesting history snapshot from mission control") - pairs := make([]MissionControlPairSnapshot, 0, len(m.lastPairResult)) + return m.state.getSnapshot() +} - for v, h := range m.lastPairResult { - // Show probability assuming amount meets min - // penalization amount. - prob := m.getPairProbability(v.From, v.To, h.minPenalizeAmt) +// GetPairHistorySnapshot returns the stored history for a given node pair. 
+func (m *MissionControl) GetPairHistorySnapshot( + fromNode, toNode route.Vertex) TimedPairResult { - pair := MissionControlPairSnapshot{ - Pair: v, - MinPenalizeAmt: h.minPenalizeAmt, - Timestamp: h.timestamp, - SuccessProb: prob, - LastAttemptSuccessful: h.success, - } + m.Lock() + defer m.Unlock() - pairs = append(pairs, pair) + results, ok := m.state.getLastPairResult(fromNode) + if !ok { + return TimedPairResult{} } - snapshot := MissionControlSnapshot{ - Nodes: nodes, - Pairs: pairs, + result, ok := results[toNode] + if !ok { + return TimedPairResult{} } - return &snapshot + return result } // ReportPaymentFail reports a failed payment to mission control as input for @@ -455,7 +376,7 @@ func (m *MissionControl) applyPaymentResult( defer m.Unlock() if i.policyFailure != nil { - if m.requestSecondChance( + if m.state.requestSecondChance( result.timeReply, i.policyFailure.From, i.policyFailure.To, ) { @@ -463,27 +384,46 @@ func (m *MissionControl) applyPaymentResult( } } + // If there is a node-level failure, record a failure for every tried + // connection of that node. A node-level failure can be considered as a + // failure that would have occurred with any of the node's channels. + // + // Ideally we'd also record the failure for the untried connections of + // the node. Unfortunately this would require access to the graph and + // adding this dependency and db calls does not outweigh the benefits. + // + // Untried connections will fall back to the node probability. After the + // call to setAllPairResult below, the node probability will be equal to + // the probability of the tried channels except that the a priori + // probability is mixed in too. This effect is controlled by the + // aprioriWeight parameter. If that parameter isn't set to an extreme + // and there are a few known connections, there shouldn't be much of a + // difference. The largest difference occurs when aprioriWeight is 1. 
In + // that case, a node-level failure would not be applied to untried + // channels. if i.nodeFailure != nil { log.Debugf("Reporting node failure to Mission Control: "+ "node=%v", *i.nodeFailure) - m.lastNodeFailure[*i.nodeFailure] = result.timeReply + m.state.setAllFail(*i.nodeFailure, result.timeReply) } for pair, pairResult := range i.pairResults { + pairResult := pairResult + if pairResult.success { log.Debugf("Reporting pair success to Mission "+ - "Control: pair=%v", pair) + "Control: pair=%v, amt=%v", + pair, pairResult.amt) } else { log.Debugf("Reporting pair failure to Mission "+ - "Control: pair=%v, minPenalizeAmt=%v", - pair, pairResult.minPenalizeAmt) + "Control: pair=%v, amt=%v", + pair, pairResult.amt) } - m.lastPairResult[pair] = timedPairResult{ - timestamp: result.timeReply, - pairResult: pairResult, - } + m.state.setLastPairResult( + pair.From, pair.To, result.timeReply, &pairResult, + ) } return i.finalFailureReason diff --git a/routing/missioncontrol_state.go b/routing/missioncontrol_state.go new file mode 100644 index 0000000000..06e2ef13a3 --- /dev/null +++ b/routing/missioncontrol_state.go @@ -0,0 +1,211 @@ +package routing + +import ( + "time" + + "github.com/lightningnetwork/lnd/routing/route" +) + +// missionControlState is an object that manages the internal mission control +// state. Note that it isn't thread safe and synchronization needs to be +// enforced externally. +type missionControlState struct { + // lastPairResult tracks the last payment result (on a pair basis) for + // each transited node. This is a multi-layer map that allows us to look + // up the failure history of all connected channels (node pairs) for a + // particular node. + lastPairResult map[route.Vertex]NodeResults + + // lastSecondChance tracks the last time a second chance was granted for + // a directed node pair. 
+ lastSecondChance map[DirectedNodePair]time.Time + + // minFailureRelaxInterval is the minimum time that must have passed + // since the previously recorded failure before the failure amount may + // be raised. + minFailureRelaxInterval time.Duration +} + +// newMissionControlState instantiates a new mission control state object. +func newMissionControlState( + minFailureRelaxInterval time.Duration) *missionControlState { + + return &missionControlState{ + lastPairResult: make(map[route.Vertex]NodeResults), + lastSecondChance: make(map[DirectedNodePair]time.Time), + minFailureRelaxInterval: minFailureRelaxInterval, + } +} + +// getLastPairResult returns the current state for connections to the given node. +func (m *missionControlState) getLastPairResult(node route.Vertex) (NodeResults, + bool) { + + result, ok := m.lastPairResult[node] + return result, ok +} + +// ResetHistory resets the history of MissionControl returning it to a state as +// if no payment attempts have been made. +func (m *missionControlState) resetHistory() { + m.lastPairResult = make(map[route.Vertex]NodeResults) + m.lastSecondChance = make(map[DirectedNodePair]time.Time) +} + +// setLastPairResult stores a result for a node pair. +func (m *missionControlState) setLastPairResult(fromNode, toNode route.Vertex, + timestamp time.Time, result *pairResult) { + + nodePairs, ok := m.lastPairResult[fromNode] + if !ok { + nodePairs = make(NodeResults) + m.lastPairResult[fromNode] = nodePairs + } + + current := nodePairs[toNode] + + // Apply the new result to the existing data for this pair. If there is + // no existing data, apply it to the default values for TimedPairResult. + if result.success { + successAmt := result.amt + current.SuccessTime = timestamp + + // Only update the success amount if this amount is higher. This + // prevents the success range from shrinking when there is no + // reason to do so. 
For example: small amount probes shouldn't + // affect a previous success for a much larger amount. + if successAmt > current.SuccessAmt { + current.SuccessAmt = successAmt + } + + // If the success amount goes into the failure range, move the + // failure range up. Future attempts up to the success amount + // are likely to succeed. We don't want to clear the failure + // completely, because we haven't learnt much for amounts above + // the current success amount. + if !current.FailTime.IsZero() && successAmt >= current.FailAmt { + current.FailAmt = successAmt + 1 + } + } else { + // For failures we always want to update both the amount and the + // time. Those need to relate to the same result, because the + // time is used to gradually diminish the penality for that + // specific result. Updating the timestamp but not the amount + // could cause a failure for a lower amount (a more severe + // condition) to be revived as if it just happened. + failAmt := result.amt + + // Drop result if it would increase the failure amount too soon + // after a previous failure. This can happen if htlc results + // come in out of order. This check makes it easier for payment + // processes to converge to a final state. + failInterval := timestamp.Sub(current.FailTime) + if failAmt > current.FailAmt && + failInterval < m.minFailureRelaxInterval { + + log.Debugf("Ignoring higher amount failure within min "+ + "failure relaxation interval: prev_fail_amt=%v, "+ + "fail_amt=%v, interval=%v", + current.FailAmt, failAmt, failInterval) + + return + } + + current.FailTime = timestamp + current.FailAmt = failAmt + + switch { + // The failure amount is set to zero when the failure is + // amount-independent, meaning that the attempt would have + // failed regardless of the amount. This should also reset the + // success amount to zero. + case failAmt == 0: + current.SuccessAmt = 0 + + // If the failure range goes into the success range, move the + // success range down. 
+ case failAmt <= current.SuccessAmt: + current.SuccessAmt = failAmt - 1 + } + } + + log.Debugf("Setting %v->%v range to [%v-%v]", + fromNode, toNode, current.SuccessAmt, current.FailAmt) + + nodePairs[toNode] = current +} + +// setAllFail stores a fail result for all known connections to and from the +// given node. +func (m *missionControlState) setAllFail(node route.Vertex, + timestamp time.Time) { + + for fromNode, nodePairs := range m.lastPairResult { + for toNode := range nodePairs { + if fromNode == node || toNode == node { + nodePairs[toNode] = TimedPairResult{ + FailTime: timestamp, + } + } + } + } +} + +// requestSecondChance checks whether the node fromNode can have a second chance +// at providing a channel update for its channel with toNode. +func (m *missionControlState) requestSecondChance(timestamp time.Time, + fromNode, toNode route.Vertex) bool { + + // Look up previous second chance time. + pair := DirectedNodePair{ + From: fromNode, + To: toNode, + } + lastSecondChance, ok := m.lastSecondChance[pair] + + // If the channel hasn't already be given a second chance or its last + // second chance was long ago, we give it another chance. + if !ok || timestamp.Sub(lastSecondChance) > minSecondChanceInterval { + m.lastSecondChance[pair] = timestamp + + log.Debugf("Second chance granted for %v->%v", fromNode, toNode) + + return true + } + + // Otherwise penalize the channel, because we don't allow channel + // updates that are that frequent. This is to prevent nodes from keeping + // us busy by continuously sending new channel updates. + log.Debugf("Second chance denied for %v->%v, remaining interval: %v", + fromNode, toNode, timestamp.Sub(lastSecondChance)) + + return false +} + +// GetHistorySnapshot takes a snapshot from the current mission control state +// and actual probability estimates. 
+func (m *missionControlState) getSnapshot() *MissionControlSnapshot { + log.Debugf("Requesting history snapshot from mission control: "+ + "pair_result_count=%v", len(m.lastPairResult)) + + pairs := make([]MissionControlPairSnapshot, 0, len(m.lastPairResult)) + + for fromNode, fromPairs := range m.lastPairResult { + for toNode, result := range fromPairs { + pair := NewDirectedNodePair(fromNode, toNode) + + pairSnapshot := MissionControlPairSnapshot{ + Pair: pair, + TimedPairResult: result, + } + + pairs = append(pairs, pairSnapshot) + } + } + + snapshot := MissionControlSnapshot{ + Pairs: pairs, + } + + return &snapshot +} diff --git a/routing/missioncontrol_state_test.go b/routing/missioncontrol_state_test.go new file mode 100644 index 0000000000..28635d4375 --- /dev/null +++ b/routing/missioncontrol_state_test.go @@ -0,0 +1,47 @@ +package routing + +import ( + "testing" + "time" + + "github.com/lightningnetwork/lnd/routing/route" +) + +// TestMissionControlStateFailureResult tests setting failure results on the +// mission control state. +func TestMissionControlStateFailureResult(t *testing.T) { + const minFailureRelaxInterval = time.Minute + state := newMissionControlState(minFailureRelaxInterval) + + var ( + from = route.Vertex{1} + to = route.Vertex{2} + timestamp = testTime + ) + + // Report a 1000 sat failure. + state.setLastPairResult(from, to, timestamp, &pairResult{amt: 1000}) + result, _ := state.getLastPairResult(from) + if result[to].FailAmt != 1000 { + t.Fatalf("unexpected fail amount %v", result[to].FailAmt) + } + + // Report an 1100 sat failure one hour later. It is expected to + // overwrite the previous failure. + timestamp = timestamp.Add(time.Hour) + state.setLastPairResult(from, to, timestamp, &pairResult{amt: 1100}) + result, _ = state.getLastPairResult(from) + if result[to].FailAmt != 1100 { + t.Fatalf("unexpected fail amount %v", result[to].FailAmt) + } + + // Report a 1200 sat failure one second later. 
Because this increase of + // the failure amount is too soon after the previous failure, the result + // is not applied. + timestamp = timestamp.Add(time.Second) + state.setLastPairResult(from, to, timestamp, &pairResult{amt: 1200}) + result, _ = state.getLastPairResult(from) + if result[to].FailAmt != 1100 { + t.Fatalf("unexpected fail amount %v", result[to].FailAmt) + } +} diff --git a/routing/missioncontrol_store.go b/routing/missioncontrol_store.go index 329d819f75..9cd99d9545 100644 --- a/routing/missioncontrol_store.go +++ b/routing/missioncontrol_store.go @@ -7,8 +7,8 @@ import ( "time" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" ) @@ -35,20 +35,20 @@ const ( // Also changes to mission control parameters can be applied to historical data. // Finally, it enables importing raw data from an external source. type missionControlStore struct { - db *bbolt.DB + db kvdb.Backend maxRecords int numRecords int } -func newMissionControlStore(db *bbolt.DB, maxRecords int) (*missionControlStore, error) { +func newMissionControlStore(db kvdb.Backend, maxRecords int) (*missionControlStore, error) { store := &missionControlStore{ db: db, maxRecords: maxRecords, } // Create buckets if not yet existing. - err := db.Update(func(tx *bbolt.Tx) error { - resultsBucket, err := tx.CreateBucketIfNotExists(resultsKey) + err := kvdb.Update(db, func(tx kvdb.RwTx) error { + resultsBucket, err := tx.CreateTopLevelBucket(resultsKey) if err != nil { return fmt.Errorf("cannot create results bucket: %v", err) @@ -58,7 +58,7 @@ func newMissionControlStore(db *bbolt.DB, maxRecords int) (*missionControlStore, // memory to avoid calling Stats().KeyN. The reliability of // Stats() is doubtful and seemed to have caused crashes in the // past (see #1874). 
- c := resultsBucket.Cursor() + c := resultsBucket.ReadCursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { store.numRecords++ } @@ -74,12 +74,12 @@ func newMissionControlStore(db *bbolt.DB, maxRecords int) (*missionControlStore, // clear removes all results from the db. func (b *missionControlStore) clear() error { - return b.db.Update(func(tx *bbolt.Tx) error { - if err := tx.DeleteBucket(resultsKey); err != nil { + return kvdb.Update(b.db, func(tx kvdb.RwTx) error { + if err := tx.DeleteTopLevelBucket(resultsKey); err != nil { return err } - _, err := tx.CreateBucket(resultsKey) + _, err := tx.CreateTopLevelBucket(resultsKey) return err }) } @@ -88,8 +88,8 @@ func (b *missionControlStore) clear() error { func (b *missionControlStore) fetchAll() ([]*paymentResult, error) { var results []*paymentResult - err := b.db.View(func(tx *bbolt.Tx) error { - resultBucket := tx.Bucket(resultsKey) + err := kvdb.View(b.db, func(tx kvdb.ReadTx) error { + resultBucket := tx.ReadBucket(resultsKey) results = make([]*paymentResult, 0) return resultBucket.ForEach(func(k, v []byte) error { @@ -218,13 +218,13 @@ func deserializeResult(k, v []byte) (*paymentResult, error) { // AddResult adds a new result to the db. func (b *missionControlStore) AddResult(rp *paymentResult) error { - return b.db.Update(func(tx *bbolt.Tx) error { - bucket := tx.Bucket(resultsKey) + return kvdb.Update(b.db, func(tx kvdb.RwTx) error { + bucket := tx.ReadWriteBucket(resultsKey) // Prune oldest entries. 
if b.maxRecords > 0 { for b.numRecords >= b.maxRecords { - cursor := bucket.Cursor() + cursor := bucket.ReadWriteCursor() cursor.First() if err := cursor.Delete(); err != nil { return err diff --git a/routing/missioncontrol_store_test.go b/routing/missioncontrol_store_test.go index 9d070f4dea..497affd52b 100644 --- a/routing/missioncontrol_store_test.go +++ b/routing/missioncontrol_store_test.go @@ -8,16 +8,20 @@ import ( "time" "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/routing/route" ) const testMaxRecords = 2 +// TestMissionControlStore tests the recording of payment failure events +// in mission control. It tests encoding and decoding of differing lnwire +// failures (FailIncorrectDetails and FailMppTimeout), pruning of results +// and idempotent writes. func TestMissionControlStore(t *testing.T) { - // Set time zone explictly to keep test deterministic. + // Set time zone explicitly to keep test deterministic. time.Local = time.UTC file, err := ioutil.TempFile("", "*.db") @@ -27,7 +31,7 @@ func TestMissionControlStore(t *testing.T) { dbPath := file.Name() - db, err := bbolt.Open(dbPath, 0600, nil) + db, err := kvdb.Create(kvdb.BoltBackendName, dbPath, true) if err != nil { t.Fatal(err) } @@ -115,11 +119,12 @@ func TestMissionControlStore(t *testing.T) { t.Fatal(err) } - // Add a newer result. + // Add a newer result which failed due to mpp timeout. 
result3 := result1 result3.timeReply = result1.timeReply.Add(2 * time.Hour) result3.timeFwd = result1.timeReply.Add(2 * time.Hour) result3.id = 3 + result3.failure = &lnwire.FailMPPTimeout{} err = store.AddResult(&result3) if err != nil { diff --git a/routing/missioncontrol_test.go b/routing/missioncontrol_test.go index df96e3140e..3a9c93a75b 100644 --- a/routing/missioncontrol_test.go +++ b/routing/missioncontrol_test.go @@ -6,14 +6,14 @@ import ( "testing" "time" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/route" ) var ( mcTestRoute = &route.Route{ - SourcePubKey: route.Vertex{10}, + SourcePubKey: mcTestSelf, Hops: []*route.Hop{ { ChannelID: 1, @@ -30,8 +30,13 @@ var ( } mcTestTime = time.Date(2018, time.January, 9, 14, 00, 00, 0, time.UTC) + mcTestSelf = route.Vertex{10} mcTestNode1 = mcTestRoute.Hops[0].PubKeyBytes mcTestNode2 = mcTestRoute.Hops[1].PubKeyBytes + + testPenaltyHalfLife = 30 * time.Minute + testAprioriHopProbability = 0.9 + testAprioriWeight = 0.5 ) type mcTestContext struct { @@ -39,7 +44,7 @@ type mcTestContext struct { mc *MissionControl now time.Time - db *bbolt.DB + db kvdb.Backend dbPath string pid uint64 @@ -58,7 +63,7 @@ func createMcTestContext(t *testing.T) *mcTestContext { ctx.dbPath = file.Name() - ctx.db, err = bbolt.Open(ctx.dbPath, 0600, nil) + ctx.db, err = kvdb.Open(kvdb.BoltBackendName, ctx.dbPath, true) if err != nil { t.Fatal(err) } @@ -73,8 +78,10 @@ func (ctx *mcTestContext) restartMc() { mc, err := NewMissionControl( ctx.db, &MissionControlConfig{ - PenaltyHalfLife: 30 * time.Minute, - AprioriHopProbability: 0.8, + PenaltyHalfLife: testPenaltyHalfLife, + AprioriHopProbability: testAprioriHopProbability, + AprioriWeight: testAprioriWeight, + SelfNode: mcTestSelf, }, ) if err != nil { @@ -93,7 +100,6 @@ func (ctx *mcTestContext) cleanup() { // Assert that mission control returns a probability for an edge. 
func (ctx *mcTestContext) expectP(amt lnwire.MilliSatoshi, expected float64) { - ctx.t.Helper() p := ctx.mc.GetProbability(mcTestNode1, mcTestNode2, amt) @@ -133,20 +139,30 @@ func TestMissionControl(t *testing.T) { testTime := time.Date(2018, time.January, 9, 14, 00, 00, 0, time.UTC) - // Initial probability is expected to be 1. - ctx.expectP(1000, 0.8) + // For local channels, we expect a higher probability than our a prior + // test probability. + selfP := ctx.mc.GetProbability(mcTestSelf, mcTestNode1, 100) + if selfP != prevSuccessProbability { + t.Fatalf("expected prev success prob for untried local chans") + } + + // Initial probability is expected to be the a priori. + ctx.expectP(1000, testAprioriHopProbability) // Expect probability to be zero after reporting the edge as failed. ctx.reportFailure(1000, lnwire.NewTemporaryChannelFailure(nil)) ctx.expectP(1000, 0) // As we reported with a min penalization amt, a lower amt than reported - // should be unaffected. - ctx.expectP(500, 0.8) + // should return the node probability, which is the a priori + // probability. + ctx.expectP(500, testAprioriHopProbability) - // Edge decay started. + // Edge decay started. The node probability weighted average should now + // have shifted from 1:1 to 1:0.5 -> 60%. The connection probability is + // half way through the recovery, so we expect 30% here. ctx.now = testTime.Add(30 * time.Minute) - ctx.expectP(1000, 0.4) + ctx.expectP(1000, 0.3) // Edge fails again, this time without a min penalization amt. The edge // should be penalized regardless of amount. @@ -156,26 +172,22 @@ func TestMissionControl(t *testing.T) { // Edge decay started. ctx.now = testTime.Add(60 * time.Minute) - ctx.expectP(1000, 0.4) + ctx.expectP(1000, 0.3) // Restart mission control to test persistence. ctx.restartMc() - ctx.expectP(1000, 0.4) + ctx.expectP(1000, 0.3) - // A node level failure should bring probability of every channel back - // to zero. 
+ // A node level failure should bring probability of all known channels + // back to zero. ctx.reportFailure(0, lnwire.NewExpiryTooSoon(lnwire.ChannelUpdate{})) ctx.expectP(1000, 0) // Check whether history snapshot looks sane. history := ctx.mc.GetHistorySnapshot() - if len(history.Nodes) != 1 { - t.Fatalf("unexpected number of nodes: expected 1 got %v", - len(history.Nodes)) - } - if len(history.Pairs) != 2 { - t.Fatalf("expected 2 pairs, but got %v", len(history.Pairs)) + if len(history.Pairs) != 4 { + t.Fatalf("expected 4 pairs, but got %v", len(history.Pairs)) } // Test reporting a success. @@ -192,12 +204,12 @@ func TestMissionControlChannelUpdate(t *testing.T) { ctx.reportFailure( 0, lnwire.NewFeeInsufficient(0, lnwire.ChannelUpdate{}), ) - ctx.expectP(0, 0.8) + ctx.expectP(100, testAprioriHopProbability) // Report another failure for the same channel. We expect it to be // pruned. ctx.reportFailure( 0, lnwire.NewFeeInsufficient(0, lnwire.ChannelUpdate{}), ) - ctx.expectP(0, 0) + ctx.expectP(100, 0) } diff --git a/routing/mock_graph_test.go b/routing/mock_graph_test.go new file mode 100644 index 0000000000..3834d9e51a --- /dev/null +++ b/routing/mock_graph_test.go @@ -0,0 +1,273 @@ +package routing + +import ( + "bytes" + "fmt" + "testing" + + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +// createPubkey return a new test pubkey. +func createPubkey(id byte) route.Vertex { + pubkey := route.Vertex{id} + return pubkey +} + +// mockChannel holds the channel state of a channel in the mock graph. +type mockChannel struct { + id uint64 + capacity btcutil.Amount + balance lnwire.MilliSatoshi +} + +// mockNode holds a set of mock channels and routing policies for a node in the +// mock graph. 
+type mockNode struct { + channels map[route.Vertex]*mockChannel + baseFee lnwire.MilliSatoshi + pubkey route.Vertex +} + +// newMockNode instantiates a new mock node with a newly generated pubkey. +func newMockNode(id byte) *mockNode { + pubkey := createPubkey(id) + return &mockNode{ + channels: make(map[route.Vertex]*mockChannel), + pubkey: pubkey, + } +} + +// fwd simulates an htlc forward through this node. If the from parameter is +// nil, this node is considered to be the sender of the payment. The route +// parameter describes the remaining route from this node onwards. If route.next +// is nil, this node is the final hop. +func (m *mockNode) fwd(from *mockNode, route *hop) (htlcResult, error) { + next := route.next + + // Get the incoming channel, if any. + var inChan *mockChannel + if from != nil { + inChan = m.channels[from.pubkey] + } + + // If there is no next node, this is the final node and we can settle the htlc. + if next == nil { + // Update the incoming balance. + inChan.balance += route.amtToFwd + + return htlcResult{}, nil + } + + // Check if the outgoing channel has enough balance. + outChan, ok := m.channels[next.node.pubkey] + if !ok { + return htlcResult{}, + fmt.Errorf("%v: unknown next %v", + m.pubkey, next.node.pubkey) + } + if outChan.balance < route.amtToFwd { + return htlcResult{ + failureSource: m.pubkey, + failure: lnwire.NewTemporaryChannelFailure(nil), + }, nil + } + + // Htlc can be forwarded, update channel balances. + outChan.balance -= route.amtToFwd + if inChan != nil { + inChan.balance += route.amtToFwd + } + + // Recursively forward down the given route. + result, err := next.node.fwd(m, route.next) + if err != nil { + return htlcResult{}, err + } + + // Revert balances when a failure occurs. + if result.failure != nil { + outChan.balance += route.amtToFwd + if inChan != nil { + inChan.balance -= route.amtToFwd + } + } + + return result, nil +} + +// mockGraph contains a set of nodes that together for a mocked graph. 
+type mockGraph struct { + t *testing.T + nodes map[route.Vertex]*mockNode + source *mockNode +} + +// newMockGraph instantiates a new mock graph. +func newMockGraph(t *testing.T) *mockGraph { + return &mockGraph{ + nodes: make(map[route.Vertex]*mockNode), + t: t, + } +} + +// addNode adds the given mock node to the network. +func (m *mockGraph) addNode(node *mockNode) { + m.t.Helper() + + if _, exists := m.nodes[node.pubkey]; exists { + m.t.Fatal("node already exists") + } + m.nodes[node.pubkey] = node +} + +// addChannel adds a new channel between two existing nodes on the network. It +// sets the channel balance to 50/50%. +// +// Ignore linter error because addChannel isn't yet called with different +// capacities. +// nolint:unparam +func (m *mockGraph) addChannel(id uint64, node1id, node2id byte, + capacity btcutil.Amount) { + + node1pubkey := createPubkey(node1id) + node2pubkey := createPubkey(node2id) + + if _, exists := m.nodes[node1pubkey].channels[node2pubkey]; exists { + m.t.Fatal("channel already exists") + } + if _, exists := m.nodes[node2pubkey].channels[node1pubkey]; exists { + m.t.Fatal("channel already exists") + } + + m.nodes[node1pubkey].channels[node2pubkey] = &mockChannel{ + capacity: capacity, + id: id, + balance: lnwire.NewMSatFromSatoshis(capacity / 2), + } + m.nodes[node2pubkey].channels[node1pubkey] = &mockChannel{ + capacity: capacity, + id: id, + balance: lnwire.NewMSatFromSatoshis(capacity / 2), + } +} + +// forEachNodeChannel calls the callback for every channel of the given node. +// +// NOTE: Part of the routingGraph interface. +func (m *mockGraph) forEachNodeChannel(nodePub route.Vertex, + cb func(*channeldb.ChannelEdgeInfo, *channeldb.ChannelEdgePolicy, + *channeldb.ChannelEdgePolicy) error) error { + + // Look up the mock node. + node, ok := m.nodes[nodePub] + if !ok { + return channeldb.ErrGraphNodeNotFound + } + + // Iterate over all of its channels. 
+ for peer, channel := range node.channels { + // Lexicographically sort the pubkeys. + var node1, node2 route.Vertex + if bytes.Compare(nodePub[:], peer[:]) == -1 { + node1, node2 = peer, nodePub + } else { + node1, node2 = nodePub, peer + } + + peerNode := m.nodes[peer] + + // Call the per channel callback. + err := cb( + &channeldb.ChannelEdgeInfo{ + NodeKey1Bytes: node1, + NodeKey2Bytes: node2, + }, + &channeldb.ChannelEdgePolicy{ + ChannelID: channel.id, + Node: &channeldb.LightningNode{ + PubKeyBytes: peer, + Features: lnwire.EmptyFeatureVector(), + }, + FeeBaseMSat: node.baseFee, + }, + &channeldb.ChannelEdgePolicy{ + ChannelID: channel.id, + Node: &channeldb.LightningNode{ + PubKeyBytes: nodePub, + Features: lnwire.EmptyFeatureVector(), + }, + FeeBaseMSat: peerNode.baseFee, + }, + ) + if err != nil { + return err + } + } + return nil +} + +// sourceNode returns the source node of the graph. +// +// NOTE: Part of the routingGraph interface. +func (m *mockGraph) sourceNode() route.Vertex { + return m.source.pubkey +} + +// fetchNodeFeatures returns the features of the given node. +// +// NOTE: Part of the routingGraph interface. +func (m *mockGraph) fetchNodeFeatures(nodePub route.Vertex) ( + *lnwire.FeatureVector, error) { + + return lnwire.EmptyFeatureVector(), nil +} + +// htlcResult describes the resolution of an htlc. If failure is nil, the htlc +// was settled. +type htlcResult struct { + failureSource route.Vertex + failure lnwire.FailureMessage +} + +// hop describes one hop of a route. +type hop struct { + node *mockNode + amtToFwd lnwire.MilliSatoshi + next *hop +} + +// sendHtlc sends out an htlc on the mock network and synchronously returns the +// final resolution of the htlc. +func (m *mockGraph) sendHtlc(route *route.Route) (htlcResult, error) { + var next *hop + + // Convert the route into a structure that is suitable for recursive + // processing. 
+ for i := len(route.Hops) - 1; i >= 0; i-- { + routeHop := route.Hops[i] + node := m.nodes[routeHop.PubKeyBytes] + next = &hop{ + node: node, + next: next, + amtToFwd: routeHop.AmtToForward, + } + } + + // Create the starting hop instance. + source := m.nodes[route.SourcePubKey] + next = &hop{ + node: source, + next: next, + amtToFwd: route.TotalAmount, + } + + // Recursively walk the path and obtain the htlc resolution. + return source.fwd(nil, next) +} + +// Compile-time check for the routingGraph interface. +var _ routingGraph = &mockGraph{} diff --git a/routing/mock_test.go b/routing/mock_test.go index d2f7448c7b..ae27bc7e7d 100644 --- a/routing/mock_test.go +++ b/routing/mock_test.go @@ -10,12 +10,13 @@ import ( "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/route" - "github.com/lightningnetwork/lnd/zpay32" ) type mockPaymentAttemptDispatcher struct { onPayment func(firstHop lnwire.ShortChannelID) ([32]byte, error) results map[uint64]*htlcswitch.PaymentResult + + sync.Mutex } var _ PaymentAttemptDispatcher = (*mockPaymentAttemptDispatcher)(nil) @@ -28,25 +29,27 @@ func (m *mockPaymentAttemptDispatcher) SendHTLC(firstHop lnwire.ShortChannelID, return nil } - if m.results == nil { - m.results = make(map[uint64]*htlcswitch.PaymentResult) - } - var result *htlcswitch.PaymentResult preimage, err := m.onPayment(firstHop) if err != nil { - fwdErr, ok := err.(*htlcswitch.ForwardingError) + rtErr, ok := err.(htlcswitch.ClearTextError) if !ok { return err } result = &htlcswitch.PaymentResult{ - Error: fwdErr, + Error: rtErr, } } else { result = &htlcswitch.PaymentResult{Preimage: preimage} } + m.Lock() + if m.results == nil { + m.results = make(map[uint64]*htlcswitch.PaymentResult) + } + m.results[pid] = result + m.Unlock() return nil } @@ -56,7 +59,11 @@ func (m *mockPaymentAttemptDispatcher) GetPaymentResult(paymentID uint64, <-chan *htlcswitch.PaymentResult, error) { c := make(chan 
*htlcswitch.PaymentResult, 1) + + m.Lock() res, ok := m.results[paymentID] + m.Unlock() + if !ok { return nil, htlcswitch.ErrPaymentIDNotFound } @@ -78,8 +85,8 @@ type mockPaymentSessionSource struct { var _ PaymentSessionSource = (*mockPaymentSessionSource)(nil) -func (m *mockPaymentSessionSource) NewPaymentSession(routeHints [][]zpay32.HopHint, - target route.Vertex) (PaymentSession, error) { +func (m *mockPaymentSessionSource) NewPaymentSession( + _ *LightningPayment) (PaymentSession, error) { return &mockPaymentSession{m.routes}, nil } @@ -102,6 +109,13 @@ func (m *mockMissionControl) ReportPaymentFail(paymentID uint64, rt *route.Route failureSourceIdx *int, failure lnwire.FailureMessage) ( *channeldb.FailureReason, error) { + // Report a permanent failure if this is an error caused + // by incorrect details. + if failure.Code() == lnwire.CodeIncorrectOrUnknownPaymentDetails { + reason := channeldb.FailureReasonPaymentDetails + return &reason, nil + } + return nil, nil } @@ -123,11 +137,11 @@ type mockPaymentSession struct { var _ PaymentSession = (*mockPaymentSession)(nil) -func (m *mockPaymentSession) RequestRoute(payment *LightningPayment, - height uint32, finalCltvDelta uint16) (*route.Route, error) { +func (m *mockPaymentSession) RequestRoute(_, _ lnwire.MilliSatoshi, + _, height uint32) (*route.Route, error) { if len(m.routes) == 0 { - return nil, fmt.Errorf("no routes") + return nil, errNoPathFound } r := m.routes[0] @@ -177,27 +191,38 @@ type initArgs struct { c *channeldb.PaymentCreationInfo } -type registerArgs struct { - a *channeldb.PaymentAttemptInfo +type registerAttemptArgs struct { + a *channeldb.HTLCAttemptInfo } -type successArgs struct { +type settleAttemptArgs struct { preimg lntypes.Preimage } -type failArgs struct { +type failAttemptArgs struct { + reason *channeldb.HTLCFailInfo +} + +type failPaymentArgs struct { reason channeldb.FailureReason } +type testPayment struct { + info channeldb.PaymentCreationInfo + attempts 
[]channeldb.HTLCAttempt +} + type mockControlTower struct { - inflights map[lntypes.Hash]channeldb.InFlightPayment + payments map[lntypes.Hash]*testPayment successful map[lntypes.Hash]struct{} + failed map[lntypes.Hash]channeldb.FailureReason - init chan initArgs - register chan registerArgs - success chan successArgs - fail chan failArgs - fetchInFlight chan struct{} + init chan initArgs + registerAttempt chan registerAttemptArgs + settleAttempt chan settleAttemptArgs + failAttempt chan failAttemptArgs + failPayment chan failPaymentArgs + fetchInFlight chan struct{} sync.Mutex } @@ -206,8 +231,9 @@ var _ ControlTower = (*mockControlTower)(nil) func makeMockControlTower() *mockControlTower { return &mockControlTower{ - inflights: make(map[lntypes.Hash]channeldb.InFlightPayment), + payments: make(map[lntypes.Hash]*testPayment), successful: make(map[lntypes.Hash]struct{}), + failed: make(map[lntypes.Hash]channeldb.FailureReason), } } @@ -221,56 +247,132 @@ func (m *mockControlTower) InitPayment(phash lntypes.Hash, m.init <- initArgs{c} } + // Don't allow re-init a successful payment. if _, ok := m.successful[phash]; ok { - return fmt.Errorf("already successful") + return channeldb.ErrAlreadyPaid } - _, ok := m.inflights[phash] - if ok { - return fmt.Errorf("in flight") + _, failed := m.failed[phash] + _, ok := m.payments[phash] + + // If the payment is known, only allow re-init if failed. + if ok && !failed { + return channeldb.ErrPaymentInFlight } - m.inflights[phash] = channeldb.InFlightPayment{ - Info: c, + delete(m.failed, phash) + m.payments[phash] = &testPayment{ + info: *c, } return nil } func (m *mockControlTower) RegisterAttempt(phash lntypes.Hash, - a *channeldb.PaymentAttemptInfo) error { + a *channeldb.HTLCAttemptInfo) error { m.Lock() defer m.Unlock() - if m.register != nil { - m.register <- registerArgs{a} + if m.registerAttempt != nil { + m.registerAttempt <- registerAttemptArgs{a} + } + + // Cannot register attempts for successful or failed payments. 
+ if _, ok := m.successful[phash]; ok { + return channeldb.ErrPaymentAlreadySucceeded + } + + if _, ok := m.failed[phash]; ok { + return channeldb.ErrPaymentAlreadyFailed } - p, ok := m.inflights[phash] + p, ok := m.payments[phash] if !ok { - return fmt.Errorf("not in flight") + return channeldb.ErrPaymentNotInitiated } - p.Attempt = a - m.inflights[phash] = p + p.attempts = append(p.attempts, channeldb.HTLCAttempt{ + HTLCAttemptInfo: *a, + }) + m.payments[phash] = p return nil } -func (m *mockControlTower) Success(phash lntypes.Hash, - preimg lntypes.Preimage) error { +func (m *mockControlTower) SettleAttempt(phash lntypes.Hash, + pid uint64, settleInfo *channeldb.HTLCSettleInfo) error { m.Lock() defer m.Unlock() - if m.success != nil { - m.success <- successArgs{preimg} + if m.settleAttempt != nil { + m.settleAttempt <- settleAttemptArgs{settleInfo.Preimage} } - delete(m.inflights, phash) - m.successful[phash] = struct{}{} - return nil + // Only allow setting attempts if the payment is known. + p, ok := m.payments[phash] + if !ok { + return channeldb.ErrPaymentNotInitiated + } + + // Find the attempt with this pid, and set the settle info. + for i, a := range p.attempts { + if a.AttemptID != pid { + continue + } + + if a.Settle != nil { + return channeldb.ErrAttemptAlreadySettled + } + if a.Failure != nil { + return channeldb.ErrAttemptAlreadyFailed + } + + p.attempts[i].Settle = settleInfo + + // Mark the payment successful on first settled attempt. + m.successful[phash] = struct{}{} + return nil + } + + return fmt.Errorf("pid not found") +} + +func (m *mockControlTower) FailAttempt(phash lntypes.Hash, pid uint64, + failInfo *channeldb.HTLCFailInfo) error { + + m.Lock() + defer m.Unlock() + + if m.failAttempt != nil { + m.failAttempt <- failAttemptArgs{failInfo} + } + + // Only allow failing attempts if the payment is known. 
+ p, ok := m.payments[phash] + if !ok { + return channeldb.ErrPaymentNotInitiated + } + + // Find the attempt with this pid, and set the failure info. + for i, a := range p.attempts { + if a.AttemptID != pid { + continue + } + + if a.Settle != nil { + return channeldb.ErrAttemptAlreadySettled + } + if a.Failure != nil { + return channeldb.ErrAttemptAlreadyFailed + } + + p.attempts[i].Failure = failInfo + return nil + } + + return fmt.Errorf("pid not found") } func (m *mockControlTower) Fail(phash lntypes.Hash, @@ -279,14 +381,46 @@ func (m *mockControlTower) Fail(phash lntypes.Hash, m.Lock() defer m.Unlock() - if m.fail != nil { - m.fail <- failArgs{reason} + if m.failPayment != nil { + m.failPayment <- failPaymentArgs{reason} + } + + // Payment must be known. + if _, ok := m.payments[phash]; !ok { + return channeldb.ErrPaymentNotInitiated } - delete(m.inflights, phash) + m.failed[phash] = reason + return nil } +func (m *mockControlTower) FetchPayment(phash lntypes.Hash) ( + *channeldb.MPPayment, error) { + + m.Lock() + defer m.Unlock() + + p, ok := m.payments[phash] + if !ok { + return nil, channeldb.ErrPaymentNotInitiated + } + + mp := &channeldb.MPPayment{ + Info: &p.info, + } + + reason, ok := m.failed[phash] + if ok { + mp.FailureReason = &reason + } + + // Return a copy of the current attempts. + mp.HTLCs = append(mp.HTLCs, p.attempts...) + + return mp, nil +} + func (m *mockControlTower) FetchInFlightPayments() ( []*channeldb.InFlightPayment, error) { @@ -297,8 +431,25 @@ func (m *mockControlTower) FetchInFlightPayments() ( m.fetchInFlight <- struct{}{} } + // In flight are all payments not successful or failed. 
var fl []*channeldb.InFlightPayment - for _, ifl := range m.inflights { + for hash, p := range m.payments { + if _, ok := m.successful[hash]; ok { + continue + } + if _, ok := m.failed[hash]; ok { + continue + } + + var attempts []channeldb.HTLCAttemptInfo + for _, a := range p.attempts { + attempts = append(attempts, a.HTLCAttemptInfo) + } + ifl := channeldb.InFlightPayment{ + Info: &p.info, + Attempts: attempts, + } + fl = append(fl, &ifl) } @@ -306,7 +457,7 @@ func (m *mockControlTower) FetchInFlightPayments() ( } func (m *mockControlTower) SubscribePayment(paymentHash lntypes.Hash) ( - bool, chan PaymentResult, error) { + *ControlTowerSubscriber, error) { - return false, nil, errors.New("not implemented") + return nil, errors.New("not implemented") } diff --git a/routing/notifications.go b/routing/notifications.go index 4d64b4366f..2014121c64 100644 --- a/routing/notifications.go +++ b/routing/notifications.go @@ -118,10 +118,9 @@ type topologyClient struct { // graph topology in a non-blocking. func (r *ChannelRouter) notifyTopologyChange(topologyDiff *TopologyChange) { r.RLock() - numClients := len(r.topologyClients) - r.RUnlock() + defer r.RUnlock() - // Do not reacquire the lock twice unnecessarily. + numClients := len(r.topologyClients) if numClients == 0 { return } @@ -133,7 +132,6 @@ func (r *ChannelRouter) notifyTopologyChange(topologyDiff *TopologyChange) { }), ) - r.RLock() for _, client := range r.topologyClients { client.wg.Add(1) @@ -157,7 +155,6 @@ func (r *ChannelRouter) notifyTopologyChange(topologyDiff *TopologyChange) { } }(client) } - r.RUnlock() } // TopologyChange represents a new set of modifications to the channel graph. 
diff --git a/routing/notifications_test.go b/routing/notifications_test.go index a18f5c90c1..2371a4e1c0 100644 --- a/routing/notifications_test.go +++ b/routing/notifications_test.go @@ -28,7 +28,7 @@ var ( Port: 9000} testAddrs = []net.Addr{testAddr} - testFeatures = lnwire.NewFeatureVector(nil, lnwire.GlobalFeatures) + testFeatures = lnwire.NewFeatureVector(nil, lnwire.Features) testHash = [32]byte{ 0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab, diff --git a/routing/pathfind.go b/routing/pathfind.go index 33c452f0b3..e5def09b53 100644 --- a/routing/pathfind.go +++ b/routing/pathfind.go @@ -2,25 +2,20 @@ package routing import ( "container/heap" + "errors" "fmt" "math" "time" - "github.com/coreos/bbolt" - + sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/feature" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/routing/route" - "github.com/lightningnetwork/lnd/tlv" ) const ( - // HopLimit is the maximum number hops that is permissible as a route. - // Any potential paths found that lie above this limit will be rejected - // with an error. This value is computed using the current fixed-size - // packet length of the Sphinx construction. - HopLimit = 20 - // infinity is used as a starting distance in our shortest path search. infinity = math.MaxInt64 @@ -36,12 +31,18 @@ const ( // some effect with smaller time lock values. The value may need // tweaking and/or be made configurable in the future. RiskFactorBillionths = 15 + + // estimatedNodeCount is used to preallocate the path finding structures + // to avoid resizing and copies. It should be number on the same order as + // the number of active nodes in the network. + estimatedNodeCount = 10000 ) // pathFinder defines the interface of a path finding algorithm. 
type pathFinder = func(g *graphParams, r *RestrictParams, cfg *PathFindingConfig, source, target route.Vertex, - amt lnwire.MilliSatoshi) ([]*channeldb.ChannelEdgePolicy, error) + amt lnwire.MilliSatoshi, finalHtlcExpiry int32) ( + []*channeldb.ChannelEdgePolicy, error) var ( // DefaultPaymentAttemptPenalty is the virtual cost in path finding weight @@ -67,17 +68,31 @@ type edgePolicyWithSource struct { edge *channeldb.ChannelEdgePolicy } -// newRoute returns a fully valid route between the source and target that's -// capable of supporting a payment of `amtToSend` after fees are fully -// computed. If the route is too long, or the selected path cannot support the -// fully payment including fees, then a non-nil error is returned. +// finalHopParams encapsulates various parameters for route construction that +// apply to the final hop in a route. These features include basic payment data +// such as amounts and cltvs, as well as more complex features like destination +// custom records and payment address. +type finalHopParams struct { + amt lnwire.MilliSatoshi + totalAmt lnwire.MilliSatoshi + cltvDelta uint16 + records record.CustomSet + paymentAddr *[32]byte +} + +// newRoute constructs a route using the provided path and final hop constraints. +// Any destination specific fields from the final hop params will be attached +// assuming the destination's feature vector signals support, otherwise this +// method will fail. If the route is too long, or the selected path cannot +// support the fully payment including fees, then a non-nil error is returned. // // NOTE: The passed slice of ChannelHops MUST be sorted in forward order: from -// the source to the target node of the path finding attempt. -func newRoute(amtToSend lnwire.MilliSatoshi, sourceVertex route.Vertex, +// the source to the target node of the path finding attempt. It is assumed that +// any feature vectors on all hops have been validated for transitive +// dependencies. 
+func newRoute(sourceVertex route.Vertex, pathEdges []*channeldb.ChannelEdgePolicy, currentHeight uint32, - finalCLTVDelta uint16, - finalDestRecords []tlv.Record) (*route.Route, error) { + finalHop finalHopParams) (*route.Route, error) { var ( hops []*route.Hop @@ -100,18 +115,74 @@ func newRoute(amtToSend lnwire.MilliSatoshi, sourceVertex route.Vertex, // payload for the hop this edge is leading to. edge := pathEdges[i] - // If this is the last hop, then the hop payload will contain - // the exact amount. In BOLT #4: Onion Routing - // Protocol / "Payload for the Last Node", this is detailed. - amtToForward := amtToSend + // We'll calculate the amounts, timelocks, and fees for each hop + // in the route. The base case is the final hop which includes + // their amount and timelocks. These values will accumulate + // contributions from the preceding hops back to the sender as + // we compute the route in reverse. + var ( + amtToForward lnwire.MilliSatoshi + fee lnwire.MilliSatoshi + outgoingTimeLock uint32 + tlvPayload bool + customRecords record.CustomSet + mpp *record.MPP + ) + + // Define a helper function that checks this edge's feature + // vector for support for a given feature. We assume at this + // point that the feature vectors transitive dependencies have + // been validated. + supports := edge.Node.Features.HasFeature + + // We start by assuming the node doesn't support TLV. We'll now + // inspect the node's feature vector to see if we can promote + // the hop. We assume already that the feature vector's + // transitive dependencies have already been validated by path + // finding or some other means. + tlvPayload = supports(lnwire.TLVOnionPayloadOptional) + + if i == len(pathEdges)-1 { + // If this is the last hop, then the hop payload will + // contain the exact amount. In BOLT #4: Onion Routing + // Protocol / "Payload for the Last Node", this is + // detailed. 
+ amtToForward = finalHop.amt + + // Fee is not part of the hop payload, but only used for + // reporting through RPC. Set to zero for the final hop. + fee = lnwire.MilliSatoshi(0) - // Fee is not part of the hop payload, but only used for - // reporting through RPC. Set to zero for the final hop. - fee := lnwire.MilliSatoshi(0) + // As this is the last hop, we'll use the specified + // final CLTV delta value instead of the value from the + // last link in the route. + totalTimeLock += uint32(finalHop.cltvDelta) + outgoingTimeLock = totalTimeLock + + // Attach any custom records to the final hop if the + // receiver supports TLV. + if !tlvPayload && finalHop.records != nil { + return nil, errors.New("cannot attach " + + "custom records") + } + customRecords = finalHop.records + + // If we're attaching a payment addr but the receiver + // doesn't support both TLV and payment addrs, fail. + payAddr := supports(lnwire.PaymentAddrOptional) + if !payAddr && finalHop.paymentAddr != nil { + return nil, errors.New("cannot attach " + + "payment addr") + } - // If the current hop isn't the last hop, then add enough funds - // to pay for transit over the next link. - if i != len(pathEdges)-1 { + // Otherwise attach the mpp record if it exists. + if finalHop.paymentAddr != nil { + mpp = record.NewMPP( + finalHop.totalAmt, + *finalHop.paymentAddr, + ) + } + } else { // The amount that the current hop needs to forward is // equal to the incoming amount of the next hop. amtToForward = nextIncomingAmount @@ -122,32 +193,12 @@ func newRoute(amtToSend lnwire.MilliSatoshi, sourceVertex route.Vertex, // is stored as part of the incoming channel of // the next hop. fee = pathEdges[i+1].ComputeFee(amtToForward) - } - - // If this is the last hop, then for verification purposes, the - // value of the outgoing time-lock should be _exactly_ the - // absolute time out they'd expect in the HTLC. 
- var outgoingTimeLock uint32 - if i == len(pathEdges)-1 { - // As this is the last hop, we'll use the specified - // final CLTV delta value instead of the value from the - // last link in the route. - totalTimeLock += uint32(finalCLTVDelta) - outgoingTimeLock = currentHeight + uint32(finalCLTVDelta) - } else { - // Next, increment the total timelock of the entire - // route such that each hops time lock increases as we - // walk backwards in the route, using the delta of the - // previous hop. - delta := uint32(pathEdges[i+1].TimeLockDelta) - totalTimeLock += delta - - // Otherwise, the value of the outgoing time-lock will - // be the value of the time-lock for the _outgoing_ - // HTLC, so we factor in their specified grace period - // (time lock delta). - outgoingTimeLock = totalTimeLock - delta + // We'll take the total timelock of the preceding hop as + // the outgoing timelock or this hop. Then we'll + // increment the total timelock incurred by this hop. + outgoingTimeLock = totalTimeLock + totalTimeLock += uint32(pathEdges[i+1].TimeLockDelta) } // Since we're traversing the path backwards atm, we prepend @@ -158,25 +209,9 @@ func newRoute(amtToSend lnwire.MilliSatoshi, sourceVertex route.Vertex, ChannelID: edge.ChannelID, AmtToForward: amtToForward, OutgoingTimeLock: outgoingTimeLock, - LegacyPayload: true, - } - - // We start out above by assuming that this node needs the - // legacy payload, as if we don't have the full - // NodeAnnouncement information for this node, then we can't - // assume it knows the latest features. If we do have a feature - // vector for this node, then we'll update the info now. - if edge.Node.Features != nil { - features := edge.Node.Features - currentHop.LegacyPayload = !features.HasFeature( - lnwire.TLVOnionPayloadOptional, - ) - } - - // If this is the last hop, then we'll populate any TLV records - // destined for it. 
- if i == len(pathEdges)-1 && len(finalDestRecords) != 0 { - currentHop.TLVRecords = finalDestRecords + LegacyPayload: !tlvPayload, + CustomRecords: customRecords, + MPP: mpp, } hops = append([]*route.Hop{currentHop}, hops...) @@ -219,12 +254,8 @@ func edgeWeight(lockedAmt lnwire.MilliSatoshi, fee lnwire.MilliSatoshi, // graphParams wraps the set of graph parameters passed to findPath. type graphParams struct { - // tx can be set to an existing db transaction. If not set, a new - // transaction will be started. - tx *bbolt.Tx - // graph is the ChannelGraph to be used during path finding. - graph *channeldb.ChannelGraph + graph routingGraph // additionalEdges is an optional set of edges that should be // considered during path finding, that is not already found in the @@ -257,15 +288,28 @@ type RestrictParams struct { // hop. If nil, any channel may be used. OutgoingChannelID *uint64 + // LastHop is the pubkey of the last node before the final destination + // is reached. If nil, any node may be used. + LastHop *route.Vertex + // CltvLimit is the maximum time lock of the route excluding the final // ctlv. After path finding is complete, the caller needs to increase // all cltv expiry heights with the required final cltv delta. CltvLimit uint32 - // DestPayloadTLV should be set to true if we need to drop off a TLV - // payload at the final hop in order to properly complete this payment - // attempt. - DestPayloadTLV bool + // DestCustomRecords contains the custom records to drop off at the + // final hop, if any. + DestCustomRecords record.CustomSet + + // DestFeatures is a feature vector describing what the final hop + // supports. If none are provided, pathfinding will try to inspect any + // features on the node announcement instead. + DestFeatures *lnwire.FeatureVector + + // PaymentAddr is a random 32-byte value generated by the receiver to + // mitigate probing vectors and payment sniping attacks on overpaid + // invoices. 
+ PaymentAddr *[32]byte } // PathFindingConfig defines global parameters that control the trade-off in @@ -282,20 +326,70 @@ type PathFindingConfig struct { MinProbability float64 } -// findPath attempts to find a path from the source node within the -// ChannelGraph to the target node that's capable of supporting a payment of -// `amt` value. The current approach implemented is modified version of -// Dijkstra's algorithm to find a single shortest path between the source node -// and the destination. The distance metric used for edges is related to the -// time-lock+fee costs along a particular edge. If a path is found, this -// function returns a slice of ChannelHop structs which encoded the chosen path -// from the target to the source. The search is performed backwards from -// destination node back to source. This is to properly accumulate fees -// that need to be paid along the path and accurately check the amount -// to forward at every node against the available bandwidth. +// getOutgoingBalance returns the maximum available balance in any of the +// channels of the given node. The second return parameters is the total +// available balance. +func getOutgoingBalance(node route.Vertex, outgoingChan *uint64, + bandwidthHints map[uint64]lnwire.MilliSatoshi, + g routingGraph) (lnwire.MilliSatoshi, lnwire.MilliSatoshi, error) { + + var max, total lnwire.MilliSatoshi + cb := func(edgeInfo *channeldb.ChannelEdgeInfo, outEdge, + _ *channeldb.ChannelEdgePolicy) error { + + if outEdge == nil { + return nil + } + + chanID := outEdge.ChannelID + + // Enforce outgoing channel restriction. + if outgoingChan != nil && chanID != *outgoingChan { + return nil + } + + bandwidth, ok := bandwidthHints[chanID] + + // If the bandwidth is not available, use the channel capacity. + // This can happen when a channel is added to the graph after + // we've already queried the bandwidth hints. 
+ if !ok { + bandwidth = lnwire.NewMSatFromSatoshis( + edgeInfo.Capacity, + ) + } + + if bandwidth > max { + max = bandwidth + } + + total += bandwidth + + return nil + } + + // Iterate over all channels of the to node. + err := g.forEachNodeChannel(node, cb) + if err != nil { + return 0, 0, err + } + return max, total, err +} + +// findPath attempts to find a path from the source node within the ChannelGraph +// to the target node that's capable of supporting a payment of `amt` value. The +// current approach implemented is modified version of Dijkstra's algorithm to +// find a single shortest path between the source node and the destination. The +// distance metric used for edges is related to the time-lock+fee costs along a +// particular edge. If a path is found, this function returns a slice of +// ChannelHop structs which encoded the chosen path from the target to the +// source. The search is performed backwards from destination node back to +// source. This is to properly accumulate fees that need to be paid along the +// path and accurately check the amount to forward at every node against the +// available bandwidth. func findPath(g *graphParams, r *RestrictParams, cfg *PathFindingConfig, - source, target route.Vertex, amt lnwire.MilliSatoshi) ( - []*channeldb.ChannelEdgePolicy, error) { + source, target route.Vertex, amt lnwire.MilliSatoshi, + finalHtlcExpiry int32) ([]*channeldb.ChannelEdgePolicy, error) { // Pathfinding can be a significant portion of the total payment // latency, especially on low-powered devices. Log several metrics to @@ -309,72 +403,85 @@ func findPath(g *graphParams, r *RestrictParams, cfg *PathFindingConfig, "time=%v", nodesVisited, edgesExpanded, timeElapsed) }() - var err error - tx := g.tx - if tx == nil { - tx, err = g.graph.Database().Begin(false) + // If no destination features are provided, we will load what features + // we have for the target node from our graph. 
+ features := r.DestFeatures + if features == nil { + var err error + features, err = g.graph.fetchNodeFeatures(target) if err != nil { return nil, err } - defer tx.Rollback() } - // First we'll initialize an empty heap which'll help us to quickly - // locate the next edge we should visit next during our graph - // traversal. - nodeHeap := newDistanceHeap() - - // For each node in the graph, we create an entry in the distance map - // for the node set with a distance of "infinity". graph.ForEachNode - // also returns the source node, so there is no need to add the source - // node explicitly. - distance := make(map[route.Vertex]nodeWithDist) - if err := g.graph.ForEachNode(tx, func(_ *bbolt.Tx, - node *channeldb.LightningNode) error { - // TODO(roasbeef): with larger graph can just use disk seeks - // with a visited map - vertex := route.Vertex(node.PubKeyBytes) - distance[vertex] = nodeWithDist{ - dist: infinity, - node: route.Vertex(node.PubKeyBytes), - } - - // If we don't have any features for this node, then we can - // stop here. - if node.Features == nil || !r.DestPayloadTLV { - return nil - } + // Ensure that the destination's features don't include unknown + // required features. + err := feature.ValidateRequired(features) + if err != nil { + return nil, err + } - // We only need to perform this check for the final node, so we - // can exit here if this isn't them. - if vertex != target { - return nil - } + // Ensure that all transitive dependencies are set. + err = feature.ValidateDeps(features) + if err != nil { + return nil, err + } - // If we have any records for the final hop, then we'll check - // not to ensure that they are actually able to interpret them. - supportsTLV := node.Features.HasFeature( - lnwire.TLVOnionPayloadOptional, + // Now that we know the feature vector is well formed, we'll proceed in + // checking that it supports the features we need, given our + // restrictions on the final hop. 
+ + // If the caller needs to send custom records, check that our + // destination feature vector supports TLV. + if len(r.DestCustomRecords) > 0 && + !features.HasFeature(lnwire.TLVOnionPayloadOptional) { + + return nil, errNoTlvPayload + } + + // If the caller has a payment address to attach, check that our + // destination feature vector supports them. + if r.PaymentAddr != nil && + !features.HasFeature(lnwire.PaymentAddrOptional) { + + return nil, errNoPaymentAddr + } + + // If we are routing from ourselves, check that we have enough local + // balance available. + self := g.graph.sourceNode() + + if source == self { + max, total, err := getOutgoingBalance( + self, r.OutgoingChannelID, g.bandwidthHints, g.graph, ) - if !supportsTLV { - return fmt.Errorf("destination hop doesn't " + - "understand new TLV paylods") + if err != nil { + return nil, err } - return nil - }); err != nil { - return nil, err + // If the total outgoing balance isn't sufficient, it will be + // impossible to complete the payment. + if total < amt { + return nil, errInsufficientBalance + } + + // If there is only not enough capacity on a single route, it + // may still be possible to complete the payment by splitting. + if max < amt { + return nil, errNoPathFound + } } + // First we'll initialize an empty heap which'll help us to quickly + // locate the next edge we should visit next during our graph + // traversal. + nodeHeap := newDistanceHeap(estimatedNodeCount) + + // Holds the current best distance for a given node. + distance := make(map[route.Vertex]*nodeWithDist, estimatedNodeCount) + additionalEdgesWithSrc := make(map[route.Vertex][]*edgePolicyWithSource) for vertex, outgoingEdgePolicies := range g.additionalEdges { - // We'll also include all the nodes found within the additional - // edges that are not known to us yet in the distance map. - distance[vertex] = nodeWithDist{ - dist: infinity, - node: vertex, - } - // Build reverse lookup to find incoming edges. 
Needed because // search is taken place from target to source. for _, outgoingEdgePolicy := range outgoingEdgePolicies { @@ -390,91 +497,75 @@ func findPath(g *graphParams, r *RestrictParams, cfg *PathFindingConfig, } } + // Build a preliminary destination hop structure to obtain the payload + // size. + var mpp *record.MPP + if r.PaymentAddr != nil { + mpp = record.NewMPP(amt, *r.PaymentAddr) + } + + finalHop := route.Hop{ + AmtToForward: amt, + OutgoingTimeLock: uint32(finalHtlcExpiry), + CustomRecords: r.DestCustomRecords, + LegacyPayload: !features.HasFeature( + lnwire.TLVOnionPayloadOptional, + ), + MPP: mpp, + } + // We can't always assume that the end destination is publicly - // advertised to the network and included in the graph.ForEachNode call - // above, so we'll manually include the target node. The target node - // charges no fee. Distance is set to 0, because this is the starting - // point of the graph traversal. We are searching backwards to get the - // fees first time right and correctly match channel bandwidth. - distance[target] = nodeWithDist{ + // advertised to the network so we'll manually include the target node. + // The target node charges no fee. Distance is set to 0, because this is + // the starting point of the graph traversal. We are searching backwards + // to get the fees first time right and correctly match channel + // bandwidth. + // + // Don't record the initial partial path in the distance map and reserve + // that key for the source key in the case we route to ourselves. + partialPath := &nodeWithDist{ dist: 0, weight: 0, node: target, amountToReceive: amt, - incomingCltv: 0, + incomingCltv: finalHtlcExpiry, probability: 1, + routingInfoSize: finalHop.PayloadSize(0), } - // We'll use this map as a series of "next" hop pointers. So to get - // from `Vertex` to the target node, we'll take the edge that it's - // mapped to within `next`. 
- next := make(map[route.Vertex]*channeldb.ChannelEdgePolicy) + // Calculate the absolute cltv limit. Use uint64 to prevent an overflow + // if the cltv limit is MaxUint32. + absoluteCltvLimit := uint64(r.CltvLimit) + uint64(finalHtlcExpiry) // processEdge is a helper closure that will be used to make sure edges // satisfy our specific requirements. - processEdge := func(fromVertex route.Vertex, bandwidth lnwire.MilliSatoshi, - edge *channeldb.ChannelEdgePolicy, toNode route.Vertex) { + processEdge := func(fromVertex route.Vertex, + fromFeatures *lnwire.FeatureVector, + edge *channeldb.ChannelEdgePolicy, toNodeDist *nodeWithDist) { edgesExpanded++ - // If this is not a local channel and it is disabled, we will - // skip it. - // TODO(halseth): also ignore disable flags for non-local - // channels if bandwidth hint is set? - isSourceChan := fromVertex == source - - edgeFlags := edge.ChannelFlags - isDisabled := edgeFlags&lnwire.ChanUpdateDisabled != 0 - - if !isSourceChan && isDisabled { - return - } - - // If we have an outgoing channel restriction and this is not - // the specified channel, skip it. - if isSourceChan && r.OutgoingChannelID != nil && - *r.OutgoingChannelID != edge.ChannelID { - - return - } - - // Calculate amount that the candidate node would have to sent + // Calculate amount that the candidate node would have to send // out. - toNodeDist := distance[toNode] amountToSend := toNodeDist.amountToReceive // Request the success probability for this edge. 
edgeProbability := r.ProbabilitySource( - fromVertex, toNode, amountToSend, + fromVertex, toNodeDist.node, amountToSend, ) - log.Tracef("path finding probability: fromnode=%v, tonode=%v, "+ - "probability=%v", fromVertex, toNode, edgeProbability) + log.Trace(newLogClosure(func() string { + return fmt.Sprintf("path finding probability: fromnode=%v,"+ + " tonode=%v, amt=%v, probability=%v", + fromVertex, toNodeDist.node, amountToSend, + edgeProbability) + })) // If the probability is zero, there is no point in trying. if edgeProbability == 0 { return } - // If the estimated bandwidth of the channel edge is not able - // to carry the amount that needs to be send, return. - if bandwidth < amountToSend { - return - } - - // If the amountToSend is less than the minimum required - // amount, return. - if amountToSend < edge.MinHTLC { - return - } - - // If this edge was constructed from a hop hint, we won't have access to - // its max HTLC. Therefore, only consider discarding this edge here if - // the field is set. - if edge.MaxHTLC != 0 && edge.MaxHTLC < amountToSend { - return - } - // Compute fee that fromVertex is charging. It is based on the // amount that needs to be sent to the next node in the route. // @@ -492,11 +583,10 @@ func findPath(g *graphParams, r *RestrictParams, cfg *PathFindingConfig, timeLockDelta = edge.TimeLockDelta } - incomingCltv := toNodeDist.incomingCltv + - uint32(timeLockDelta) + incomingCltv := toNodeDist.incomingCltv + int32(timeLockDelta) // Check that we are within our CLTV limit. - if incomingCltv > r.CltvLimit { + if uint64(incomingCltv) > absoluteCltvLimit { return } @@ -544,12 +634,25 @@ func findPath(g *graphParams, r *RestrictParams, cfg *PathFindingConfig, int64(cfg.PaymentAttemptPenalty), ) - // If the current best route is better than this candidate - // route, return. It is important to also return if the distance - // is equal, because otherwise the algorithm could run into an - // endless loop. 
- if tempDist >= distance[fromVertex].dist { - return + // If there is already a best route stored, compare this + // candidate route with the best route so far. + current, ok := distance[fromVertex] + if ok { + // If this route is worse than what we already found, + // skip this route. + if tempDist > current.dist { + return + } + + // If the route is equally good and the probability + // isn't better, skip this route. It is important to + // also return if both cost and probability are equal, + // because otherwise the algorithm could run into an + // endless loop. + probNotBetter := probability <= current.probability + if tempDist == current.dist && probNotBetter { + return + } } // Every edge should have a positive time lock delta. If we @@ -559,147 +662,220 @@ func findPath(g *graphParams, r *RestrictParams, cfg *PathFindingConfig, edge.ChannelID) } + // Calculate the total routing info size if this hop were to be + // included. If we are coming from the source hop, the payload + // size is zero, because the original htlc isn't in the onion + // blob. + var payloadSize uint64 + if fromVertex != source { + supportsTlv := fromFeatures.HasFeature( + lnwire.TLVOnionPayloadOptional, + ) + + hop := route.Hop{ + AmtToForward: amountToSend, + OutgoingTimeLock: uint32( + toNodeDist.incomingCltv, + ), + LegacyPayload: !supportsTlv, + } + + payloadSize = hop.PayloadSize(edge.ChannelID) + } + + routingInfoSize := toNodeDist.routingInfoSize + payloadSize + + // Skip paths that would exceed the maximum routing info size. + if routingInfoSize > sphinx.MaxPayloadSize { + return + } + // All conditions are met and this new tentative distance is // better than the current best known distance to this node. // The new better distance is recorded, and also our "next hop" // map is populated with this edge. 
- distance[fromVertex] = nodeWithDist{ + withDist := &nodeWithDist{ dist: tempDist, weight: tempWeight, node: fromVertex, amountToReceive: amountToReceive, incomingCltv: incomingCltv, probability: probability, + nextHop: edge, + routingInfoSize: routingInfoSize, } + distance[fromVertex] = withDist - next[fromVertex] = edge - - // Either push distance[fromVertex] onto the heap if the node + // Either push withDist onto the heap if the node // represented by fromVertex is not already on the heap OR adjust // its position within the heap via heap.Fix. - nodeHeap.PushOrFix(distance[fromVertex]) + nodeHeap.PushOrFix(withDist) } // TODO(roasbeef): also add path caching // * similar to route caching, but doesn't factor in the amount - // To start, our target node will the sole item within our distance - // heap. - heap.Push(&nodeHeap, distance[target]) + // Cache features because we visit nodes multiple times. + featureCache := make(map[route.Vertex]*lnwire.FeatureVector) + + // getGraphFeatures returns (cached) node features from the graph. + getGraphFeatures := func(node route.Vertex) (*lnwire.FeatureVector, + error) { - for nodeHeap.Len() != 0 { + // Check cache for features of the fromNode. + fromFeatures, ok := featureCache[node] + if ok { + return fromFeatures, nil + } + + // Fetch node features fresh from the graph. + fromFeatures, err := g.graph.fetchNodeFeatures(node) + if err != nil { + return nil, err + } + + // Don't route through nodes that contain unknown required + // features and mark as nil in the cache. + err = feature.ValidateRequired(fromFeatures) + if err != nil { + featureCache[node] = nil + return nil, nil + } + + // Don't route through nodes that don't properly set all + // transitive feature dependencies and mark as nil in the cache. + err = feature.ValidateDeps(fromFeatures) + if err != nil { + featureCache[node] = nil + return nil, nil + } + + // Update cache. 
+ featureCache[node] = fromFeatures + + return fromFeatures, nil + } + + routeToSelf := source == target + for { nodesVisited++ - // Fetch the node within the smallest distance from our source - // from the heap. - partialPath := heap.Pop(&nodeHeap).(nodeWithDist) pivot := partialPath.node - // If we've reached our source (or we don't have any incoming - // edges), then we're done here and can exit the graph - // traversal early. - if pivot == source { - break + // Create unified policies for all incoming connections. + u := newUnifiedPolicies(self, pivot, r.OutgoingChannelID) + + err := u.addGraphPolicies(g.graph) + if err != nil { + return nil, err } - cb := func(_ *bbolt.Tx, edgeInfo *channeldb.ChannelEdgeInfo, _, - inEdge *channeldb.ChannelEdgePolicy) error { + for _, reverseEdge := range additionalEdgesWithSrc[pivot] { + u.addPolicy(reverseEdge.sourceNode, reverseEdge.edge, 0) + } + + amtToSend := partialPath.amountToReceive + + // Expand all connections using the optimal policy for each + // connection. + for fromNode, unifiedPolicy := range u.policies { + // The target node is not recorded in the distance map. + // Therefore we need to have this check to prevent + // creating a cycle. Only when we intend to route to + // self, we allow this cycle to form. In that case we'll + // also break out of the search loop below. + if !routeToSelf && fromNode == target { + continue + } + + // Apply last hop restriction if set. + if r.LastHop != nil && + pivot == target && fromNode != *r.LastHop { - // If there is no edge policy for this candidate - // node, skip. Note that we are searching backwards - // so this node would have come prior to the pivot - // node in the route. - if inEdge == nil { - return nil + continue } - // We'll query the lower layer to see if we can obtain - // any more up to date information concerning the - // bandwidth of this edge. 
- edgeBandwidth, ok := g.bandwidthHints[edgeInfo.ChannelID] - if !ok { - // If we don't have a hint for this edge, then - // we'll just use the known Capacity/MaxHTLC as - // the available bandwidth. It's possible for - // the capacity to be unknown when operating - // under a light client. - edgeBandwidth = inEdge.MaxHTLC - if edgeBandwidth == 0 { - edgeBandwidth = lnwire.NewMSatFromSatoshis( - edgeInfo.Capacity, - ) - } + policy := unifiedPolicy.getPolicy( + amtToSend, g.bandwidthHints, + ) + + if policy == nil { + continue } - // Before we can process the edge, we'll need to fetch - // the node on the _other_ end of this channel as we - // may later need to iterate over the incoming edges of - // this node if we explore it further. - chanSource, err := edgeInfo.OtherNodeKeyBytes(pivot[:]) + // Get feature vector for fromNode. + fromFeatures, err := getGraphFeatures(fromNode) if err != nil { - return err + return nil, err + } + + // If there are no valid features, skip this node. + if fromFeatures == nil { + continue } // Check if this candidate node is better than what we // already have. - processEdge(route.Vertex(chanSource), edgeBandwidth, inEdge, pivot) - return nil + processEdge(fromNode, fromFeatures, policy, partialPath) } - // Now that we've found the next potential step to take we'll - // examine all the incoming edges (channels) from this node to - // further our graph traversal. - err := g.graph.ForEachNodeChannel(tx, pivot[:], cb) - if err != nil { - return nil, err + if nodeHeap.Len() == 0 { + break } - // Then, we'll examine all the additional edges from the node - // we're currently visiting. Since we don't know the capacity - // of the private channel, we'll assume it was selected as a - // routing hint due to having enough capacity for the payment - // and use the payment amount as its capacity. 
- bandWidth := partialPath.amountToReceive - for _, reverseEdge := range additionalEdgesWithSrc[pivot] { - processEdge(reverseEdge.sourceNode, bandWidth, - reverseEdge.edge, pivot) - } - } + // Fetch the node within the smallest distance from our source + // from the heap. + partialPath = heap.Pop(&nodeHeap).(*nodeWithDist) - // If the source node isn't found in the next hop map, then a path - // doesn't exist, so we terminate in an error. - if _, ok := next[source]; !ok { - return nil, newErrf(ErrNoPathFound, "unable to find a path to "+ - "destination") + // If we've reached our source (or we don't have any incoming + // edges), then we're done here and can exit the graph + // traversal early. + if partialPath.node == source { + break + } } - // Use the nextHop map to unravel the forward path from source to + // Use the distance map to unravel the forward path from source to // target. - pathEdges := make([]*channeldb.ChannelEdgePolicy, 0, len(next)) + var pathEdges []*channeldb.ChannelEdgePolicy currentNode := source - for currentNode != target { // TODO(roasbeef): assumes no cycles + for { // Determine the next hop forward using the next map. - nextNode := next[currentNode] + currentNodeWithDist, ok := distance[currentNode] + if !ok { + // If the node doesnt have a next hop it means we didn't find a path. + return nil, errNoPathFound + } // Add the next hop to the list of path edges. - pathEdges = append(pathEdges, nextNode) + pathEdges = append(pathEdges, currentNodeWithDist.nextHop) // Advance current node. - currentNode = route.Vertex(nextNode.Node.PubKeyBytes) - } + currentNode = currentNodeWithDist.nextHop.Node.PubKeyBytes - // The route is invalid if it spans more than 20 hops. The current - // Sphinx (onion routing) implementation can only encode up to 20 hops - // as the entire packet is fixed size. If this route is more than 20 - // hops, then it's invalid. 
- numEdges := len(pathEdges) - if numEdges > HopLimit { - return nil, newErr(ErrMaxHopsExceeded, "potential path has "+ - "too many hops") + // Check stop condition at the end of this loop. This prevents + // breaking out too soon for self-payments that have target set + // to source. + if currentNode == target { + break + } } - log.Debugf("Found route: probability=%v, hops=%v, fee=%v\n", - distance[source].probability, numEdges, + // For the final hop, we'll set the node features to those determined + // above. These are either taken from the destination features, e.g. + // virtual or invoice features, or loaded as a fallback from the graph. + // The transitive dependencies were already validated above, so no need + // to do so now. + // + // NOTE: This may overwrite features loaded from the graph if + // destination features were provided. This is fine though, since our + // route construction does not care where the features are actually + // taken from. In the future we may wish to do route construction within + // findPath, and avoid using ChannelEdgePolicy altogether. 
+ pathEdges[len(pathEdges)-1].Node.Features = features + + log.Debugf("Found route: probability=%v, hops=%v, fee=%v", + distance[source].probability, len(pathEdges), distance[source].amountToReceive-amt) return pathEdges, nil diff --git a/routing/pathfind_test.go b/routing/pathfind_test.go index d5e35ee4f9..deaccb2898 100644 --- a/routing/pathfind_test.go +++ b/routing/pathfind_test.go @@ -13,6 +13,7 @@ import ( "math/big" "net" "os" + "reflect" "strings" "testing" "time" @@ -22,7 +23,9 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/feature" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/zpay32" ) @@ -58,6 +61,35 @@ var ( } testPathFindingConfig = &PathFindingConfig{} + + tlvFeatures = lnwire.NewFeatureVector( + lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadOptional, + ), lnwire.Features, + ) + + payAddrFeatures = lnwire.NewFeatureVector( + lnwire.NewRawFeatureVector( + lnwire.PaymentAddrOptional, + ), lnwire.Features, + ) + + tlvPayAddrFeatures = lnwire.NewFeatureVector( + lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadOptional, + lnwire.PaymentAddrOptional, + ), lnwire.Features, + ) + + mppFeatures = lnwire.NewRawFeatureVector( + lnwire.TLVOnionPayloadOptional, + lnwire.PaymentAddrOptional, + lnwire.MPPOptional, + ) + + unknownRequiredFeatures = lnwire.NewFeatureVector( + lnwire.NewRawFeatureVector(100), lnwire.Features, + ) ) var ( @@ -317,7 +349,7 @@ type testChannelPolicy struct { FeeRate lnwire.MilliSatoshi LastUpdate time.Time Disabled bool - Direction bool + Features *lnwire.FeatureVector } type testChannelEnd struct { @@ -325,7 +357,7 @@ type testChannelEnd struct { *testChannelPolicy } -func symmetricTestChannel(alias1 string, alias2 string, capacity btcutil.Amount, +func symmetricTestChannel(alias1, alias2 string, capacity 
btcutil.Amount, policy *testChannelPolicy, chanID ...uint64) *testChannel { // Leaving id zero will result in auto-generation of a channel id during @@ -335,18 +367,25 @@ func symmetricTestChannel(alias1 string, alias2 string, capacity btcutil.Amount, id = chanID[0] } - node2Policy := *policy - node2Policy.Direction = !policy.Direction + policy2 := *policy + + return asymmetricTestChannel( + alias1, alias2, capacity, policy, &policy2, id, + ) +} + +func asymmetricTestChannel(alias1, alias2 string, capacity btcutil.Amount, + policy1, policy2 *testChannelPolicy, id uint64) *testChannel { return &testChannel{ Capacity: capacity, Node1: &testChannelEnd{ Alias: alias1, - testChannelPolicy: policy, + testChannelPolicy: policy1, }, Node2: &testChannelEnd{ Alias: alias2, - testChannelPolicy: &node2Policy, + testChannelPolicy: policy2, }, ChannelID: id, } @@ -401,7 +440,9 @@ func createTestGraphFromChannels(testChannels []*testChannel, source string) ( privKeyMap := make(map[string]*btcec.PrivateKey) nodeIndex := byte(0) - addNodeWithAlias := func(alias string) (*channeldb.LightningNode, error) { + addNodeWithAlias := func(alias string, features *lnwire.FeatureVector) ( + *channeldb.LightningNode, error) { + keyBytes := make([]byte, 32) keyBytes = []byte{ 0, 0, 0, 0, 0, 0, 0, 0, @@ -413,13 +454,17 @@ func createTestGraphFromChannels(testChannels []*testChannel, source string) ( privKey, pubKey := btcec.PrivKeyFromBytes(btcec.S256(), keyBytes) + if features == nil { + features = lnwire.EmptyFeatureVector() + } + dbNode := &channeldb.LightningNode{ HaveNodeAnnouncement: true, AuthSigBytes: testSig.Serialize(), LastUpdate: testTime, Addresses: testAddrs, Alias: alias, - Features: testFeatures, + Features: features, } copy(dbNode.PubKeyBytes[:], pubKey.SerializeCompressed()) @@ -439,7 +484,7 @@ func createTestGraphFromChannels(testChannels []*testChannel, source string) ( } // Add the source node. 
- dbNode, err := addNodeWithAlias(source) + dbNode, err := addNodeWithAlias(source, lnwire.EmptyFeatureVector()) if err != nil { return nil, err } @@ -453,12 +498,19 @@ func createTestGraphFromChannels(testChannels []*testChannel, source string) ( nextUnassignedChannelID := uint64(100000) for _, testChannel := range testChannels { - for _, alias := range []string{ - testChannel.Node1.Alias, testChannel.Node2.Alias} { + for _, node := range []*testChannelEnd{ + testChannel.Node1, testChannel.Node2} { - _, exists := aliasMap[alias] + _, exists := aliasMap[node.Alias] if !exists { - _, err := addNodeWithAlias(alias) + var features *lnwire.FeatureVector + if node.testChannelPolicy != nil { + features = + node.testChannelPolicy.Features + } + _, err := addNodeWithAlias( + node.Alias, features, + ) if err != nil { return nil, err } @@ -510,18 +562,16 @@ func createTestGraphFromChannels(testChannels []*testChannel, source string) ( return nil, err } - if testChannel.Node1.testChannelPolicy != nil { + if node1.testChannelPolicy != nil { var msgFlags lnwire.ChanUpdateMsgFlags - if testChannel.Node1.MaxHTLC != 0 { + if node1.MaxHTLC != 0 { msgFlags |= lnwire.ChanUpdateOptionMaxHtlc } var channelFlags lnwire.ChanUpdateChanFlags - if testChannel.Node1.Disabled { + if node1.Disabled { channelFlags |= lnwire.ChanUpdateDisabled } - if testChannel.Node1.Direction { - channelFlags |= lnwire.ChanUpdateDirection - } + edgePolicy := &channeldb.ChannelEdgePolicy{ SigBytes: testSig.Serialize(), MessageFlags: msgFlags, @@ -539,18 +589,17 @@ func createTestGraphFromChannels(testChannels []*testChannel, source string) ( } } - if testChannel.Node2.testChannelPolicy != nil { + if node2.testChannelPolicy != nil { var msgFlags lnwire.ChanUpdateMsgFlags - if testChannel.Node2.MaxHTLC != 0 { + if node2.MaxHTLC != 0 { msgFlags |= lnwire.ChanUpdateOptionMaxHtlc } - channelFlags := lnwire.ChanUpdateChanFlags(0) - if testChannel.Node2.Disabled { + var channelFlags lnwire.ChanUpdateChanFlags + if 
node2.Disabled { channelFlags |= lnwire.ChanUpdateDisabled } - if testChannel.Node2.Direction { - channelFlags |= lnwire.ChanUpdateDirection - } + channelFlags |= lnwire.ChanUpdateDirection + edgePolicy := &channeldb.ChannelEdgePolicy{ SigBytes: testSig.Serialize(), MessageFlags: msgFlags, @@ -622,19 +671,8 @@ func TestFindLowestFeePath(t *testing.T) { }), } - testGraphInstance, err := createTestGraphFromChannels( - testChannels, "roasbeef", - ) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } - defer testGraphInstance.cleanUp() - - sourceNode, err := testGraphInstance.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } - sourceVertex := route.Vertex(sourceNode.PubKeyBytes) + ctx := newPathFindingTestContext(t, testChannels, "roasbeef") + defer ctx.cleanup() const ( startingHeight = 100 @@ -642,32 +680,28 @@ func TestFindLowestFeePath(t *testing.T) { ) paymentAmt := lnwire.NewMSatFromSatoshis(100) - target := testGraphInstance.aliasMap["target"] - path, err := findPath( - &graphParams{ - graph: testGraphInstance.graph, - }, - noRestrictions, - testPathFindingConfig, - sourceNode.PubKeyBytes, target, paymentAmt, - ) + target := ctx.keyFromAlias("target") + path, err := ctx.findPath(target, paymentAmt) if err != nil { t.Fatalf("unable to find path: %v", err) } route, err := newRoute( - paymentAmt, sourceVertex, path, startingHeight, - finalHopCLTV, nil, + ctx.source, path, startingHeight, + finalHopParams{ + amt: paymentAmt, + cltvDelta: finalHopCLTV, + records: nil, + }, ) if err != nil { t.Fatalf("unable to create path: %v", err) } // Assert that the lowest fee route is returned. 
- if route.Hops[1].PubKeyBytes != testGraphInstance.aliasMap["b"] { + if route.Hops[1].PubKeyBytes != ctx.keyFromAlias("b") { t.Fatalf("expected route to pass through b, "+ "but got a route through %v", - getAliasFromPubKey(route.Hops[1].PubKeyBytes, - testGraphInstance.aliasMap)) + ctx.aliasFromKey(route.Hops[1].PubKeyBytes)) } } @@ -783,10 +817,8 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc paymentAmt := lnwire.NewMSatFromSatoshis(test.paymentAmt) target := graphInstance.aliasMap[test.target] - path, err := findPath( - &graphParams{ - graph: graphInstance.graph, - }, + path, err := dbFindPath( + graphInstance.graph, nil, nil, &RestrictParams{ FeeLimit: test.feeLimit, ProbabilitySource: noProbabilitySource, @@ -794,6 +826,7 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc }, testPathFindingConfig, sourceNode.PubKeyBytes, target, paymentAmt, + startingHeight+finalHopCLTV, ) if test.expectFailureNoPath { if err == nil { @@ -806,8 +839,12 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc } route, err := newRoute( - paymentAmt, sourceVertex, path, startingHeight, - finalHopCLTV, nil, + sourceVertex, path, startingHeight, + finalHopParams{ + amt: paymentAmt, + cltvDelta: finalHopCLTV, + records: nil, + }, ) if err != nil { t.Fatalf("unable to create path: %v", err) @@ -914,6 +951,10 @@ func testBasicGraphPathFindingCase(t *testing.T, graphInstance *testGraphInstanc } } +// TestPathFindingWithAdditionalEdges asserts that we are able to find paths to +// nodes that do not exist in the graph by way of hop hints. We also test that +// the path can support custom TLV records for the receiver under the +// appropriate circumstances. 
func TestPathFindingWithAdditionalEdges(t *testing.T) { t.Parallel() @@ -965,15 +1006,19 @@ func TestPathFindingWithAdditionalEdges(t *testing.T) { graph.aliasMap["songoku"]: {songokuToDoge}, } + find := func(r *RestrictParams) ( + []*channeldb.ChannelEdgePolicy, error) { + + return dbFindPath( + graph.graph, additionalEdges, nil, + r, testPathFindingConfig, + sourceNode.PubKeyBytes, doge.PubKeyBytes, paymentAmt, + 0, + ) + } + // We should now be able to find a path from roasbeef to doge. - path, err := findPath( - &graphParams{ - graph: graph.graph, - additionalEdges: additionalEdges, - }, - noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, doge.PubKeyBytes, paymentAmt, - ) + path, err := find(noRestrictions) if err != nil { t.Fatalf("unable to find private path to doge: %v", err) } @@ -981,6 +1026,35 @@ func TestPathFindingWithAdditionalEdges(t *testing.T) { // The path should represent the following hops: // roasbeef -> songoku -> doge assertExpectedPath(t, graph.aliasMap, path, "songoku", "doge") + + // Now, set custom records for the final hop. This should fail since no + // dest features are set, and we won't have a node ann to fall back on. + restrictions := *noRestrictions + restrictions.DestCustomRecords = record.CustomSet{70000: []byte{}} + + _, err = find(&restrictions) + if err != errNoTlvPayload { + t.Fatalf("path shouldn't have been found: %v", err) + } + + // Set empty dest features so we don't try the fallback. We should still + // fail since the tlv feature isn't set. + restrictions.DestFeatures = lnwire.EmptyFeatureVector() + + _, err = find(&restrictions) + if err != errNoTlvPayload { + t.Fatalf("path shouldn't have been found: %v", err) + } + + // Finally, set the tlv feature in the payload and assert we found the + // same path as before. 
+ restrictions.DestFeatures = tlvFeatures + + path, err = find(&restrictions) + if err != nil { + t.Fatalf("path should have been found: %v", err) + } + assertExpectedPath(t, graph.aliasMap, path, "songoku", "doge") } // TestNewRoute tests whether the construction of hop payloads by newRoute @@ -990,6 +1064,8 @@ func TestNewRoute(t *testing.T) { var sourceKey [33]byte sourceVertex := route.Vertex(sourceKey) + testPaymentAddr := [32]byte{0x01, 0x02, 0x03} + const ( startingHeight = 100 finalHopCLTV = 1 @@ -1024,6 +1100,12 @@ func TestNewRoute(t *testing.T) { // indicated by hops. paymentAmount lnwire.MilliSatoshi + // destFeatures is a feature vector, that if non-nil, will + // overwrite the final hop's feature vector in the graph. + destFeatures *lnwire.FeatureVector + + paymentAddr *[32]byte + // expectedFees is a list of fees that every hop is expected // to charge for forwarding. expectedFees []lnwire.MilliSatoshi @@ -1052,6 +1134,10 @@ func TestNewRoute(t *testing.T) { // expectedErrorCode indicates the expected error code when // expectError is true. expectedErrorCode errorCode + + expectedTLVPayload bool + + expectedMPP *record.MPP }{ { // For a single hop payment, no fees are expected to be paid. @@ -1078,6 +1164,42 @@ func TestNewRoute(t *testing.T) { expectedTimeLocks: []uint32{1, 1}, expectedTotalAmount: 100130, expectedTotalTimeLock: 6, + }, { + // For a two hop payment, only the fee for the first hop + // needs to be paid. The destination hop does not require + // a fee to receive the payment. + name: "two hop tlv onion feature", + destFeatures: tlvFeatures, + paymentAmount: 100000, + hops: []*channeldb.ChannelEdgePolicy{ + createHop(0, 1000, 1000000, 10), + createHop(30, 1000, 1000000, 5), + }, + expectedFees: []lnwire.MilliSatoshi{130, 0}, + expectedTimeLocks: []uint32{1, 1}, + expectedTotalAmount: 100130, + expectedTotalTimeLock: 6, + expectedTLVPayload: true, + }, { + // For a two hop payment, only the fee for the first hop + // needs to be paid. 
The destination hop does not require + // a fee to receive the payment. + name: "two hop single shot mpp", + destFeatures: tlvPayAddrFeatures, + paymentAddr: &testPaymentAddr, + paymentAmount: 100000, + hops: []*channeldb.ChannelEdgePolicy{ + createHop(0, 1000, 1000000, 10), + createHop(30, 1000, 1000000, 5), + }, + expectedFees: []lnwire.MilliSatoshi{130, 0}, + expectedTimeLocks: []uint32{1, 1}, + expectedTotalAmount: 100130, + expectedTotalTimeLock: 6, + expectedTLVPayload: true, + expectedMPP: record.NewMPP( + 100000, testPaymentAddr, + ), }, { // A three hop payment where the first and second hop // will both charge 1 msat. The fee for the first hop @@ -1134,6 +1256,15 @@ func TestNewRoute(t *testing.T) { }} for _, testCase := range testCases { + testCase := testCase + + // Overwrite the final hop's features if the test requires a + // custom feature vector. + if testCase.destFeatures != nil { + finalHop := testCase.hops[len(testCase.hops)-1] + finalHop.Node.Features = testCase.destFeatures + } + assertRoute := func(t *testing.T, route *route.Route) { if route.TotalAmount != testCase.expectedTotalAmount { t.Errorf("Expected total amount is be %v"+ @@ -1177,13 +1308,36 @@ func TestNewRoute(t *testing.T) { route.Hops[i].OutgoingTimeLock) } } + + finalHop := route.Hops[len(route.Hops)-1] + if !finalHop.LegacyPayload != + testCase.expectedTLVPayload { + + t.Errorf("Expected final hop tlv payload: %t, "+ + "but got: %t instead", + testCase.expectedTLVPayload, + !finalHop.LegacyPayload) + } + + if !reflect.DeepEqual( + finalHop.MPP, testCase.expectedMPP, + ) { + t.Errorf("Expected final hop mpp field: %v, "+ + " but got: %v instead", + testCase.expectedMPP, finalHop.MPP) + } } t.Run(testCase.name, func(t *testing.T) { route, err := newRoute( - testCase.paymentAmount, sourceVertex, - testCase.hops, startingHeight, finalHopCLTV, - nil, + sourceVertex, testCase.hops, startingHeight, + finalHopParams{ + amt: testCase.paymentAmount, + totalAmt: testCase.paymentAmount, + 
cltvDelta: finalHopCLTV, + records: nil, + paymentAddr: testCase.paymentAddr, + }, ) if testCase.expectError { @@ -1207,53 +1361,53 @@ func TestNewRoute(t *testing.T) { } func TestNewRoutePathTooLong(t *testing.T) { - t.Skip() + t.Parallel() - // Ensure that potential paths which are over the maximum hop-limit are - // rejected. - graph, err := parseTestGraph(excessiveHopsGraphFilePath) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } - defer graph.cleanUp() + var testChannels []*testChannel - sourceNode, err := graph.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) + // Setup a linear network of 21 hops. + fromNode := "start" + for i := 0; i < 21; i++ { + toNode := fmt.Sprintf("node-%v", i+1) + c := symmetricTestChannel(fromNode, toNode, 100000, &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000001, + }) + testChannels = append(testChannels, c) + + fromNode = toNode } - paymentAmt := lnwire.NewMSatFromSatoshis(100) + ctx := newPathFindingTestContext(t, testChannels, "start") + defer ctx.cleanup() - // We start by confirming that routing a payment 20 hops away is - // possible. Alice should be able to find a valid route to ursula. - target := graph.aliasMap["ursula"] - _, err = findPath( - &graphParams{ - graph: graph.graph, - }, - noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, paymentAmt, - ) + // Assert that we can find 20 hop routes. + node20 := ctx.keyFromAlias("node-20") + payAmt := lnwire.MilliSatoshi(100001) + _, err := ctx.findPath(node20, payAmt) if err != nil { - t.Fatalf("path should have been found") + t.Fatalf("unexpected pathfinding failure: %v", err) } - // Vincent is 21 hops away from Alice, and thus no valid route should be - // presented to Alice. 
- target = graph.aliasMap["vincent"] - path, err := findPath( - &graphParams{ - graph: graph.graph, - }, - noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, paymentAmt, - ) - if err == nil { - t.Fatalf("should not have been able to find path, supposed to be "+ - "greater than 20 hops, found route with %v hops", - len(path)) + // Assert that finding a 21 hop route fails. + node21 := ctx.keyFromAlias("node-21") + _, err = ctx.findPath(node21, payAmt) + if err != errNoPathFound { + t.Fatalf("not route error expected, but got %v", err) } + // Assert that we can't find a 20 hop route if custom records make it + // exceed the maximum payload size. + ctx.restrictParams.DestFeatures = tlvFeatures + ctx.restrictParams.DestCustomRecords = map[uint64][]byte{ + 100000: bytes.Repeat([]byte{1}, 100), + } + _, err = ctx.findPath(node20, payAmt) + if err != errNoPathFound { + t.Fatalf("not route error expected, but got %v", err) + } } func TestPathNotAvailable(t *testing.T) { @@ -1281,16 +1435,307 @@ func TestPathNotAvailable(t *testing.T) { var unknownNode route.Vertex copy(unknownNode[:], unknownNodeBytes) - _, err = findPath( - &graphParams{ - graph: graph.graph, - }, + _, err = dbFindPath( + graph.graph, nil, nil, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, unknownNode, 100, + sourceNode.PubKeyBytes, unknownNode, 100, 0, ) - if !IsError(err, ErrNoPathFound) { + if err != errNoPathFound { + t.Fatalf("path shouldn't have been found: %v", err) + } +} + +// TestDestTLVGraphFallback asserts that we properly detect when we can send TLV +// records to a receiver, and also that we fallback to the receiver's node +// announcement if we don't have an invoice features. 
+func TestDestTLVGraphFallback(t *testing.T) { + t.Parallel() + + testChannels := []*testChannel{ + asymmetricTestChannel("roasbeef", "luoji", 100000, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + }, &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + }, 0), + asymmetricTestChannel("roasbeef", "satoshi", 100000, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + }, &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + Features: tlvFeatures, + }, 0), + } + + ctx := newPathFindingTestContext(t, testChannels, "roasbeef") + defer ctx.cleanup() + + sourceNode, err := ctx.graph.SourceNode() + if err != nil { + t.Fatalf("unable to fetch source node: %v", err) + + } + + find := func(r *RestrictParams, + target route.Vertex) ([]*channeldb.ChannelEdgePolicy, error) { + + return dbFindPath( + ctx.graph, nil, nil, + r, testPathFindingConfig, + sourceNode.PubKeyBytes, target, 100, 0, + ) + } + + // Luoji's node ann has an empty feature vector. + luoji := ctx.testGraphInstance.aliasMap["luoji"] + + // Satoshi's node ann supports TLV. + satoshi := ctx.testGraphInstance.aliasMap["satoshi"] + + restrictions := *noRestrictions + + // Add custom records w/o any dest features. + restrictions.DestCustomRecords = record.CustomSet{70000: []byte{}} + + // Path to luoji should fail because his node ann features are empty. + _, err = find(&restrictions, luoji) + if err != errNoTlvPayload { + t.Fatalf("path shouldn't have been found: %v", err) + } + + // However, path to satoshi should succeed via the fallback because his + // node ann features have the TLV bit. + path, err := find(&restrictions, satoshi) + if err != nil { + t.Fatalf("path should have been found: %v", err) + } + assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "satoshi") + + // Add empty destination features. 
This should cause both paths to fail, + // since this override anything in the graph. + restrictions.DestFeatures = lnwire.EmptyFeatureVector() + + _, err = find(&restrictions, luoji) + if err != errNoTlvPayload { + t.Fatalf("path shouldn't have been found: %v", err) + } + _, err = find(&restrictions, satoshi) + if err != errNoTlvPayload { + t.Fatalf("path shouldn't have been found: %v", err) + } + + // Finally, set the TLV dest feature. We should succeed in finding a + // path to luoji. + restrictions.DestFeatures = tlvFeatures + + path, err = find(&restrictions, luoji) + if err != nil { + t.Fatalf("path should have been found: %v", err) + } + assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "luoji") +} + +// TestMissingFeatureDep asserts that we fail path finding when the +// destination's features are broken, in that the feature vector doesn't signal +// all transitive dependencies. +func TestMissingFeatureDep(t *testing.T) { + t.Parallel() + + testChannels := []*testChannel{ + asymmetricTestChannel("roasbeef", "conner", 100000, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + }, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + Features: payAddrFeatures, + }, 0, + ), + asymmetricTestChannel("conner", "joost", 100000, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + Features: payAddrFeatures, + }, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + }, 0, + ), + } + + ctx := newPathFindingTestContext(t, testChannels, "roasbeef") + defer ctx.cleanup() + + // Conner's node in the graph has a broken feature vector, since it + // signals payment addresses without signaling tlv onions. Pathfinding + // should fail since we validate transitive feature dependencies for the + // final node. 
+ conner := ctx.keyFromAlias("conner") + joost := ctx.keyFromAlias("joost") + + _, err := ctx.findPath(conner, 100) + if err != feature.NewErrMissingFeatureDep( + lnwire.TLVOnionPayloadOptional, + ) { + t.Fatalf("path shouldn't have been found: %v", err) + } + + // Now, set the TLV and payment addresses features to override the + // broken features found in the graph. We should succeed in finding a + // path to conner. + ctx.restrictParams.DestFeatures = tlvPayAddrFeatures + + path, err := ctx.findPath(conner, 100) + if err != nil { + t.Fatalf("path should have been found: %v", err) + } + assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "conner") + + // Finally, try to find a route to joost through conner. The + // destination features are set properly from the previous assertions, + // but conner's feature vector in the graph is still broken. We expect + // errNoPathFound and not the missing feature dep err above since + // intermediate hops are simply skipped if they have invalid feature + // vectors, leaving no possible route to joost. + _, err = ctx.findPath(joost, 100) + if err != errNoPathFound { + t.Fatalf("path shouldn't have been found: %v", err) + } +} + +// TestUnknownRequiredFeatures asserts that we fail path finding when the +// destination requires an unknown required feature, and that we skip +// intermediaries that signal unknown required features. 
+func TestUnknownRequiredFeatures(t *testing.T) { + t.Parallel() + + testChannels := []*testChannel{ + asymmetricTestChannel("roasbeef", "conner", 100000, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + }, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + Features: unknownRequiredFeatures, + }, 0, + ), + asymmetricTestChannel("conner", "joost", 100000, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + Features: unknownRequiredFeatures, + }, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + }, 0, + ), + } + + ctx := newPathFindingTestContext(t, testChannels, "roasbeef") + defer ctx.cleanup() + + conner := ctx.keyFromAlias("conner") + joost := ctx.keyFromAlias("joost") + + // Conner's node in the graph has an unknown required feature (100). + // Pathfinding should fail since we check the destination's features for + // unknown required features before beginning pathfinding. + expErr := feature.NewErrUnknownRequired([]lnwire.FeatureBit{100}) + _, err := ctx.findPath(conner, 100) + if !reflect.DeepEqual(err, expErr) { + t.Fatalf("path shouldn't have been found: %v", err) + } + + // Now, try to find a route to joost through conner. The destination + // features are valid, but conner's feature vector in the graph still + // requires feature 100. We expect errNoPathFound and not the error + // above since intermediate hops are simply skipped if they have invalid + // feature vectors, leaving no possible route to joost. This asserts + // that we don't try to route _through_ nodes with unknown required + // features. 
+ _, err = ctx.findPath(joost, 100) + if err != errNoPathFound { + t.Fatalf("path shouldn't have been found: %v", err) + } +} + +// TestDestPaymentAddr asserts that we properly detect when we can send a +// payment address to a receiver, and also that we fallback to the receiver's +// node announcement if we don't have an invoice features. +func TestDestPaymentAddr(t *testing.T) { + t.Parallel() + + testChannels := []*testChannel{ + symmetricTestChannel("roasbeef", "luoji", 100000, + &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: 100000000, + }, + ), + } + + ctx := newPathFindingTestContext(t, testChannels, "roasbeef") + defer ctx.cleanup() + + luoji := ctx.keyFromAlias("luoji") + + // Add payment address w/o any invoice features. + ctx.restrictParams.PaymentAddr = &[32]byte{1} + + // Add empty destination features. This should cause us to fail, since + // this overrides anything in the graph. + ctx.restrictParams.DestFeatures = lnwire.EmptyFeatureVector() + + _, err := ctx.findPath(luoji, 100) + if err != errNoPaymentAddr { t.Fatalf("path shouldn't have been found: %v", err) } + + // Now, set the TLV and payment address features for the destination. We + // should succeed in finding a path to luoji. 
+ ctx.restrictParams.DestFeatures = tlvPayAddrFeatures + + path, err := ctx.findPath(luoji, 100) + if err != nil { + t.Fatalf("path should have been found: %v", err) + } + assertExpectedPath(t, ctx.testGraphInstance.aliasMap, path, "luoji") } func TestPathInsufficientCapacity(t *testing.T) { @@ -1318,14 +1763,12 @@ func TestPathInsufficientCapacity(t *testing.T) { target := graph.aliasMap["sophon"] payAmt := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) - _, err = findPath( - &graphParams{ - graph: graph.graph, - }, + _, err = dbFindPath( + graph.graph, nil, nil, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, + sourceNode.PubKeyBytes, target, payAmt, 0, ) - if !IsError(err, ErrNoPathFound) { + if err != errInsufficientBalance { t.Fatalf("graph shouldn't be able to support payment: %v", err) } } @@ -1351,14 +1794,12 @@ func TestRouteFailMinHTLC(t *testing.T) { // attempt should fail. target := graph.aliasMap["songoku"] payAmt := lnwire.MilliSatoshi(10) - _, err = findPath( - &graphParams{ - graph: graph.graph, - }, + _, err = dbFindPath( + graph.graph, nil, nil, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, + sourceNode.PubKeyBytes, target, payAmt, 0, ) - if !IsError(err, ErrNoPathFound) { + if err != errNoPathFound { t.Fatalf("graph shouldn't be able to support payment: %v", err) } } @@ -1394,54 +1835,35 @@ func TestRouteFailMaxHTLC(t *testing.T) { }), } - graph, err := createTestGraphFromChannels(testChannels, "roasbeef") - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } - defer graph.cleanUp() - - sourceNode, err := graph.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } + ctx := newPathFindingTestContext(t, testChannels, "roasbeef") + defer ctx.cleanup() // First, attempt to send a payment greater than the max HTLC we are // about to set, which should succeed. 
- target := graph.aliasMap["target"] + target := ctx.keyFromAlias("target") payAmt := lnwire.MilliSatoshi(100001) - _, err = findPath( - &graphParams{ - graph: graph.graph, - }, - noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, - ) + _, err := ctx.findPath(target, payAmt) if err != nil { t.Fatalf("graph should've been able to support payment: %v", err) } // Next, update the middle edge policy to only allow payments up to 100k // msat. - _, midEdge, _, err := graph.graph.FetchChannelEdgesByID(firstToSecondID) + graph := ctx.testGraphInstance.graph + _, midEdge, _, err := graph.FetchChannelEdgesByID(firstToSecondID) if err != nil { t.Fatalf("unable to fetch channel edges by ID: %v", err) } midEdge.MessageFlags = 1 midEdge.MaxHTLC = payAmt - 1 - if err := graph.graph.UpdateEdgePolicy(midEdge); err != nil { + if err := graph.UpdateEdgePolicy(midEdge); err != nil { t.Fatalf("unable to update edge: %v", err) } // We'll now attempt to route through that edge with a payment above // 100k msat, which should fail. 
- _, err = findPath( - &graphParams{ - graph: graph.graph, - }, - noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, - ) - if !IsError(err, ErrNoPathFound) { + _, err = ctx.findPath(target, payAmt) + if err != errNoPathFound { t.Fatalf("graph shouldn't be able to support payment: %v", err) } } @@ -1469,12 +1891,10 @@ func TestRouteFailDisabledEdge(t *testing.T) { // succeed without issue, and return a single path via phamnuwen target := graph.aliasMap["sophon"] payAmt := lnwire.NewMSatFromSatoshis(105000) - _, err = findPath( - &graphParams{ - graph: graph.graph, - }, + _, err = dbFindPath( + graph.graph, nil, nil, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, + sourceNode.PubKeyBytes, target, payAmt, 0, ) if err != nil { t.Fatalf("unable to find path: %v", err) @@ -1497,12 +1917,10 @@ func TestRouteFailDisabledEdge(t *testing.T) { t.Fatalf("unable to update edge: %v", err) } - _, err = findPath( - &graphParams{ - graph: graph.graph, - }, + _, err = dbFindPath( + graph.graph, nil, nil, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, + sourceNode.PubKeyBytes, target, payAmt, 0, ) if err != nil { t.Fatalf("unable to find path: %v", err) @@ -1522,14 +1940,12 @@ func TestRouteFailDisabledEdge(t *testing.T) { // If we attempt to route through that edge, we should get a failure as // it is no longer eligible. - _, err = findPath( - &graphParams{ - graph: graph.graph, - }, + _, err = dbFindPath( + graph.graph, nil, nil, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, + sourceNode.PubKeyBytes, target, payAmt, 0, ) - if !IsError(err, ErrNoPathFound) { + if err != errNoPathFound { t.Fatalf("graph shouldn't be able to support payment: %v", err) } } @@ -1556,12 +1972,10 @@ func TestPathSourceEdgesBandwidth(t *testing.T) { // cheapest path. 
target := graph.aliasMap["sophon"] payAmt := lnwire.NewMSatFromSatoshis(50000) - path, err := findPath( - &graphParams{ - graph: graph.graph, - }, + path, err := dbFindPath( + graph.graph, nil, nil, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, + sourceNode.PubKeyBytes, target, payAmt, 0, ) if err != nil { t.Fatalf("unable to find path: %v", err) @@ -1579,15 +1993,12 @@ func TestPathSourceEdgesBandwidth(t *testing.T) { // Since both these edges has a bandwidth of zero, no path should be // found. - _, err = findPath( - &graphParams{ - graph: graph.graph, - bandwidthHints: bandwidths, - }, + _, err = dbFindPath( + graph.graph, nil, bandwidths, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, + sourceNode.PubKeyBytes, target, payAmt, 0, ) - if !IsError(err, ErrNoPathFound) { + if err != errNoPathFound { t.Fatalf("graph shouldn't be able to support payment: %v", err) } @@ -1597,13 +2008,10 @@ func TestPathSourceEdgesBandwidth(t *testing.T) { // Now, if we attempt to route again, we should find the path via // phamnuven, as the other source edge won't be considered. - path, err = findPath( - &graphParams{ - graph: graph.graph, - bandwidthHints: bandwidths, - }, + path, err = dbFindPath( + graph.graph, nil, bandwidths, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, + sourceNode.PubKeyBytes, target, payAmt, 0, ) if err != nil { t.Fatalf("unable to find path: %v", err) @@ -1628,13 +2036,10 @@ func TestPathSourceEdgesBandwidth(t *testing.T) { // Since we ignore disable flags for local channels, a path should // still be found. 
- path, err = findPath( - &graphParams{ - graph: graph.graph, - bandwidthHints: bandwidths, - }, + path, err = dbFindPath( + graph.graph, nil, bandwidths, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, payAmt, + sourceNode.PubKeyBytes, target, payAmt, 0, ) if err != nil { t.Fatalf("unable to find path: %v", err) @@ -1669,11 +2074,7 @@ func TestPathFindSpecExample(t *testing.T) { // Carol, so we set "B" as the source node so path finding starts from // Bob. bob := ctx.aliases["B"] - bobKey, err := btcec.ParsePubKey(bob[:], btcec.S256()) - if err != nil { - t.Fatal(err) - } - bobNode, err := ctx.graph.FetchLightningNode(bobKey) + bobNode, err := ctx.graph.FetchLightningNode(nil, bob) if err != nil { t.Fatalf("unable to find bob: %v", err) } @@ -1685,7 +2086,8 @@ func TestPathFindSpecExample(t *testing.T) { carol := ctx.aliases["C"] const amt lnwire.MilliSatoshi = 4999999 route, err := ctx.router.FindRoute( - bobNode.PubKeyBytes, carol, amt, noRestrictions, nil, + bobNode.PubKeyBytes, carol, amt, noRestrictions, nil, nil, + zpay32.DefaultFinalCLTVDelta, ) if err != nil { t.Fatalf("unable to find route: %v", err) @@ -1722,11 +2124,7 @@ func TestPathFindSpecExample(t *testing.T) { // Next, we'll set A as the source node so we can assert that we create // the proper route for any queries starting with Alice. alice := ctx.aliases["A"] - aliceKey, err := btcec.ParsePubKey(alice[:], btcec.S256()) - if err != nil { - t.Fatal(err) - } - aliceNode, err := ctx.graph.FetchLightningNode(aliceKey) + aliceNode, err := ctx.graph.FetchLightningNode(nil, alice) if err != nil { t.Fatalf("unable to find alice: %v", err) } @@ -1744,7 +2142,8 @@ func TestPathFindSpecExample(t *testing.T) { // We'll now request a route from A -> B -> C. 
route, err = ctx.router.FindRoute( - source.PubKeyBytes, carol, amt, noRestrictions, nil, + source.PubKeyBytes, carol, amt, noRestrictions, nil, nil, + zpay32.DefaultFinalCLTVDelta, ) if err != nil { t.Fatalf("unable to find routes: %v", err) @@ -1859,45 +2258,26 @@ func TestRestrictOutgoingChannel(t *testing.T) { // target. The path through channel 2 is the highest cost path. testChannels := []*testChannel{ symmetricTestChannel("roasbeef", "a", 100000, &testChannelPolicy{ - Expiry: 144, - FeeRate: 400, - MinHTLC: 1, + Expiry: 144, }, 1), symmetricTestChannel("a", "target", 100000, &testChannelPolicy{ Expiry: 144, FeeRate: 400, - MinHTLC: 1, - }), + }, 4), symmetricTestChannel("roasbeef", "b", 100000, &testChannelPolicy{ - Expiry: 144, - FeeRate: 800, - MinHTLC: 1, + Expiry: 144, }, 2), symmetricTestChannel("roasbeef", "b", 100000, &testChannelPolicy{ - Expiry: 144, - FeeRate: 600, - MinHTLC: 1, + Expiry: 144, }, 3), symmetricTestChannel("b", "target", 100000, &testChannelPolicy{ Expiry: 144, - FeeRate: 400, - MinHTLC: 1, - }), - } - - testGraphInstance, err := createTestGraphFromChannels( - testChannels, "roasbeef", - ) - if err != nil { - t.Fatalf("unable to create graph: %v", err) + FeeRate: 800, + }, 5), } - defer testGraphInstance.cleanUp() - sourceNode, err := testGraphInstance.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } - sourceVertex := route.Vertex(sourceNode.PubKeyBytes) + ctx := newPathFindingTestContext(t, testChannels, "roasbeef") + defer ctx.cleanup() const ( startingHeight = 100 @@ -1905,30 +2285,23 @@ func TestRestrictOutgoingChannel(t *testing.T) { ) paymentAmt := lnwire.NewMSatFromSatoshis(100) - target := testGraphInstance.aliasMap["target"] + target := ctx.keyFromAlias("target") outgoingChannelID := uint64(2) // Find the best path given the restriction to only use channel 2 as the // outgoing channel. 
- path, err := findPath( - &graphParams{ - graph: testGraphInstance.graph, - }, - &RestrictParams{ - FeeLimit: noFeeLimit, - OutgoingChannelID: &outgoingChannelID, - ProbabilitySource: noProbabilitySource, - CltvLimit: math.MaxUint32, - }, - testPathFindingConfig, - sourceVertex, target, paymentAmt, - ) + ctx.restrictParams.OutgoingChannelID = &outgoingChannelID + path, err := ctx.findPath(target, paymentAmt) if err != nil { t.Fatalf("unable to find path: %v", err) } route, err := newRoute( - paymentAmt, sourceVertex, path, startingHeight, - finalHopCLTV, nil, + ctx.source, path, startingHeight, + finalHopParams{ + amt: paymentAmt, + cltvDelta: finalHopCLTV, + records: nil, + }, ) if err != nil { t.Fatalf("unable to create path: %v", err) @@ -1942,6 +2315,51 @@ func TestRestrictOutgoingChannel(t *testing.T) { } } +// TestRestrictLastHop asserts that a last hop restriction is obeyed by the path +// finding algorithm. +func TestRestrictLastHop(t *testing.T) { + t.Parallel() + + // Set up a test graph with three possible paths from roasbeef to + // target. The path via channel 1 and 2 is the lowest cost path. + testChannels := []*testChannel{ + symmetricTestChannel("source", "a", 100000, &testChannelPolicy{ + Expiry: 144, + }, 1), + symmetricTestChannel("a", "target", 100000, &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + }, 2), + symmetricTestChannel("source", "b", 100000, &testChannelPolicy{ + Expiry: 144, + }, 3), + symmetricTestChannel("b", "target", 100000, &testChannelPolicy{ + Expiry: 144, + FeeRate: 800, + }, 4), + } + + ctx := newPathFindingTestContext(t, testChannels, "source") + defer ctx.cleanup() + + paymentAmt := lnwire.NewMSatFromSatoshis(100) + target := ctx.keyFromAlias("target") + lastHop := ctx.keyFromAlias("b") + + // Find the best path given the restriction to use b as the last hop. + // This should force pathfinding to not take the lowest cost option. 
+ ctx.restrictParams.LastHop = &lastHop + path, err := ctx.findPath(target, paymentAmt) + if err != nil { + t.Fatalf("unable to find path: %v", err) + } + if path[0].ChannelID != 3 { + t.Fatalf("expected route to pass through channel 3, "+ + "but channel %v was selected instead", + path[0].ChannelID) + } +} + // TestCltvLimit asserts that a cltv limit is obeyed by the path finding // algorithm. func TestCltvLimit(t *testing.T) { @@ -1984,38 +2402,17 @@ func testCltvLimit(t *testing.T, limit uint32, expectedChannel uint64) { }), } - testGraphInstance, err := createTestGraphFromChannels( - testChannels, "roasbeef", - ) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } - defer testGraphInstance.cleanUp() - - sourceNode, err := testGraphInstance.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } - sourceVertex := route.Vertex(sourceNode.PubKeyBytes) + ctx := newPathFindingTestContext(t, testChannels, "roasbeef") + defer ctx.cleanup() paymentAmt := lnwire.NewMSatFromSatoshis(100) - target := testGraphInstance.aliasMap["target"] + target := ctx.keyFromAlias("target") - path, err := findPath( - &graphParams{ - graph: testGraphInstance.graph, - }, - &RestrictParams{ - FeeLimit: noFeeLimit, - CltvLimit: limit, - ProbabilitySource: noProbabilitySource, - }, - testPathFindingConfig, - sourceVertex, target, paymentAmt, - ) + ctx.restrictParams.CltvLimit = limit + path, err := ctx.findPath(target, paymentAmt) if expectedChannel == 0 { // Finish test if we expect no route. 
- if IsError(err, ErrNoPathFound) { + if err == errNoPathFound { return } t.Fatal("expected no path to be found") @@ -2029,8 +2426,12 @@ func testCltvLimit(t *testing.T, limit uint32, expectedChannel uint64) { finalHopCLTV = 1 ) route, err := newRoute( - paymentAmt, sourceVertex, path, startingHeight, finalHopCLTV, - nil, + ctx.source, path, startingHeight, + finalHopParams{ + amt: paymentAmt, + cltvDelta: finalHopCLTV, + records: nil, + }, ) if err != nil { t.Fatalf("unable to create path: %v", err) @@ -2146,27 +2547,16 @@ func testProbabilityRouting(t *testing.T, p10, p11, p20, minProbability float64, }, 20), } - testGraphInstance, err := createTestGraphFromChannels( - testChannels, "roasbeef", - ) - if err != nil { - t.Fatalf("unable to create graph: %v", err) - } - defer testGraphInstance.cleanUp() - - alias := testGraphInstance.aliasMap + ctx := newPathFindingTestContext(t, testChannels, "roasbeef") + defer ctx.cleanup() - sourceNode, err := testGraphInstance.graph.SourceNode() - if err != nil { - t.Fatalf("unable to fetch source node: %v", err) - } - sourceVertex := route.Vertex(sourceNode.PubKeyBytes) + alias := ctx.testGraphInstance.aliasMap paymentAmt := lnwire.NewMSatFromSatoshis(100) - target := testGraphInstance.aliasMap["target"] + target := ctx.testGraphInstance.aliasMap["target"] // Configure a probability source with the test parameters. 
- probabilitySource := func(fromNode, toNode route.Vertex, + ctx.restrictParams.ProbabilitySource = func(fromNode, toNode route.Vertex, amt lnwire.MilliSatoshi) float64 { if amt == 0 { @@ -2185,23 +2575,14 @@ func testProbabilityRouting(t *testing.T, p10, p11, p20, minProbability float64, } } - path, err := findPath( - &graphParams{ - graph: testGraphInstance.graph, - }, - &RestrictParams{ - FeeLimit: noFeeLimit, - ProbabilitySource: probabilitySource, - CltvLimit: math.MaxUint32, - }, - &PathFindingConfig{ - PaymentAttemptPenalty: lnwire.NewMSatFromSatoshis(10), - MinProbability: minProbability, - }, - sourceVertex, target, paymentAmt, - ) + ctx.pathFindingConfig = PathFindingConfig{ + PaymentAttemptPenalty: lnwire.NewMSatFromSatoshis(10), + MinProbability: minProbability, + } + + path, err := ctx.findPath(target, paymentAmt) if expectedChan == 0 { - if err == nil || !IsError(err, ErrNoPathFound) { + if err != errNoPathFound { t.Fatalf("expected no path found, but got %v", err) } return @@ -2217,3 +2598,298 @@ func testProbabilityRouting(t *testing.T, p10, p11, p20, minProbability float64, path[1].ChannelID) } } + +// TestEqualCostRouteSelection asserts that route probability will be used as a +// tie breaker in case the path finding probabilities are equal. +func TestEqualCostRouteSelection(t *testing.T) { + t.Parallel() + + // Set up a test graph with two possible paths to the target: via a and + // via b. The routing fees and probabilities are chosen such that the + // algorithm will first explore target->a->source (backwards search). + // This route has fee 6 and a penality of 4 for the 25% success + // probability. The algorithm will then proceed with evaluating + // target->b->source, which has a fee of 8 and a penalty of 2 for the + // 50% success probability. Both routes have the same path finding cost + // of 10. It is expected that in that case, the highest probability + // route (through b) is chosen. 
+ testChannels := []*testChannel{ + symmetricTestChannel("source", "a", 100000, &testChannelPolicy{}), + symmetricTestChannel("source", "b", 100000, &testChannelPolicy{}), + symmetricTestChannel("a", "target", 100000, &testChannelPolicy{ + Expiry: 144, + FeeBaseMsat: lnwire.NewMSatFromSatoshis(6), + MinHTLC: 1, + }, 1), + symmetricTestChannel("b", "target", 100000, &testChannelPolicy{ + Expiry: 100, + FeeBaseMsat: lnwire.NewMSatFromSatoshis(8), + MinHTLC: 1, + }, 2), + } + + ctx := newPathFindingTestContext(t, testChannels, "source") + defer ctx.cleanup() + + alias := ctx.testGraphInstance.aliasMap + + paymentAmt := lnwire.NewMSatFromSatoshis(100) + target := ctx.testGraphInstance.aliasMap["target"] + + ctx.restrictParams.ProbabilitySource = func(fromNode, toNode route.Vertex, + amt lnwire.MilliSatoshi) float64 { + + switch { + case fromNode == alias["source"] && toNode == alias["a"]: + return 0.25 + case fromNode == alias["source"] && toNode == alias["b"]: + return 0.5 + default: + return 1 + } + } + + ctx.pathFindingConfig = PathFindingConfig{ + PaymentAttemptPenalty: lnwire.NewMSatFromSatoshis(1), + } + + path, err := ctx.findPath(target, paymentAmt) + if err != nil { + t.Fatal(err) + } + + if path[1].ChannelID != 2 { + t.Fatalf("expected route to pass through channel %v, "+ + "but channel %v was selected instead", 2, + path[1].ChannelID) + } +} + +// TestNoCycle tries to guide the path finding algorithm into reconstructing an +// endless route. It asserts that the algorithm is able to handle this properly. +func TestNoCycle(t *testing.T) { + t.Parallel() + + // Set up a test graph with two paths: source->a->target and + // source->b->c->target. The fees are setup such that, searching + // backwards, the algorithm will evaluate the following end of the route + // first: ->target->c->target. This does not make sense, because if + // target is reached, there is no need to continue to c. A proper + // implementation will then go on with alternative routes. 
It will then + // consider ->a->target because its cost is lower than the alternative + // ->b->c->target and finally find source->a->target as the best route. + testChannels := []*testChannel{ + symmetricTestChannel("source", "a", 100000, &testChannelPolicy{ + Expiry: 144, + }, 1), + symmetricTestChannel("source", "b", 100000, &testChannelPolicy{ + Expiry: 144, + }, 2), + symmetricTestChannel("b", "c", 100000, &testChannelPolicy{ + Expiry: 144, + FeeBaseMsat: 2000, + }, 3), + symmetricTestChannel("c", "target", 100000, &testChannelPolicy{ + Expiry: 144, + FeeBaseMsat: 0, + }, 4), + symmetricTestChannel("a", "target", 100000, &testChannelPolicy{ + Expiry: 144, + FeeBaseMsat: 600, + }, 5), + } + + ctx := newPathFindingTestContext(t, testChannels, "source") + defer ctx.cleanup() + + const ( + startingHeight = 100 + finalHopCLTV = 1 + ) + + paymentAmt := lnwire.NewMSatFromSatoshis(100) + target := ctx.keyFromAlias("target") + + // Find the best path given the restriction to only use channel 2 as the + // outgoing channel. + path, err := ctx.findPath(target, paymentAmt) + if err != nil { + t.Fatalf("unable to find path: %v", err) + } + route, err := newRoute( + ctx.source, path, startingHeight, + finalHopParams{ + amt: paymentAmt, + cltvDelta: finalHopCLTV, + records: nil, + }, + ) + if err != nil { + t.Fatalf("unable to create path: %v", err) + } + + if len(route.Hops) != 2 { + t.Fatalf("unexpected route") + } + if route.Hops[0].ChannelID != 1 { + t.Fatalf("unexpected first hop") + } + if route.Hops[1].ChannelID != 5 { + t.Fatalf("unexpected second hop") + } +} + +// TestRouteToSelf tests that it is possible to find a route to the self node. 
+func TestRouteToSelf(t *testing.T) { + t.Parallel() + + testChannels := []*testChannel{ + symmetricTestChannel("source", "a", 100000, &testChannelPolicy{ + Expiry: 144, + FeeBaseMsat: 500, + }, 1), + symmetricTestChannel("source", "b", 100000, &testChannelPolicy{ + Expiry: 144, + FeeBaseMsat: 1000, + }, 2), + symmetricTestChannel("a", "b", 100000, &testChannelPolicy{ + Expiry: 144, + FeeBaseMsat: 1000, + }, 3), + } + + ctx := newPathFindingTestContext(t, testChannels, "source") + defer ctx.cleanup() + + paymentAmt := lnwire.NewMSatFromSatoshis(100) + target := ctx.source + + // Find the best path to self. We expect this to be source->a->source, + // because a charges the lowest forwarding fee. + path, err := ctx.findPath(target, paymentAmt) + if err != nil { + t.Fatalf("unable to find path: %v", err) + } + ctx.assertPath(path, []uint64{1, 1}) + + outgoingChanID := uint64(1) + lastHop := ctx.keyFromAlias("b") + ctx.restrictParams.OutgoingChannelID = &outgoingChanID + ctx.restrictParams.LastHop = &lastHop + + // Find the best path to self given that we want to go out via channel 1 + // and return through node b. 
+ path, err = ctx.findPath(target, paymentAmt) + if err != nil { + t.Fatalf("unable to find path: %v", err) + } + ctx.assertPath(path, []uint64{1, 3, 2}) +} + +type pathFindingTestContext struct { + t *testing.T + graph *channeldb.ChannelGraph + restrictParams RestrictParams + bandwidthHints map[uint64]lnwire.MilliSatoshi + pathFindingConfig PathFindingConfig + testGraphInstance *testGraphInstance + source route.Vertex +} + +func newPathFindingTestContext(t *testing.T, testChannels []*testChannel, + source string) *pathFindingTestContext { + + testGraphInstance, err := createTestGraphFromChannels( + testChannels, source, + ) + if err != nil { + t.Fatalf("unable to create graph: %v", err) + } + + sourceNode, err := testGraphInstance.graph.SourceNode() + if err != nil { + t.Fatalf("unable to fetch source node: %v", err) + } + + ctx := &pathFindingTestContext{ + t: t, + testGraphInstance: testGraphInstance, + source: route.Vertex(sourceNode.PubKeyBytes), + pathFindingConfig: *testPathFindingConfig, + graph: testGraphInstance.graph, + restrictParams: *noRestrictions, + } + + return ctx +} + +func (c *pathFindingTestContext) keyFromAlias(alias string) route.Vertex { + return c.testGraphInstance.aliasMap[alias] +} + +func (c *pathFindingTestContext) aliasFromKey(pubKey route.Vertex) string { + for alias, key := range c.testGraphInstance.aliasMap { + if key == pubKey { + return alias + } + } + return "" +} + +func (c *pathFindingTestContext) cleanup() { + c.testGraphInstance.cleanUp() +} + +func (c *pathFindingTestContext) findPath(target route.Vertex, + amt lnwire.MilliSatoshi) ([]*channeldb.ChannelEdgePolicy, + error) { + + return dbFindPath( + c.graph, nil, c.bandwidthHints, &c.restrictParams, + &c.pathFindingConfig, c.source, target, amt, 0, + ) +} + +func (c *pathFindingTestContext) assertPath(path []*channeldb.ChannelEdgePolicy, expected []uint64) { + if len(path) != len(expected) { + c.t.Fatalf("expected path of length %v, but got %v", + len(expected), len(path)) + 
} + + for i, edge := range path { + if edge.ChannelID != expected[i] { + c.t.Fatalf("expected hop %v to be channel %v, "+ + "but got %v", i, expected[i], edge.ChannelID) + } + } +} + +// dbFindPath calls findPath after getting a db transaction from the database +// graph. +func dbFindPath(graph *channeldb.ChannelGraph, + additionalEdges map[route.Vertex][]*channeldb.ChannelEdgePolicy, + bandwidthHints map[uint64]lnwire.MilliSatoshi, + r *RestrictParams, cfg *PathFindingConfig, + source, target route.Vertex, amt lnwire.MilliSatoshi, + finalHtlcExpiry int32) ([]*channeldb.ChannelEdgePolicy, error) { + + routingTx, err := newDbRoutingTx(graph) + if err != nil { + return nil, err + } + defer func() { + err := routingTx.close() + if err != nil { + log.Errorf("Error closing db tx: %v", err) + } + }() + + return findPath( + &graphParams{ + additionalEdges: additionalEdges, + bandwidthHints: bandwidthHints, + graph: routingTx, + }, + r, cfg, source, target, amt, finalHtlcExpiry, + ) +} diff --git a/routing/payment_lifecycle.go b/routing/payment_lifecycle.go index 695a0d4980..dfda814a54 100644 --- a/routing/payment_lifecycle.go +++ b/routing/payment_lifecycle.go @@ -2,281 +2,607 @@ package routing import ( "fmt" + "sync" "time" "github.com/davecgh/go-spew/spew" sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/htlcswitch" + "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/route" ) -// errNoRoute is returned when all routes from the payment session have been -// attempted. -type errNoRoute struct { - // lastError is the error encountered during the last payment attempt, - // if at least one attempt has been made. - lastError error +// paymentLifecycle holds all information about the current state of a payment +// needed to resume if from any point. 
+type paymentLifecycle struct { + router *ChannelRouter + totalAmount lnwire.MilliSatoshi + feeLimit lnwire.MilliSatoshi + paymentHash lntypes.Hash + paySession PaymentSession + timeoutChan <-chan time.Time + currentHeight int32 } -// Error returns a string representation of the error. -func (e errNoRoute) Error() string { - return fmt.Sprintf("unable to route payment to destination: %v", - e.lastError) +// payemntState holds a number of key insights learned from a given MPPayment +// that we use to determine what to do on each payment loop iteration. +type paymentState struct { + numShardsInFlight int + remainingAmt lnwire.MilliSatoshi + remainingFees lnwire.MilliSatoshi + terminate bool } -// paymentLifecycle holds all information about the current state of a payment -// needed to resume if from any point. -type paymentLifecycle struct { - router *ChannelRouter - payment *LightningPayment - paySession PaymentSession - timeoutChan <-chan time.Time - currentHeight int32 - finalCLTVDelta uint16 - attempt *channeldb.PaymentAttemptInfo - circuit *sphinx.Circuit - lastError error +// paymentState uses the passed payment to find the latest information we need +// to act on every iteration of the payment loop. +func (p *paymentLifecycle) paymentState(payment *channeldb.MPPayment) ( + *paymentState, error) { + + // Fetch the total amount and fees that has already been sent in + // settled and still in-flight shards. + sentAmt, fees := payment.SentAmt() + + // Sanity check we haven't sent a value larger than the payment amount. + if sentAmt > p.totalAmount { + return nil, fmt.Errorf("amount sent %v exceeds "+ + "total amount %v", sentAmt, p.totalAmount) + } + + // We'll subtract the used fee from our fee budget, but allow the fees + // of the already sent shards to exceed our budget (can happen after + // restarts). + feeBudget := p.feeLimit + if fees <= feeBudget { + feeBudget -= fees + } else { + feeBudget = 0 + } + + // Get any terminal info for this payment. 
+ settle, failure := payment.TerminalInfo() + + // If either an HTLC settled, or the payment has a payment level + // failure recorded, it means we should terminate the moment all shards + // have returned with a result. + terminate := settle != nil || failure != nil + + activeShards := payment.InFlightHTLCs() + return &paymentState{ + numShardsInFlight: len(activeShards), + remainingAmt: p.totalAmount - sentAmt, + remainingFees: feeBudget, + terminate: terminate, + }, nil } // resumePayment resumes the paymentLifecycle from the current state. func (p *paymentLifecycle) resumePayment() ([32]byte, *route.Route, error) { + shardHandler := &shardHandler{ + router: p.router, + paymentHash: p.paymentHash, + shardErrors: make(chan error), + quit: make(chan struct{}), + } + + // When the payment lifecycle loop exits, we make sure to signal any + // sub goroutine of the shardHandler to exit, then wait for them to + // return. + defer shardHandler.stop() + + // If we had any existing attempts outstanding, we'll start by spinning + // up goroutines that'll collect their results and deliver them to the + // lifecycle loop below. + payment, err := p.router.cfg.Control.FetchPayment( + p.paymentHash, + ) + if err != nil { + return [32]byte{}, nil, err + } + + for _, a := range payment.InFlightHTLCs() { + a := a + + log.Debugf("Resuming payment shard %v for hash %v", + a.AttemptID, p.paymentHash) + + shardHandler.collectResultAsync(&a.HTLCAttemptInfo) + } + // We'll continue until either our payment succeeds, or we encounter a // critical error during path finding. for { + // Start by quickly checking if there are any outcomes already + // available to handle before we reevaluate our state. + if err := shardHandler.checkShards(); err != nil { + return [32]byte{}, nil, err + } - // If this payment had no existing payment attempt, we create - // and send one now. 
- if p.attempt == nil { - firstHop, htlcAdd, err := p.createNewPaymentAttempt() - if err != nil { - return [32]byte{}, nil, err - } + // We start every iteration by fetching the lastest state of + // the payment from the ControlTower. This ensures that we will + // act on the latest available information, whether we are + // resuming an existing payment or just sent a new attempt. + payment, err := p.router.cfg.Control.FetchPayment( + p.paymentHash, + ) + if err != nil { + return [32]byte{}, nil, err + } - // Now that the attempt is created and checkpointed to - // the DB, we send it. - sendErr := p.sendPaymentAttempt(firstHop, htlcAdd) - if sendErr != nil { - // We must inspect the error to know whether it - // was critical or not, to decide whether we - // should continue trying. - err := p.handleSendError(sendErr) - if err != nil { - return [32]byte{}, nil, err - } + // Using this latest state of the payment, calculate + // information about our active shards and terminal conditions. + state, err := p.paymentState(payment) + if err != nil { + return [32]byte{}, nil, err + } - // Error was handled successfully, reset the - // attempt to indicate we want to make a new - // attempt. - p.attempt = nil - continue + log.Debugf("Payment %v in state terminate=%v, "+ + "active_shards=%v, rem_value=%v, fee_limit=%v", + p.paymentHash, state.terminate, state.numShardsInFlight, + state.remainingAmt, state.remainingFees) + + switch { + + // We have a terminal condition and no active shards, we are + // ready to exit. + case state.terminate && state.numShardsInFlight == 0: + // Find the first successful shard and return + // the preimage and route. + for _, a := range payment.HTLCs { + if a.Settle != nil { + return a.Settle.Preimage, &a.Route, nil + } } - } else { - // If this was a resumed attempt, we must regenerate the - // circuit. 
- _, c, err := generateSphinxPacket( - &p.attempt.Route, p.payment.PaymentHash[:], - p.attempt.SessionKey, - ) - if err != nil { + + // Payment failed. + return [32]byte{}, nil, *payment.FailureReason + + // If we either reached a terminal error condition (but had + // active shards still) or there is no remaining value to send, + // we'll wait for a shard outcome. + case state.terminate || state.remainingAmt == 0: + // We still have outstanding shards, so wait for a new + // outcome to be available before re-evaluating our + // state. + if err := shardHandler.waitForShard(); err != nil { return [32]byte{}, nil, err } - p.circuit = c - } - - // Using the created circuit, initialize the error decrypter so we can - // parse+decode any failures incurred by this payment within the - // switch. - errorDecryptor := &htlcswitch.SphinxErrorDecrypter{ - OnionErrorDecrypter: sphinx.NewOnionErrorDecrypter(p.circuit), + continue } - // Now ask the switch to return the result of the payment when - // available. - resultChan, err := p.router.cfg.Payer.GetPaymentResult( - p.attempt.PaymentID, p.payment.PaymentHash, errorDecryptor, - ) - switch { + // Before we attempt any new shard, we'll check to see if + // either we've gone past the payment attempt timeout, or the + // router is exiting. In either case, we'll stop this payment + // attempt short. If a timeout is not applicable, timeoutChan + // will be nil. + select { + case <-p.timeoutChan: + log.Warnf("payment attempt not completed before " + + "timeout") + + // By marking the payment failed with the control + // tower, no further shards will be launched and we'll + // return with an error the moment all active shards + // have finished. 
+ saveErr := p.router.cfg.Control.Fail( + p.paymentHash, channeldb.FailureReasonTimeout, + ) + if saveErr != nil { + return [32]byte{}, nil, saveErr + } - // If this payment ID is unknown to the Switch, it means it was - // never checkpointed and forwarded by the switch before a - // restart. In this case we can safely send a new payment - // attempt, and wait for its result to be available. - case err == htlcswitch.ErrPaymentIDNotFound: - log.Debugf("Payment ID %v for hash %x not found in "+ - "the Switch, retrying.", p.attempt.PaymentID, - p.payment.PaymentHash) - - // Reset the attempt to indicate we want to make a new - // attempt. - p.attempt = nil continue - // A critical, unexpected error was encountered. - case err != nil: - log.Errorf("Failed getting result for paymentID %d "+ - "from switch: %v", p.attempt.PaymentID, err) + case <-p.router.quit: + return [32]byte{}, nil, ErrRouterShuttingDown - return [32]byte{}, nil, err + // Fall through if we haven't hit our time limit. + default: } - // The switch knows about this payment, we'll wait for a result - // to be available. - var ( - result *htlcswitch.PaymentResult - ok bool + // Create a new payment attempt from the given payment session. + rt, err := p.paySession.RequestRoute( + state.remainingAmt, state.remainingFees, + uint32(state.numShardsInFlight), uint32(p.currentHeight), ) + if err != nil { + log.Warnf("Failed to find route for payment %v: %v", + p.paymentHash, err) - select { - case result, ok = <-resultChan: + routeErr, ok := err.(noRouteError) if !ok { - return [32]byte{}, nil, htlcswitch.ErrSwitchExiting + return [32]byte{}, nil, err } - case <-p.router.quit: - return [32]byte{}, nil, ErrRouterShuttingDown + // There is no route to try, and we have no active + // shards. This means that there is no way for us to + // send the payment, so mark it failed with no route. 
+ if state.numShardsInFlight == 0 { + failureCode := routeErr.FailureReason() + log.Debugf("Marking payment %v permanently "+ + "failed with no route: %v", + p.paymentHash, failureCode) + + saveErr := p.router.cfg.Control.Fail( + p.paymentHash, failureCode, + ) + if saveErr != nil { + return [32]byte{}, nil, saveErr + } + + continue + } + + // We still have active shards, we'll wait for an + // outcome to be available before retrying. + if err := shardHandler.waitForShard(); err != nil { + return [32]byte{}, nil, err + } + continue + } + + // We found a route to try, launch a new shard. + attempt, outcome, err := shardHandler.launchShard(rt) + if err != nil { + return [32]byte{}, nil, err } - // In case of a payment failure, we use the error to decide - // whether we should retry. - if result.Error != nil { - log.Errorf("Attempt to send payment %x failed: %v", - p.payment.PaymentHash, result.Error) + // If we encountered a non-critical error when launching the + // shard, handle it. + if outcome.err != nil { + log.Warnf("Failed to launch shard %v for "+ + "payment %v: %v", attempt.AttemptID, + p.paymentHash, outcome.err) // We must inspect the error to know whether it was // critical or not, to decide whether we should // continue trying. - if err := p.handleSendError(result.Error); err != nil { + err := shardHandler.handleSendError( + attempt, outcome.err, + ) + if err != nil { return [32]byte{}, nil, err } - // Error was handled successfully, reset the attempt to - // indicate we want to make a new attempt. - p.attempt = nil + // Error was handled successfully, continue to make a + // new attempt. continue } - // We successfully got a payment result back from the switch. - log.Debugf("Payment %x succeeded with pid=%v", - p.payment.PaymentHash, p.attempt.PaymentID) + // Now that the shard was successfully sent, launch a go + // routine that will handle its result when its back. 
+ shardHandler.collectResultAsync(attempt) + } +} - // Report success to mission control. - err = p.router.cfg.MissionControl.ReportPaymentSuccess( - p.attempt.PaymentID, &p.attempt.Route, - ) - if err != nil { - log.Errorf("Error reporting payment success to mc: %v", - err) +// shardHandler holds what is necessary to send and collect the result of +// shards. +type shardHandler struct { + paymentHash lntypes.Hash + router *ChannelRouter + + // shardErrors is a channel where errors collected by calling + // collectResultAsync will be delivered. These results are meant to be + // inspected by calling waitForShard or checkShards, and the channel + // doesn't need to be initiated if the caller is using the sync + // collectResult directly. + shardErrors chan error + + // quit is closed to signal the sub goroutines of the payment lifecycle + // to stop. + quit chan struct{} + wg sync.WaitGroup +} + +// stop signals any active shard goroutine to exit and waits for them to exit. +func (p *shardHandler) stop() { + close(p.quit) + p.wg.Wait() +} + +// waitForShard blocks until any of the outstanding shards return. +func (p *shardHandler) waitForShard() error { + select { + case err := <-p.shardErrors: + return err + + case <-p.quit: + return fmt.Errorf("shard handler quitting") + + case <-p.router.quit: + return ErrRouterShuttingDown + } +} + +// checkShards is a non-blocking method that check if any shards has finished +// their execution. +func (p *shardHandler) checkShards() error { + for { + select { + case err := <-p.shardErrors: + if err != nil { + return err + } + + case <-p.quit: + return fmt.Errorf("shard handler quitting") + + case <-p.router.quit: + return ErrRouterShuttingDown + + default: + return nil } + } +} + +// launchOutcome is a type returned from launchShard that indicates whether the +// shard was successfully send onto the network. 
+type launchOutcome struct { + // err is non-nil if a non-critical error was encountered when trying + // to send the shard, and we successfully updated the control tower to + // reflect this error. This can be errors like not enough local + // balance for the given route etc. + err error +} + +// launchShard creates and sends an HTLC attempt along the given route, +// registering it with the control tower before sending it. It returns the +// HTLCAttemptInfo that was created for the shard, along with a launchOutcome. +// The launchOutcome is used to indicate whether the attempt was successfully +// sent. If the launchOutcome wraps a non-nil error, it means that the attempt +// was not sent onto the network, so no result will be available in the future +// for it. +func (p *shardHandler) launchShard(rt *route.Route) (*channeldb.HTLCAttemptInfo, + *launchOutcome, error) { + + // Using the route received from the payment session, create a new + // shard to send. + firstHop, htlcAdd, attempt, err := p.createNewPaymentAttempt( + rt, + ) + if err != nil { + return nil, nil, err + } + + // Before sending this HTLC to the switch, we checkpoint the fresh + // paymentID and route to the DB. This lets us know on startup the ID + // of the payment that we attempted to send, such that we can query the + // Switch for its whereabouts. The route is needed to handle the result + // when it eventually comes back. + err = p.router.cfg.Control.RegisterAttempt(p.paymentHash, attempt) + if err != nil { + return nil, nil, err + } - // In case of success we atomically store the db payment and - // move the payment to the success state. - err = p.router.cfg.Control.Success(p.payment.PaymentHash, result.Preimage) + // Now that the attempt is created and checkpointed to the DB, we send + // it. + sendErr := p.sendPaymentAttempt(attempt, firstHop, htlcAdd) + if sendErr != nil { + // TODO(joostjager): Distinguish unexpected internal errors + // from real send errors. 
+ err := p.failAttempt(attempt, sendErr) if err != nil { - log.Errorf("Unable to succeed payment "+ - "attempt: %v", err) - return [32]byte{}, nil, err + return nil, nil, err } - // Terminal state, return the preimage and the route - // taken. - return result.Preimage, &p.attempt.Route, nil + // Return a launchOutcome indicating the shard failed. + return attempt, &launchOutcome{ + err: sendErr, + }, nil } + return attempt, &launchOutcome{}, nil } -// createNewPaymentAttempt creates and stores a new payment attempt to the -// database. -func (p *paymentLifecycle) createNewPaymentAttempt() (lnwire.ShortChannelID, - *lnwire.UpdateAddHTLC, error) { +// shardResult holds the resulting outcome of a shard sent. +type shardResult struct { + // preimage is the payment preimage in case of a settled HTLC. Only set + // if err is non-nil. + preimage lntypes.Preimage - // Before we attempt this next payment, we'll check to see if either - // we've gone past the payment attempt timeout, or the router is - // exiting. In either case, we'll stop this payment attempt short. If a - // timeout is not applicable, timeoutChan will be nil. - select { - case <-p.timeoutChan: - // Mark the payment as failed because of the - // timeout. - err := p.router.cfg.Control.Fail( - p.payment.PaymentHash, channeldb.FailureReasonTimeout, - ) + // err indicates that the shard failed. + err error +} + +// collectResultAsync launches a goroutine that will wait for the result of the +// given HTLC attempt to be available then handle its result. Note that it will +// fail the payment with the control tower if a terminal error is encountered. +func (p *shardHandler) collectResultAsync(attempt *channeldb.HTLCAttemptInfo) { + p.wg.Add(1) + go func() { + defer p.wg.Done() + + // Block until the result is available. 
+ result, err := p.collectResult(attempt) if err != nil { - return lnwire.ShortChannelID{}, nil, err + if err != ErrRouterShuttingDown && + err != htlcswitch.ErrSwitchExiting { + + log.Errorf("Error collecting result for "+ + "shard %v for payment %v: %v", + attempt.AttemptID, p.paymentHash, err) + } + + select { + case p.shardErrors <- err: + case <-p.router.quit: + case <-p.quit: + } + return } - errStr := fmt.Sprintf("payment attempt not completed " + - "before timeout") + // If a non-critical error was encountered handle it and mark + // the payment failed if the failure was terminal. + if result.err != nil { + err := p.handleSendError(attempt, result.err) + if err != nil { + select { + case p.shardErrors <- err: + case <-p.router.quit: + case <-p.quit: + } + return + } + } + + select { + case p.shardErrors <- nil: + case <-p.router.quit: + case <-p.quit: + } + }() +} - return lnwire.ShortChannelID{}, nil, - newErr(ErrPaymentAttemptTimeout, errStr) +// collectResult waits for the result for the given attempt to be available +// from the Switch, then records the attempt outcome with the control tower. A +// shardResult is returned, indicating the final outcome of this HTLC attempt. +func (p *shardHandler) collectResult(attempt *channeldb.HTLCAttemptInfo) ( + *shardResult, error) { - case <-p.router.quit: - // The payment will be resumed from the current state - // after restart. - return lnwire.ShortChannelID{}, nil, ErrRouterShuttingDown + // Regenerate the circuit for this attempt. + _, circuit, err := generateSphinxPacket( + &attempt.Route, p.paymentHash[:], + attempt.SessionKey, + ) + if err != nil { + return nil, err + } - default: - // Fall through if we haven't hit our time limit, or - // are expiring. + // Using the created circuit, initialize the error decrypter so we can + // parse+decode any failures incurred by this payment within the + // switch. 
+ errorDecryptor := &htlcswitch.SphinxErrorDecrypter{ + OnionErrorDecrypter: sphinx.NewOnionErrorDecrypter(circuit), } - // Create a new payment attempt from the given payment session. - route, err := p.paySession.RequestRoute( - p.payment, uint32(p.currentHeight), p.finalCLTVDelta, + // Now ask the switch to return the result of the payment when + // available. + resultChan, err := p.router.cfg.Payer.GetPaymentResult( + attempt.AttemptID, p.paymentHash, errorDecryptor, ) - if err != nil { - log.Warnf("Failed to find route for payment %x: %v", - p.payment.PaymentHash, err) - - // If we're unable to successfully make a payment using - // any of the routes we've found, then mark the payment - // as permanently failed. - saveErr := p.router.cfg.Control.Fail( - p.payment.PaymentHash, channeldb.FailureReasonNoRoute, - ) - if saveErr != nil { - return lnwire.ShortChannelID{}, nil, saveErr + switch { + + // If this attempt ID is unknown to the Switch, it means it was never + // checkpointed and forwarded by the switch before a restart. In this + // case we can safely send a new payment attempt, and wait for its + // result to be available. + case err == htlcswitch.ErrPaymentIDNotFound: + log.Debugf("Payment ID %v for hash %v not found in "+ + "the Switch, retrying.", attempt.AttemptID, + p.paymentHash) + + cErr := p.failAttempt(attempt, err) + if cErr != nil { + return nil, cErr } - // If there was an error already recorded for this - // payment, we'll return that. - if p.lastError != nil { - return lnwire.ShortChannelID{}, nil, - errNoRoute{lastError: p.lastError} + return &shardResult{ + err: err, + }, nil + + // A critical, unexpected error was encountered. + case err != nil: + log.Errorf("Failed getting result for attemptID %d "+ + "from switch: %v", attempt.AttemptID, err) + + return nil, err + } + + // The switch knows about this payment, we'll wait for a result to be + // available. 
+ var ( + result *htlcswitch.PaymentResult + ok bool + ) + + select { + case result, ok = <-resultChan: + if !ok { + return nil, htlcswitch.ErrSwitchExiting } - // Terminal state, return. - return lnwire.ShortChannelID{}, nil, err + + case <-p.router.quit: + return nil, ErrRouterShuttingDown + + case <-p.quit: + return nil, fmt.Errorf("shard handler exiting") + } + + // In case of a payment failure, fail the attempt with the control + // tower and return. + if result.Error != nil { + err := p.failAttempt(attempt, result.Error) + if err != nil { + return nil, err + } + + return &shardResult{ + err: result.Error, + }, nil + } + + // We successfully got a payment result back from the switch. + log.Debugf("Payment %v succeeded with pid=%v", + p.paymentHash, attempt.AttemptID) + + // Report success to mission control. + err = p.router.cfg.MissionControl.ReportPaymentSuccess( + attempt.AttemptID, &attempt.Route, + ) + if err != nil { + log.Errorf("Error reporting payment success to mc: %v", + err) } + // In case of success we atomically store settle result to the DB move + // the shard to the settled state. + err = p.router.cfg.Control.SettleAttempt( + p.paymentHash, attempt.AttemptID, + &channeldb.HTLCSettleInfo{ + Preimage: result.Preimage, + SettleTime: p.router.cfg.Clock.Now(), + }, + ) + if err != nil { + log.Errorf("Unable to succeed payment attempt: %v", err) + return nil, err + } + + return &shardResult{ + preimage: result.Preimage, + }, nil +} + +// createNewPaymentAttempt creates a new payment attempt from the given route. +func (p *shardHandler) createNewPaymentAttempt(rt *route.Route) ( + lnwire.ShortChannelID, *lnwire.UpdateAddHTLC, + *channeldb.HTLCAttemptInfo, error) { + // Generate a new key to be used for this attempt. 
sessionKey, err := generateNewSessionKey() if err != nil { - return lnwire.ShortChannelID{}, nil, err + return lnwire.ShortChannelID{}, nil, nil, err } // Generate the raw encoded sphinx packet to be included along // with the htlcAdd message that we send directly to the // switch. - onionBlob, c, err := generateSphinxPacket( - route, p.payment.PaymentHash[:], sessionKey, + onionBlob, _, err := generateSphinxPacket( + rt, p.paymentHash[:], sessionKey, ) if err != nil { - return lnwire.ShortChannelID{}, nil, err + return lnwire.ShortChannelID{}, nil, nil, err } - // Update our cached circuit with the newly generated - // one. - p.circuit = c - // Craft an HTLC packet to send to the layer 2 switch. The // metadata within this packet will be used to route the // payment through the network, starting with the first-hop. htlcAdd := &lnwire.UpdateAddHTLC{ - Amount: route.TotalAmount, - Expiry: route.TotalTimeLock, - PaymentHash: p.payment.PaymentHash, + Amount: rt.TotalAmount, + Expiry: rt.TotalTimeLock, + PaymentHash: p.paymentHash, } copy(htlcAdd.OnionBlob[:], onionBlob) @@ -284,46 +610,37 @@ func (p *paymentLifecycle) createNewPaymentAttempt() (lnwire.ShortChannelID, // the payment. If this attempt fails, then we'll continue on // to the next available route. firstHop := lnwire.NewShortChanIDFromInt( - route.Hops[0].ChannelID, + rt.Hops[0].ChannelID, ) // We generate a new, unique payment ID that we will use for // this HTLC. - paymentID, err := p.router.cfg.NextPaymentID() + attemptID, err := p.router.cfg.NextPaymentID() if err != nil { - return lnwire.ShortChannelID{}, nil, err + return lnwire.ShortChannelID{}, nil, nil, err } // We now have all the information needed to populate // the current attempt information. - p.attempt = &channeldb.PaymentAttemptInfo{ - PaymentID: paymentID, - SessionKey: sessionKey, - Route: *route, - } - - // Before sending this HTLC to the switch, we checkpoint the - // fresh paymentID and route to the DB. 
This lets us know on - // startup the ID of the payment that we attempted to send, - // such that we can query the Switch for its whereabouts. The - // route is needed to handle the result when it eventually - // comes back. - err = p.router.cfg.Control.RegisterAttempt(p.payment.PaymentHash, p.attempt) - if err != nil { - return lnwire.ShortChannelID{}, nil, err + attempt := &channeldb.HTLCAttemptInfo{ + AttemptID: attemptID, + AttemptTime: p.router.cfg.Clock.Now(), + SessionKey: sessionKey, + Route: *rt, } - return firstHop, htlcAdd, nil + return firstHop, htlcAdd, attempt, nil } // sendPaymentAttempt attempts to send the current attempt to the switch. -func (p *paymentLifecycle) sendPaymentAttempt(firstHop lnwire.ShortChannelID, +func (p *shardHandler) sendPaymentAttempt( + attempt *channeldb.HTLCAttemptInfo, firstHop lnwire.ShortChannelID, htlcAdd *lnwire.UpdateAddHTLC) error { - log.Tracef("Attempting to send payment %x (pid=%v), "+ - "using route: %v", p.payment.PaymentHash, p.attempt.PaymentID, + log.Tracef("Attempting to send payment %v (pid=%v), "+ + "using route: %v", p.paymentHash, attempt.AttemptID, newLogClosure(func() string { - return spew.Sdump(p.attempt.Route) + return spew.Sdump(attempt.Route) }), ) @@ -332,50 +649,106 @@ func (p *paymentLifecycle) sendPaymentAttempt(firstHop lnwire.ShortChannelID, // such that we can resume waiting for the result after a // restart. 
err := p.router.cfg.Payer.SendHTLC( - firstHop, p.attempt.PaymentID, htlcAdd, + firstHop, attempt.AttemptID, htlcAdd, ) if err != nil { log.Errorf("Failed sending attempt %d for payment "+ - "%x to switch: %v", p.attempt.PaymentID, - p.payment.PaymentHash, err) + "%v to switch: %v", attempt.AttemptID, + p.paymentHash, err) return err } - log.Debugf("Payment %x (pid=%v) successfully sent to switch", - p.payment.PaymentHash, p.attempt.PaymentID) + log.Debugf("Payment %v (pid=%v) successfully sent to switch, route: %v", + p.paymentHash, attempt.AttemptID, &attempt.Route) return nil } // handleSendError inspects the given error from the Switch and determines -// whether we should make another payment attempt. -func (p *paymentLifecycle) handleSendError(sendErr error) error { +// whether we should make another payment attempt, or if it should be +// considered a terminal error. Terminal errors will be recorded with the +// control tower. +func (p *shardHandler) handleSendError(attempt *channeldb.HTLCAttemptInfo, + sendErr error) error { reason := p.router.processSendError( - p.attempt.PaymentID, &p.attempt.Route, sendErr, + attempt.AttemptID, &attempt.Route, sendErr, ) if reason == nil { - // Save the forwarding error so it can be returned if - // this turns out to be the last attempt. - p.lastError = sendErr - return nil } - log.Debugf("Payment %x failed: final_outcome=%v, raw_err=%v", - p.payment.PaymentHash, *reason, sendErr) + log.Debugf("Payment %v failed: final_outcome=%v, raw_err=%v", + p.paymentHash, *reason, sendErr) - // Mark the payment failed with no route. - // - // TODO(halseth): make payment codes for the actual reason we don't - // continue path finding. - err := p.router.cfg.Control.Fail( - p.payment.PaymentHash, *reason, - ) + err := p.router.cfg.Control.Fail(p.paymentHash, *reason) if err != nil { return err } - // Terminal state, return the error we encountered. 
- return sendErr + return nil +} + +// failAttempt calls control tower to fail the current payment attempt. +func (p *shardHandler) failAttempt(attempt *channeldb.HTLCAttemptInfo, + sendError error) error { + + log.Warnf("Attempt %v for payment %v failed: %v", attempt.AttemptID, + p.paymentHash, sendError) + + failInfo := marshallError( + sendError, + p.router.cfg.Clock.Now(), + ) + + return p.router.cfg.Control.FailAttempt( + p.paymentHash, attempt.AttemptID, + failInfo, + ) +} + +// marshallError marshall an error as received from the switch to a structure +// that is suitable for database storage. +func marshallError(sendError error, time time.Time) *channeldb.HTLCFailInfo { + response := &channeldb.HTLCFailInfo{ + FailTime: time, + } + + switch sendError { + + case htlcswitch.ErrPaymentIDNotFound: + response.Reason = channeldb.HTLCFailInternal + return response + + case htlcswitch.ErrUnreadableFailureMessage: + response.Reason = channeldb.HTLCFailUnreadable + return response + } + + rtErr, ok := sendError.(htlcswitch.ClearTextError) + if !ok { + response.Reason = channeldb.HTLCFailInternal + return response + } + + message := rtErr.WireMessage() + if message != nil { + response.Reason = channeldb.HTLCFailMessage + response.Message = message + } else { + response.Reason = channeldb.HTLCFailUnknown + } + + // If the ClearTextError received is a ForwardingError, the error + // originated from a node along the route, not locally on our outgoing + // link. We set failureSourceIdx to the index of the node where the + // failure occurred. If the error is not a ForwardingError, the failure + // occurred at our node, so we leave the index as 0 to indicate that + // we failed locally. 
+ fErr, ok := rtErr.(*htlcswitch.ForwardingError) + if ok { + response.FailureSourceIndex = uint32(fErr.FailureSourceIdx) + } + + return response } diff --git a/routing/payment_lifecycle_test.go b/routing/payment_lifecycle_test.go new file mode 100644 index 0000000000..e83ac17f44 --- /dev/null +++ b/routing/payment_lifecycle_test.go @@ -0,0 +1,898 @@ +package routing + +import ( + "crypto/rand" + "fmt" + "sync/atomic" + "testing" + "time" + + "github.com/btcsuite/btcutil" + "github.com/go-errors/errors" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" + "github.com/lightningnetwork/lnd/htlcswitch" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +const stepTimeout = 5 * time.Second + +// createTestRoute builds a route a->b->c paying the given amt to c. +func createTestRoute(amt lnwire.MilliSatoshi, + aliasMap map[string]route.Vertex) (*route.Route, error) { + + hopFee := lnwire.NewMSatFromSatoshis(3) + hop1 := aliasMap["b"] + hop2 := aliasMap["c"] + hops := []*route.Hop{ + { + ChannelID: 1, + PubKeyBytes: hop1, + LegacyPayload: true, + AmtToForward: amt + hopFee, + }, + { + ChannelID: 2, + PubKeyBytes: hop2, + LegacyPayload: true, + AmtToForward: amt, + }, + } + + // We create a simple route that we will supply every time the router + // requests one. + return route.NewRouteFromHops( + amt+2*hopFee, 100, aliasMap["a"], hops, + ) +} + +// TestRouterPaymentStateMachine tests that the router interacts as expected +// with the ControlTower during a payment lifecycle, such that it payment +// attempts are not sent twice to the switch, and results are handled after a +// restart. +func TestRouterPaymentStateMachine(t *testing.T) { + t.Parallel() + + const startingBlockHeight = 101 + + // Setup two simple channels such that we can mock sending along this + // route. 
+ chanCapSat := btcutil.Amount(100000) + testChannels := []*testChannel{ + symmetricTestChannel("a", "b", chanCapSat, &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: lnwire.NewMSatFromSatoshis(chanCapSat), + }, 1), + symmetricTestChannel("b", "c", chanCapSat, &testChannelPolicy{ + Expiry: 144, + FeeRate: 400, + MinHTLC: 1, + MaxHTLC: lnwire.NewMSatFromSatoshis(chanCapSat), + }, 2), + } + + testGraph, err := createTestGraphFromChannels(testChannels, "a") + if err != nil { + t.Fatalf("unable to create graph: %v", err) + } + defer testGraph.cleanUp() + + paymentAmt := lnwire.NewMSatFromSatoshis(1000) + + // We create a simple route that we will supply every time the router + // requests one. + rt, err := createTestRoute(paymentAmt, testGraph.aliasMap) + if err != nil { + t.Fatalf("unable to create route: %v", err) + } + + shard, err := createTestRoute(paymentAmt/4, testGraph.aliasMap) + if err != nil { + t.Fatalf("unable to create route: %v", err) + } + + // A payment state machine test case consists of several ordered steps, + // that we use for driving the scenario. + type testCase struct { + // steps is a list of steps to perform during the testcase. + steps []string + + // routes is the sequence of routes we will provide to the + // router when it requests a new route. + routes []*route.Route + } + + const ( + // routerInitPayment is a test step where we expect the router + // to call the InitPayment method on the control tower. + routerInitPayment = "Router:init-payment" + + // routerRegisterAttempt is a test step where we expect the + // router to call the RegisterAttempt method on the control + // tower. + routerRegisterAttempt = "Router:register-attempt" + + // routerSettleAttempt is a test step where we expect the + // router to call the SettleAttempt method on the control + // tower. 
+ routerSettleAttempt = "Router:settle-attempt" + + // routerFailAttempt is a test step where we expect the router + // to call the FailAttempt method on the control tower. + routerFailAttempt = "Router:fail-attempt" + + // routerFailPayment is a test step where we expect the router + // to call the Fail method on the control tower. + routerFailPayment = "Router:fail-payment" + + // sendToSwitchSuccess is a step where we expect the router to + // call send the payment attempt to the switch, and we will + // respond with a non-error, indicating that the payment + // attempt was successfully forwarded. + sendToSwitchSuccess = "SendToSwitch:success" + + // sendToSwitchResultFailure is a step where we expect the + // router to send the payment attempt to the switch, and we + // will respond with a forwarding error. This can happen when + // forwarding fail on our local links. + sendToSwitchResultFailure = "SendToSwitch:failure" + + // getPaymentResultSuccess is a test step where we expect the + // router to call the GetPaymentResult method, and we will + // respond with a successful payment result. + getPaymentResultSuccess = "GetPaymentResult:success" + + // getPaymentResultTempFailure is a test step where we expect the + // router to call the GetPaymentResult method, and we will + // respond with a forwarding error, expecting the router to retry. + getPaymentResultTempFailure = "GetPaymentResult:temp-failure" + + // getPaymentResultTerminalFailure is a test step where we + // expect the router to call the GetPaymentResult method, and + // we will respond with a terminal error, expecting the router + // to stop making payment attempts. + getPaymentResultTerminalFailure = "GetPaymentResult:terminal-failure" + + // resendPayment is a test step where we manually try to resend + // the same payment, making sure the router responds with an + // error indicating that it is already in flight. 
+ resendPayment = "ResendPayment" + + // startRouter is a step where we manually start the router, + // used to test that it automatically will resume payments at + // startup. + startRouter = "StartRouter" + + // stopRouter is a test step where we manually make the router + // shut down. + stopRouter = "StopRouter" + + // paymentSuccess is a step where assert that we receive a + // successful result for the original payment made. + paymentSuccess = "PaymentSuccess" + + // paymentError is a step where assert that we receive an error + // for the original payment made. + paymentError = "PaymentError" + + // resentPaymentSuccess is a step where assert that we receive + // a successful result for a payment that was resent. + resentPaymentSuccess = "ResentPaymentSuccess" + + // resentPaymentError is a step where assert that we receive an + // error for a payment that was resent. + resentPaymentError = "ResentPaymentError" + ) + + tests := []testCase{ + { + // Tests a normal payment flow that succeeds. + steps: []string{ + routerInitPayment, + routerRegisterAttempt, + sendToSwitchSuccess, + getPaymentResultSuccess, + routerSettleAttempt, + paymentSuccess, + }, + routes: []*route.Route{rt}, + }, + { + // A payment flow with a failure on the first attempt, + // but that succeeds on the second attempt. + steps: []string{ + routerInitPayment, + routerRegisterAttempt, + sendToSwitchSuccess, + + // Make the first sent attempt fail. + getPaymentResultTempFailure, + routerFailAttempt, + + // The router should retry. + routerRegisterAttempt, + sendToSwitchSuccess, + + // Make the second sent attempt succeed. + getPaymentResultSuccess, + routerSettleAttempt, + paymentSuccess, + }, + routes: []*route.Route{rt, rt}, + }, + { + // A payment flow with a forwarding failure first time + // sending to the switch, but that succeeds on the + // second attempt. + steps: []string{ + routerInitPayment, + routerRegisterAttempt, + + // Make the first sent attempt fail. 
+ sendToSwitchResultFailure, + routerFailAttempt, + + // The router should retry. + routerRegisterAttempt, + sendToSwitchSuccess, + + // Make the second sent attempt succeed. + getPaymentResultSuccess, + routerSettleAttempt, + paymentSuccess, + }, + routes: []*route.Route{rt, rt}, + }, + { + // A payment that fails on the first attempt, and has + // only one route available to try. It will therefore + // fail permanently. + steps: []string{ + routerInitPayment, + routerRegisterAttempt, + sendToSwitchSuccess, + + // Make the first sent attempt fail. + getPaymentResultTempFailure, + routerFailAttempt, + + // Since there are no more routes to try, the + // payment should fail. + routerFailPayment, + paymentError, + }, + routes: []*route.Route{rt}, + }, + { + // We expect the payment to fail immediately if we have + // no routes to try. + steps: []string{ + routerInitPayment, + routerFailPayment, + paymentError, + }, + routes: []*route.Route{}, + }, + { + // A normal payment flow, where we attempt to resend + // the same payment after each step. This ensures that + // the router don't attempt to resend a payment already + // in flight. + steps: []string{ + routerInitPayment, + routerRegisterAttempt, + + // Manually resend the payment, the router + // should attempt to init with the control + // tower, but fail since it is already in + // flight. + resendPayment, + routerInitPayment, + resentPaymentError, + + // The original payment should proceed as + // normal. + sendToSwitchSuccess, + + // Again resend the payment and assert it's not + // allowed. + resendPayment, + routerInitPayment, + resentPaymentError, + + // Notify about a success for the original + // payment. + getPaymentResultSuccess, + routerSettleAttempt, + + // Now that the original payment finished, + // resend it again to ensure this is not + // allowed. 
+ resendPayment, + routerInitPayment, + resentPaymentError, + paymentSuccess, + }, + routes: []*route.Route{rt}, + }, + { + // Tests that the router is able to handle the + // receieved payment result after a restart. + steps: []string{ + routerInitPayment, + routerRegisterAttempt, + sendToSwitchSuccess, + + // Shut down the router. The original caller + // should get notified about this. + stopRouter, + paymentError, + + // Start the router again, and ensure the + // router registers the success with the + // control tower. + startRouter, + getPaymentResultSuccess, + routerSettleAttempt, + }, + routes: []*route.Route{rt}, + }, + { + // Tests that we are allowed to resend a payment after + // it has permanently failed. + steps: []string{ + routerInitPayment, + routerRegisterAttempt, + sendToSwitchSuccess, + + // Resending the payment at this stage should + // not be allowed. + resendPayment, + routerInitPayment, + resentPaymentError, + + // Make the first attempt fail. + getPaymentResultTempFailure, + routerFailAttempt, + + // Since we have no more routes to try, the + // original payment should fail. + routerFailPayment, + paymentError, + + // Now resend the payment again. This should be + // allowed, since the payment has failed. + resendPayment, + routerInitPayment, + routerRegisterAttempt, + sendToSwitchSuccess, + getPaymentResultSuccess, + routerSettleAttempt, + resentPaymentSuccess, + }, + routes: []*route.Route{rt}, + }, + + // ===================================== + // || MPP scenarios || + // ===================================== + { + // Tests a simple successful MP payment of 4 shards. + steps: []string{ + routerInitPayment, + + // shard 0 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 1 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 2 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 3 + routerRegisterAttempt, + sendToSwitchSuccess, + + // All shards succeed. 
+ getPaymentResultSuccess, + getPaymentResultSuccess, + getPaymentResultSuccess, + getPaymentResultSuccess, + + // Router should settle them all. + routerSettleAttempt, + routerSettleAttempt, + routerSettleAttempt, + routerSettleAttempt, + + // And the final result is obviously + // successful. + paymentSuccess, + }, + routes: []*route.Route{shard, shard, shard, shard}, + }, + { + // An MP payment scenario where we need several extra + // attempts before the payment finally settle. + steps: []string{ + routerInitPayment, + + // shard 0 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 1 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 2 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 3 + routerRegisterAttempt, + sendToSwitchSuccess, + + // First two shards fail, two new ones are sent. + getPaymentResultTempFailure, + getPaymentResultTempFailure, + routerFailAttempt, + routerFailAttempt, + + routerRegisterAttempt, + sendToSwitchSuccess, + routerRegisterAttempt, + sendToSwitchSuccess, + + // The four shards settle. + getPaymentResultSuccess, + getPaymentResultSuccess, + getPaymentResultSuccess, + getPaymentResultSuccess, + routerSettleAttempt, + routerSettleAttempt, + routerSettleAttempt, + routerSettleAttempt, + + // Overall payment succeeds. + paymentSuccess, + }, + routes: []*route.Route{ + shard, shard, shard, shard, shard, shard, + }, + }, + { + // An MP payment scenario where 3 of the shards fail. + // However the last shard settle, which means we get + // the preimage and should consider the overall payment + // a success. + steps: []string{ + routerInitPayment, + + // shard 0 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 1 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 2 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 3 + routerRegisterAttempt, + sendToSwitchSuccess, + + // 3 shards fail, and should be failed by the + // router. 
+ getPaymentResultTempFailure, + getPaymentResultTempFailure, + getPaymentResultTempFailure, + routerFailAttempt, + routerFailAttempt, + routerFailAttempt, + + // The fourth shard succeed against all odds, + // making the overall payment succeed. + getPaymentResultSuccess, + routerSettleAttempt, + paymentSuccess, + }, + routes: []*route.Route{shard, shard, shard, shard}, + }, + { + // An MP payment scenario a shard fail with a terminal + // error, causing the router to stop attempting. + steps: []string{ + routerInitPayment, + + // shard 0 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 1 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 2 + routerRegisterAttempt, + sendToSwitchSuccess, + + // shard 3 + routerRegisterAttempt, + sendToSwitchSuccess, + + // The first shard fail with a terminal error. + getPaymentResultTerminalFailure, + routerFailAttempt, + routerFailPayment, + + // Remaining 3 shards fail. + getPaymentResultTempFailure, + getPaymentResultTempFailure, + getPaymentResultTempFailure, + routerFailAttempt, + routerFailAttempt, + routerFailAttempt, + + // Payment fails. + paymentError, + }, + routes: []*route.Route{ + shard, shard, shard, shard, shard, shard, + }, + }, + } + + // Create a mock control tower with channels set up, that we use to + // synchronize and listen for events. + control := makeMockControlTower() + control.init = make(chan initArgs, 20) + control.registerAttempt = make(chan registerAttemptArgs, 20) + control.settleAttempt = make(chan settleAttemptArgs, 20) + control.failAttempt = make(chan failAttemptArgs, 20) + control.failPayment = make(chan failPaymentArgs, 20) + control.fetchInFlight = make(chan struct{}, 20) + + quit := make(chan struct{}) + defer close(quit) + + // setupRouter is a helper method that creates and starts the router in + // the desired configuration for this test. 
+ setupRouter := func() (*ChannelRouter, chan error, + chan *htlcswitch.PaymentResult, chan error) { + + chain := newMockChain(startingBlockHeight) + chainView := newMockChainView(chain) + + // We set uo the use the following channels and a mock Payer to + // synchonize with the interaction to the Switch. + sendResult := make(chan error) + paymentResultErr := make(chan error) + paymentResult := make(chan *htlcswitch.PaymentResult) + + payer := &mockPayer{ + sendResult: sendResult, + paymentResult: paymentResult, + paymentResultErr: paymentResultErr, + } + + router, err := New(Config{ + Graph: testGraph.graph, + Chain: chain, + ChainView: chainView, + Control: control, + SessionSource: &mockPaymentSessionSource{}, + MissionControl: &mockMissionControl{}, + Payer: payer, + ChannelPruneExpiry: time.Hour * 24, + GraphPruneInterval: time.Hour * 2, + QueryBandwidth: func(e *channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi { + return lnwire.NewMSatFromSatoshis(e.Capacity) + }, + NextPaymentID: func() (uint64, error) { + next := atomic.AddUint64(&uniquePaymentID, 1) + return next, nil + }, + Clock: clock.NewTestClock(time.Unix(1, 0)), + }) + if err != nil { + t.Fatalf("unable to create router %v", err) + } + + // On startup, the router should fetch all pending payments + // from the ControlTower, so assert that here. 
+ errCh := make(chan error) + go func() { + close(errCh) + select { + case <-control.fetchInFlight: + return + case <-time.After(1 * time.Second): + errCh <- errors.New("router did not fetch in flight " + + "payments") + } + }() + + if err := router.Start(); err != nil { + t.Fatalf("unable to start router: %v", err) + } + + select { + case err := <-errCh: + if err != nil { + t.Fatalf("error in anonymous goroutine: %s", err) + } + case <-time.After(1 * time.Second): + t.Fatalf("did not fetch in flight payments at startup") + } + + return router, sendResult, paymentResult, paymentResultErr + } + + router, sendResult, getPaymentResult, getPaymentResultErr := setupRouter() + defer func() { + if err := router.Stop(); err != nil { + t.Fatal(err) + } + }() + + for _, test := range tests { + // Craft a LightningPayment struct. + var preImage lntypes.Preimage + if _, err := rand.Read(preImage[:]); err != nil { + t.Fatalf("unable to generate preimage") + } + + payHash := preImage.Hash() + + payment := LightningPayment{ + Target: testGraph.aliasMap["c"], + Amount: paymentAmt, + FeeLimit: noFeeLimit, + PaymentHash: payHash, + } + + router.cfg.SessionSource = &mockPaymentSessionSource{ + routes: test.routes, + } + + router.cfg.MissionControl = &mockMissionControl{} + + // Send the payment. Since this is new payment hash, the + // information should be registered with the ControlTower. + paymentResult := make(chan error) + go func() { + _, _, err := router.SendPayment(&payment) + paymentResult <- err + }() + + var resendResult chan error + for _, step := range test.steps { + switch step { + + case routerInitPayment: + var args initArgs + select { + case args = <-control.init: + case <-time.After(stepTimeout): + t.Fatalf("no init payment with control") + } + + if args.c == nil { + t.Fatalf("expected non-nil CreationInfo") + } + + // In this step we expect the router to make a call to + // register a new attempt with the ControlTower. 
+ case routerRegisterAttempt: + var args registerAttemptArgs + select { + case args = <-control.registerAttempt: + case <-time.After(stepTimeout): + t.Fatalf("attempt not registered " + + "with control") + } + + if args.a == nil { + t.Fatalf("expected non-nil AttemptInfo") + } + + // In this step we expect the router to call the + // ControlTower's SettleAttempt method with the preimage. + case routerSettleAttempt: + select { + case <-control.settleAttempt: + case <-time.After(stepTimeout): + t.Fatalf("attempt settle not " + + "registered with control") + } + + // In this step we expect the router to call the + // ControlTower's FailAttempt method with a HTLC fail + // info. + case routerFailAttempt: + select { + case <-control.failAttempt: + case <-time.After(stepTimeout): + t.Fatalf("attempt fail not " + + "registered with control") + } + + // In this step we expect the router to call the + // ControlTower's Fail method, to indicate that the + // payment failed. + case routerFailPayment: + select { + case <-control.failPayment: + case <-time.After(stepTimeout): + t.Fatalf("payment fail not " + + "registered with control") + } + + // In this step we expect the SendToSwitch method to be + // called, and we respond with a nil-error. + case sendToSwitchSuccess: + select { + case sendResult <- nil: + case <-time.After(stepTimeout): + t.Fatalf("unable to send result") + } + + // In this step we expect the SendToSwitch method to be + // called, and we respond with a forwarding error + case sendToSwitchResultFailure: + select { + case sendResult <- htlcswitch.NewForwardingError( + &lnwire.FailTemporaryChannelFailure{}, + 1, + ): + case <-time.After(stepTimeout): + t.Fatalf("unable to send result") + } + + // In this step we expect the GetPaymentResult method + // to be called, and we respond with the preimage to + // complete the payment. 
+ case getPaymentResultSuccess: + select { + case getPaymentResult <- &htlcswitch.PaymentResult{ + Preimage: preImage, + }: + case <-time.After(stepTimeout): + t.Fatalf("unable to send result") + } + + // In this state we expect the GetPaymentResult method + // to be called, and we respond with a forwarding + // error, indicating that the router should retry. + case getPaymentResultTempFailure: + failure := htlcswitch.NewForwardingError( + &lnwire.FailTemporaryChannelFailure{}, + 1, + ) + + select { + case getPaymentResult <- &htlcswitch.PaymentResult{ + Error: failure, + }: + case <-time.After(stepTimeout): + t.Fatalf("unable to get result") + } + + // In this state we expect the router to call the + // GetPaymentResult method, and we will respond with a + // terminal error, indiating the router should stop + // making payment attempts. + case getPaymentResultTerminalFailure: + failure := htlcswitch.NewForwardingError( + &lnwire.FailIncorrectDetails{}, + 1, + ) + + select { + case getPaymentResult <- &htlcswitch.PaymentResult{ + Error: failure, + }: + case <-time.After(stepTimeout): + t.Fatalf("unable to get result") + } + + // In this step we manually try to resend the same + // payment, making sure the router responds with an + // error indicating that it is already in flight. + case resendPayment: + resendResult = make(chan error) + go func() { + _, _, err := router.SendPayment(&payment) + resendResult <- err + }() + + // In this step we manually stop the router. + case stopRouter: + select { + case getPaymentResultErr <- fmt.Errorf( + "shutting down"): + case <-time.After(stepTimeout): + t.Fatalf("unable to send payment " + + "result error") + } + + if err := router.Stop(); err != nil { + t.Fatalf("unable to restart: %v", err) + } + + // In this step we manually start the router. 
+ case startRouter: + router, sendResult, getPaymentResult, + getPaymentResultErr = setupRouter() + + // In this state we expect to receive an error for the + // original payment made. + case paymentError: + select { + case err := <-paymentResult: + if err == nil { + t.Fatalf("expected error") + } + + case <-time.After(stepTimeout): + t.Fatalf("got no payment result") + } + + // In this state we expect the original payment to + // succeed. + case paymentSuccess: + select { + case err := <-paymentResult: + if err != nil { + t.Fatalf("did not expect "+ + "error %v", err) + } + + case <-time.After(stepTimeout): + t.Fatalf("got no payment result") + } + + // In this state we expect to receive an error for the + // resent payment made. + case resentPaymentError: + select { + case err := <-resendResult: + if err == nil { + t.Fatalf("expected error") + } + + case <-time.After(stepTimeout): + t.Fatalf("got no payment result") + } + + // In this state we expect the resent payment to + // succeed. + case resentPaymentSuccess: + select { + case err := <-resendResult: + if err != nil { + t.Fatalf("did not expect error %v", err) + } + + case <-time.After(stepTimeout): + t.Fatalf("got no payment result") + } + + default: + t.Fatalf("unknown step %v", step) + } + } + } +} diff --git a/routing/payment_session.go b/routing/payment_session.go index 370cc87a6e..ed22b8c631 100644 --- a/routing/payment_session.go +++ b/routing/payment_session.go @@ -3,6 +3,8 @@ package routing import ( "fmt" + "github.com/btcsuite/btclog" + "github.com/lightningnetwork/lnd/build" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/route" @@ -12,14 +14,96 @@ import ( // to prevent an HTLC being failed if some blocks are mined while it's in-flight. const BlockPadding uint16 = 3 +// noRouteError encodes a non-critical error encountered during path finding. 
+type noRouteError uint8 + +const ( + // errNoTlvPayload is returned when the destination hop does not support + // a tlv payload. + errNoTlvPayload noRouteError = iota + + // errNoPaymentAddr is returned when the destination hop does not + // support payment addresses. + errNoPaymentAddr + + // errNoPathFound is returned when a path to the target destination does + // not exist in the graph. + errNoPathFound + + // errInsufficientLocalBalance is returned when none of the local + // channels have enough balance for the payment. + errInsufficientBalance + + // errEmptyPaySession is returned when the empty payment session is + // queried for a route. + errEmptyPaySession +) + +var ( + // DefaultShardMinAmt is the default amount beyond which we won't try to + // further split the payment if no route is found. It is the minimum + // amount that we use as the shard size when splitting. + DefaultShardMinAmt = lnwire.NewMSatFromSatoshis(10000) +) + +// Error returns the string representation of the noRouteError +func (e noRouteError) Error() string { + switch e { + case errNoTlvPayload: + return "destination hop doesn't understand new TLV payloads" + + case errNoPaymentAddr: + return "destination hop doesn't understand payment addresses" + + case errNoPathFound: + return "unable to find a path to destination" + + case errEmptyPaySession: + return "empty payment session" + + case errInsufficientBalance: + return "insufficient local balance" + + default: + return "unknown no-route error" + } +} + +// FailureReason converts a path finding error into a payment-level failure. 
+func (e noRouteError) FailureReason() channeldb.FailureReason { + switch e { + case + errNoTlvPayload, + errNoPaymentAddr, + errNoPathFound, + errEmptyPaySession: + + return channeldb.FailureReasonNoRoute + + case errInsufficientBalance: + return channeldb.FailureReasonInsufficientBalance + + default: + return channeldb.FailureReasonError + } +} + // PaymentSession is used during SendPayment attempts to provide routes to // attempt. It also defines methods to give the PaymentSession additional // information learned during the previous attempts. type PaymentSession interface { // RequestRoute returns the next route to attempt for routing the - // specified HTLC payment to the target node. - RequestRoute(payment *LightningPayment, - height uint32, finalCltvDelta uint16) (*route.Route, error) + // specified HTLC payment to the target node. The returned route should + // carry at most maxAmt to the target node, and pay at most feeLimit in + // fees. It can carry less if the payment is MPP. The activeShards + // argument should be set to instruct the payment session about the + // number of in flight HTLCS for the payment, such that it can choose + // splitting strategy accordingly. + // + // A noRouteError is returned if a non-critical error is encountered + // during path finding. + RequestRoute(maxAmt, feeLimit lnwire.MilliSatoshi, + activeShards, height uint32) (*route.Route, error) } // paymentSession is used during an HTLC routings session to prune the local @@ -35,12 +119,55 @@ type paymentSession struct { getBandwidthHints func() (map[uint64]lnwire.MilliSatoshi, error) - sessionSource *SessionSource + payment *LightningPayment - preBuiltRoute *route.Route - preBuiltRouteTried bool + empty bool pathFinder pathFinder + + getRoutingGraph func() (routingGraph, func(), error) + + // pathFindingConfig defines global parameters that control the + // trade-off in path finding between fees and probabiity. 
+ pathFindingConfig PathFindingConfig + + missionControl MissionController + + // minShardAmt is the amount beyond which we won't try to further split + // the payment if no route is found. If the maximum number of htlcs + // specified in the payment is one, under no circumstances splitting + // will happen and this value remains unused. + minShardAmt lnwire.MilliSatoshi + + // log is a payment session-specific logger. + log btclog.Logger +} + +// newPaymentSession instantiates a new payment session. +func newPaymentSession(p *LightningPayment, + getBandwidthHints func() (map[uint64]lnwire.MilliSatoshi, error), + getRoutingGraph func() (routingGraph, func(), error), + missionControl MissionController, pathFindingConfig PathFindingConfig) ( + *paymentSession, error) { + + edges, err := RouteHintsToEdges(p.RouteHints, p.Target) + if err != nil { + return nil, err + } + + logPrefix := fmt.Sprintf("PaymentSession(%x):", p.PaymentHash) + + return &paymentSession{ + additionalEdges: edges, + getBandwidthHints: getBandwidthHints, + payment: p, + pathFinder: findPath, + getRoutingGraph: getRoutingGraph, + pathFindingConfig: pathFindingConfig, + missionControl: missionControl, + minShardAmt: DefaultShardMinAmt, + log: build.NewPrefixLog(logPrefix, log), + }, nil } // RequestRoute returns a route which is likely to be capable for successfully @@ -52,83 +179,146 @@ type paymentSession struct { // // NOTE: This function is safe for concurrent access. // NOTE: Part of the PaymentSession interface. -func (p *paymentSession) RequestRoute(payment *LightningPayment, - height uint32, finalCltvDelta uint16) (*route.Route, error) { - - switch { - - // If we have a pre-built route, use that directly. 
- case p.preBuiltRoute != nil && !p.preBuiltRouteTried: - p.preBuiltRouteTried = true +func (p *paymentSession) RequestRoute(maxAmt, feeLimit lnwire.MilliSatoshi, + activeShards, height uint32) (*route.Route, error) { - return p.preBuiltRoute, nil - - // If the pre-built route has been tried already, the payment session is - // over. - case p.preBuiltRoute != nil: - return nil, fmt.Errorf("pre-built route already tried") + if p.empty { + return nil, errEmptyPaySession } // Add BlockPadding to the finalCltvDelta so that the receiving node // does not reject the HTLC if some blocks are mined while it's in-flight. + finalCltvDelta := p.payment.FinalCLTVDelta finalCltvDelta += BlockPadding // We need to subtract the final delta before passing it into path // finding. The optimal path is independent of the final cltv delta and // the path finding algorithm is unaware of this value. - cltvLimit := payment.CltvLimit - uint32(finalCltvDelta) + cltvLimit := p.payment.CltvLimit - uint32(finalCltvDelta) // TODO(roasbeef): sync logic amongst dist sys // Taking into account this prune view, we'll attempt to locate a path // to our destination, respecting the recommendations from // MissionControl. - ss := p.sessionSource - restrictions := &RestrictParams{ - ProbabilitySource: ss.MissionControl.GetProbability, - FeeLimit: payment.FeeLimit, - OutgoingChannelID: payment.OutgoingChannelID, + ProbabilitySource: p.missionControl.GetProbability, + FeeLimit: feeLimit, + OutgoingChannelID: p.payment.OutgoingChannelID, + LastHop: p.payment.LastHop, CltvLimit: cltvLimit, + DestCustomRecords: p.payment.DestCustomRecords, + DestFeatures: p.payment.DestFeatures, + PaymentAddr: p.payment.PaymentAddr, } - // We'll also obtain a set of bandwidthHints from the lower layer for - // each of our outbound channels. This will allow the path finding to - // skip any links that aren't active or just don't have enough bandwidth - // to carry the payment. 
New bandwidth hints are queried for every new - // path finding attempt, because concurrent payments may change - // balances. - bandwidthHints, err := p.getBandwidthHints() - if err != nil { - return nil, err - } + finalHtlcExpiry := int32(height) + int32(finalCltvDelta) - path, err := p.pathFinder( - &graphParams{ - graph: ss.Graph, - additionalEdges: p.additionalEdges, - bandwidthHints: bandwidthHints, - }, - restrictions, &ss.PathFindingConfig, - ss.SelfNode.PubKeyBytes, payment.Target, - payment.Amount, - ) - if err != nil { - return nil, err - } + for { + // We'll also obtain a set of bandwidthHints from the lower + // layer for each of our outbound channels. This will allow the + // path finding to skip any links that aren't active or just + // don't have enough bandwidth to carry the payment. New + // bandwidth hints are queried for every new path finding + // attempt, because concurrent payments may change balances. + bandwidthHints, err := p.getBandwidthHints() + if err != nil { + return nil, err + } - // With the next candidate path found, we'll attempt to turn this into - // a route by applying the time-lock and fee requirements. - sourceVertex := route.Vertex(ss.SelfNode.PubKeyBytes) - route, err := newRoute( - payment.Amount, sourceVertex, path, height, finalCltvDelta, - payment.FinalDestRecords, - ) - if err != nil { - // TODO(roasbeef): return which edge/vertex didn't work - // out - return nil, err - } + p.log.Debugf("pathfinding for amt=%v", maxAmt) + + // Get a routing graph. + routingGraph, cleanup, err := p.getRoutingGraph() + if err != nil { + return nil, err + } - return route, err + sourceVertex := routingGraph.sourceNode() + + // Find a route for the current amount. + path, err := p.pathFinder( + &graphParams{ + additionalEdges: p.additionalEdges, + bandwidthHints: bandwidthHints, + graph: routingGraph, + }, + restrictions, &p.pathFindingConfig, + sourceVertex, p.payment.Target, + maxAmt, finalHtlcExpiry, + ) + + // Close routing graph. 
+ cleanup() + + switch { + case err == errNoPathFound: + // Don't split if this is a legacy payment without mpp + // record. + if p.payment.PaymentAddr == nil { + p.log.Debugf("not splitting because payment " + + "address is unspecified") + + return nil, errNoPathFound + } + + // No splitting if this is the last shard. + isLastShard := activeShards+1 >= p.payment.MaxParts + if isLastShard { + p.log.Debugf("not splitting because shard "+ + "limit %v has been reached", + p.payment.MaxParts) + + return nil, errNoPathFound + } + + // This is where the magic happens. If we can't find a + // route, try it for half the amount. + maxAmt /= 2 + + // Put a lower bound on the minimum shard size. + if maxAmt < p.minShardAmt { + p.log.Debugf("not splitting because minimum "+ + "shard amount %v has been reached", + p.minShardAmt) + + return nil, errNoPathFound + } + + // Go pathfinding. + continue + + // If there isn't enough local bandwidth, there is no point in + // splitting. It won't be possible to create a complete set in + // any case, but the sent out partial payments would be held by + // the receiver until the mpp timeout. + case err == errInsufficientBalance: + p.log.Debug("not splitting because local balance " + + "is insufficient") + + return nil, err + + case err != nil: + return nil, err + } + + // With the next candidate path found, we'll attempt to turn + // this into a route by applying the time-lock and fee + // requirements. 
+ route, err := newRoute( + sourceVertex, path, height, + finalHopParams{ + amt: maxAmt, + totalAmt: p.payment.Amount, + cltvDelta: finalCltvDelta, + records: p.payment.DestCustomRecords, + paymentAddr: p.payment.PaymentAddr, + }, + ) + if err != nil { + return nil, err + } + + return route, err + } } diff --git a/routing/payment_session_source.go b/routing/payment_session_source.go index b5175cf78f..8122ff7117 100644 --- a/routing/payment_session_source.go +++ b/routing/payment_session_source.go @@ -26,9 +26,6 @@ type SessionSource struct { // the available bandwidth of the link should be returned. QueryBandwidth func(*channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi - // SelfNode is our own node. - SelfNode *channeldb.LightningNode - // MissionControl is a shared memory of sorts that executions of payment // path finding use in order to remember which vertexes/edges were // pruned from prior attempts. During payment execution, errors sent by @@ -43,12 +40,63 @@ type SessionSource struct { PathFindingConfig PathFindingConfig } +// getRoutingGraph returns a routing graph and a clean-up function for +// pathfinding. +func (m *SessionSource) getRoutingGraph() (routingGraph, func(), error) { + routingTx, err := newDbRoutingTx(m.Graph) + if err != nil { + return nil, nil, err + } + return routingTx, func() { + err := routingTx.close() + if err != nil { + log.Errorf("Error closing db tx: %v", err) + } + }, nil +} + // NewPaymentSession creates a new payment session backed by the latest prune // view from Mission Control. An optional set of routing hints can be provided // in order to populate additional edges to explore when finding a path to the // payment's destination. 
-func (m *SessionSource) NewPaymentSession(routeHints [][]zpay32.HopHint, - target route.Vertex) (PaymentSession, error) { +func (m *SessionSource) NewPaymentSession(p *LightningPayment) ( + PaymentSession, error) { + + sourceNode, err := m.Graph.SourceNode() + if err != nil { + return nil, err + } + + getBandwidthHints := func() (map[uint64]lnwire.MilliSatoshi, + error) { + + return generateBandwidthHints(sourceNode, m.QueryBandwidth) + } + + session, err := newPaymentSession( + p, getBandwidthHints, m.getRoutingGraph, + m.MissionControl, m.PathFindingConfig, + ) + if err != nil { + return nil, err + } + + return session, nil +} + +// NewPaymentSessionEmpty creates a new paymentSession instance that is empty, +// and will be exhausted immediately. Used for failure reporting to +// missioncontrol for resumed payment we don't want to make more attempts for. +func (m *SessionSource) NewPaymentSessionEmpty() PaymentSession { + return &paymentSession{ + empty: true, + } +} + +// RouteHintsToEdges converts a list of invoice route hints to an edge map that +// can be passed into pathfinding. +func RouteHintsToEdges(routeHints [][]zpay32.HopHint, target route.Vertex) ( + map[route.Vertex][]*channeldb.ChannelEdgePolicy, error) { edges := make(map[route.Vertex][]*channeldb.ChannelEdgePolicy) @@ -97,41 +145,5 @@ func (m *SessionSource) NewPaymentSession(routeHints [][]zpay32.HopHint, } } - sourceNode, err := m.Graph.SourceNode() - if err != nil { - return nil, err - } - - getBandwidthHints := func() (map[uint64]lnwire.MilliSatoshi, - error) { - - return generateBandwidthHints(sourceNode, m.QueryBandwidth) - } - - return &paymentSession{ - additionalEdges: edges, - getBandwidthHints: getBandwidthHints, - sessionSource: m, - pathFinder: findPath, - }, nil -} - -// NewPaymentSessionForRoute creates a new paymentSession instance that is just -// used for failure reporting to missioncontrol. 
-func (m *SessionSource) NewPaymentSessionForRoute(preBuiltRoute *route.Route) PaymentSession { - return &paymentSession{ - sessionSource: m, - preBuiltRoute: preBuiltRoute, - } -} - -// NewPaymentSessionEmpty creates a new paymentSession instance that is empty, -// and will be exhausted immediately. Used for failure reporting to -// missioncontrol for resumed payment we don't want to make more attempts for. -func (m *SessionSource) NewPaymentSessionEmpty() PaymentSession { - return &paymentSession{ - sessionSource: m, - preBuiltRoute: &route.Route{}, - preBuiltRouteTried: true, - } + return edges, nil } diff --git a/routing/payment_session_test.go b/routing/payment_session_test.go index 14f98449a3..ba55fcea6e 100644 --- a/routing/payment_session_test.go +++ b/routing/payment_session_test.go @@ -13,10 +13,38 @@ func TestRequestRoute(t *testing.T) { height = 10 ) - findPath := func(g *graphParams, r *RestrictParams, - cfg *PathFindingConfig, source, target route.Vertex, - amt lnwire.MilliSatoshi) ([]*channeldb.ChannelEdgePolicy, - error) { + cltvLimit := uint32(30) + finalCltvDelta := uint16(8) + + payment := &LightningPayment{ + CltvLimit: cltvLimit, + FinalCLTVDelta: finalCltvDelta, + Amount: 1000, + FeeLimit: 1000, + } + + session, err := newPaymentSession( + payment, + func() (map[uint64]lnwire.MilliSatoshi, + error) { + + return nil, nil + }, + func() (routingGraph, func(), error) { + return &sessionGraph{}, func() {}, nil + }, + &MissionControl{cfg: &MissionControlConfig{}}, + PathFindingConfig{}, + ) + if err != nil { + t.Fatal(err) + } + + // Override pathfinder with a mock. + session.pathFinder = func( + g *graphParams, r *RestrictParams, cfg *PathFindingConfig, + source, target route.Vertex, amt lnwire.MilliSatoshi, + finalHtlcExpiry int32) ([]*channeldb.ChannelEdgePolicy, error) { // We expect find path to receive a cltv limit excluding the // final cltv delta (including the block padding). 
@@ -37,32 +65,9 @@ func TestRequestRoute(t *testing.T) { return path, nil } - sessionSource := &SessionSource{ - SelfNode: &channeldb.LightningNode{}, - MissionControl: &MissionControl{ - cfg: &MissionControlConfig{}, - }, - } - - session := &paymentSession{ - getBandwidthHints: func() (map[uint64]lnwire.MilliSatoshi, - error) { - - return nil, nil - }, - sessionSource: sessionSource, - pathFinder: findPath, - } - - cltvLimit := uint32(30) - finalCltvDelta := uint16(8) - - payment := &LightningPayment{ - CltvLimit: cltvLimit, - FinalCLTVDelta: finalCltvDelta, - } - - route, err := session.RequestRoute(payment, height, finalCltvDelta) + route, err := session.RequestRoute( + payment.Amount, payment.FeeLimit, 0, height, + ) if err != nil { t.Fatal(err) } @@ -74,3 +79,11 @@ func TestRequestRoute(t *testing.T) { route.TotalTimeLock) } } + +type sessionGraph struct { + routingGraph +} + +func (g *sessionGraph) sourceNode() route.Vertex { + return route.Vertex{} +} diff --git a/routing/probability_estimator.go b/routing/probability_estimator.go new file mode 100644 index 0000000000..238a765f11 --- /dev/null +++ b/routing/probability_estimator.go @@ -0,0 +1,185 @@ +package routing + +import ( + "math" + "time" + + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +// probabilityEstimator returns node and pair probabilities based on historical +// payment results. +type probabilityEstimator struct { + // penaltyHalfLife defines after how much time a penalized node or + // channel is back at 50% probability. + penaltyHalfLife time.Duration + + // aprioriHopProbability is the assumed success probability of a hop in + // a route when no other information is available. + aprioriHopProbability float64 + + // aprioriWeight is a value in the range [0, 1] that defines to what + // extent historical results should be extrapolated to untried + // connections. 
Setting it to one will completely ignore historical + // results and always assume the configured a priori probability for + // untried connections. A value of zero will ignore the a priori + // probability completely and only base the probability on historical + // results, unless there are none available. + aprioriWeight float64 + + // prevSuccessProbability is the assumed probability for node pairs that + // successfully relayed the previous attempt. + prevSuccessProbability float64 +} + +// getNodeProbability calculates the probability for connections from a node +// that have not been tried before. The results parameter is a list of last +// payment results for that node. +func (p *probabilityEstimator) getNodeProbability(now time.Time, + results NodeResults, amt lnwire.MilliSatoshi) float64 { + + // If the channel history is not to be taken into account, we can return + // early here with the configured a priori probability. + if p.aprioriWeight == 1 { + return p.aprioriHopProbability + } + + // If there is no channel history, our best estimate is still the a + // priori probability. + if len(results) == 0 { + return p.aprioriHopProbability + } + + // The value of the apriori weight is in the range [0, 1]. Convert it to + // a factor that properly expresses the intention of the weight in the + // following weight average calculation. When the apriori weight is 0, + // the apriori factor is also 0. This means it won't have any effect on + // the weighted average calculation below. When the apriori weight + // approaches 1, the apriori factor goes to infinity. It will heavily + // outweigh any observations that have been collected. + aprioriFactor := 1/(1-p.aprioriWeight) - 1 + + // Calculate a weighted average consisting of the apriori probability + // and historical observations. This is the part that incentivizes nodes + // to make sure that all (not just some) of their channels are in good + // shape. 
Senders will steer around nodes that have shown a few + // failures, even though there may be many channels still untried. + // + // If there is just a single observation and the apriori weight is 0, + // this single observation will totally determine the node probability. + // The node probability is returned for all other channels of the node. + // This means that one failure will lead to the success probability + // estimates for all other channels being 0 too. The probability for the + // channel that was tried will not even recover, because it is + // recovering to the node probability (which is zero). So one failure + // effectively prunes all channels of the node forever. This is the most + // aggressive way in which we can penalize nodes and unlikely to yield + // good results in a real network. + probabilitiesTotal := p.aprioriHopProbability * aprioriFactor + totalWeight := aprioriFactor + + for _, result := range results { + switch { + + // Weigh success with a constant high weight of 1. There is no + // decay. Amt is never zero, so this clause is never executed + // when result.SuccessAmt is zero. + case amt <= result.SuccessAmt: + totalWeight++ + probabilitiesTotal += p.prevSuccessProbability + + // Weigh failures in accordance with their age. The base + // probability of a failure is considered zero, so nothing needs + // to be added to probabilitiesTotal. + case !result.FailTime.IsZero() && amt >= result.FailAmt: + age := now.Sub(result.FailTime) + totalWeight += p.getWeight(age) + } + } + + return probabilitiesTotal / totalWeight +} + +// getWeight calculates a weight in the range [0, 1] that should be assigned to +// a payment result. Weight follows an exponential curve that starts at 1 when +// the result is fresh and asymptotically approaches zero over time. The rate at +// which this happens is controlled by the penaltyHalfLife parameter. 
+func (p *probabilityEstimator) getWeight(age time.Duration) float64 { + exp := -age.Hours() / p.penaltyHalfLife.Hours() + return math.Pow(2, exp) +} + +// getPairProbability estimates the probability of successfully traversing to +// toNode based on historical payment outcomes for the from node. Those outcomes +// are passed in via the results parameter. +func (p *probabilityEstimator) getPairProbability( + now time.Time, results NodeResults, + toNode route.Vertex, amt lnwire.MilliSatoshi) float64 { + + nodeProbability := p.getNodeProbability(now, results, amt) + + return p.calculateProbability( + now, results, nodeProbability, toNode, amt, + ) +} + +// getLocalPairProbability estimates the probability of successfully traversing +// our own local channels to toNode. +func (p *probabilityEstimator) getLocalPairProbability( + now time.Time, results NodeResults, toNode route.Vertex) float64 { + + // For local channels that have never been tried before, we assume them + // to be successful. We have accurate balance and online status + // information on our own channels, so when we select them in a route it + // is close to certain that those channels will work. + nodeProbability := p.prevSuccessProbability + + return p.calculateProbability( + now, results, nodeProbability, toNode, lnwire.MaxMilliSatoshi, + ) +} + +// calculateProbability estimates the probability of successfully traversing to +// toNode based on historical payment outcomes and a fall-back node probability. +func (p *probabilityEstimator) calculateProbability( + now time.Time, results NodeResults, + nodeProbability float64, toNode route.Vertex, + amt lnwire.MilliSatoshi) float64 { + + // Retrieve the last pair outcome. + lastPairResult, ok := results[toNode] + + // If there is no history for this pair, return the node probability + // that is a probability estimate for untried channel. + if !ok { + return nodeProbability + } + + // For successes, we have a fixed (high) probability. 
Those pairs will + // be assumed good until proven otherwise. Amt is never zero, so this + // clause is never executed when lastPairResult.SuccessAmt is zero. + if amt <= lastPairResult.SuccessAmt { + return p.prevSuccessProbability + } + + // Take into account a minimum penalize amount. For balance errors, a + // failure may be reported with such a minimum to prevent too aggressive + // penalization. If the current amount is smaller than the amount that + // previously triggered a failure, we act as if this is an untried + // channel. + if lastPairResult.FailTime.IsZero() || amt < lastPairResult.FailAmt { + return nodeProbability + } + + timeSinceLastFailure := now.Sub(lastPairResult.FailTime) + + // Calculate success probability based on the weight of the last + // failure. When the failure is fresh, its weight is 1 and we'll return + // probability 0. Over time the probability recovers to the node + // probability. It would be as if this channel was never tried before. + weight := p.getWeight(timeSinceLastFailure) + probability := nodeProbability * (1 - weight) + + return probability +} diff --git a/routing/probability_estimator_test.go b/routing/probability_estimator_test.go new file mode 100644 index 0000000000..384b39b63a --- /dev/null +++ b/routing/probability_estimator_test.go @@ -0,0 +1,161 @@ +package routing + +import ( + "testing" + "time" + + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +const ( + // Define node identifiers + node1 = 1 + node2 = 2 + node3 = 3 + + // untriedNode is a node id for which we don't record any results in + // this test. This can be used to assert the probability for untried + // ndoes. + untriedNode = 255 + + // Define test estimator parameters. + aprioriHopProb = 0.6 + aprioriWeight = 0.75 + aprioriPrevSucProb = 0.95 +) + +type estimatorTestContext struct { + t *testing.T + estimator *probabilityEstimator + + // results contains a list of last results. 
Every element in the list + // corresponds to the last result towards a node. The list index equals + // the node id. So the first element in the list is the result towards + // node 0. + results map[int]TimedPairResult +} + +func newEstimatorTestContext(t *testing.T) *estimatorTestContext { + return &estimatorTestContext{ + t: t, + estimator: &probabilityEstimator{ + aprioriHopProbability: aprioriHopProb, + aprioriWeight: aprioriWeight, + penaltyHalfLife: time.Hour, + prevSuccessProbability: aprioriPrevSucProb, + }, + } +} + +// assertPairProbability asserts that the calculated success probability is +// correct. +func (c *estimatorTestContext) assertPairProbability(now time.Time, + toNode byte, amt lnwire.MilliSatoshi, expectedProb float64) { + + c.t.Helper() + + results := make(NodeResults) + for i, r := range c.results { + results[route.Vertex{byte(i)}] = r + } + + const tolerance = 0.01 + + p := c.estimator.getPairProbability(now, results, route.Vertex{toNode}, amt) + diff := p - expectedProb + if diff > tolerance || diff < -tolerance { + c.t.Fatalf("expected probability %v for node %v, but got %v", + expectedProb, toNode, p) + } +} + +// TestProbabilityEstimatorNoResults tests the probability estimation when no +// results are available. +func TestProbabilityEstimatorNoResults(t *testing.T) { + ctx := newEstimatorTestContext(t) + + ctx.assertPairProbability(testTime, 0, 0, aprioriHopProb) +} + +// TestProbabilityEstimatorOneSuccess tests the probability estimation for nodes +// that have a single success result. +func TestProbabilityEstimatorOneSuccess(t *testing.T) { + ctx := newEstimatorTestContext(t) + + ctx.results = map[int]TimedPairResult{ + node1: { + SuccessAmt: lnwire.MilliSatoshi(1000), + }, + } + + // Because of the previous success, this channel keep reporting a high + // probability. + ctx.assertPairProbability( + testTime, node1, 100, aprioriPrevSucProb, + ) + + // Untried channels are also influenced by the success. 
With a + // aprioriWeight of 0.75, the a priori probability is assigned weight 3. + expectedP := (3*aprioriHopProb + 1*aprioriPrevSucProb) / 4 + ctx.assertPairProbability(testTime, untriedNode, 100, expectedP) +} + +// TestProbabilityEstimatorOneFailure tests the probability estimation for nodes +// that have a single failure. +func TestProbabilityEstimatorOneFailure(t *testing.T) { + ctx := newEstimatorTestContext(t) + + ctx.results = map[int]TimedPairResult{ + node1: { + FailTime: testTime.Add(-time.Hour), + FailAmt: lnwire.MilliSatoshi(50), + }, + } + + // For an untried node, we expected the node probability. The weight for + // the failure after one hour is 0.5. This makes the node probability + // 0.51: + expectedNodeProb := (3*aprioriHopProb + 0.5*0) / 3.5 + ctx.assertPairProbability(testTime, untriedNode, 100, expectedNodeProb) + + // The pair probability decays back to the node probability. With the + // weight at 0.5, we expected a pair probability of 0.5 * 0.51 = 0.25. + ctx.assertPairProbability(testTime, node1, 100, expectedNodeProb/2) +} + +// TestProbabilityEstimatorMix tests the probability estimation for nodes for +// which a mix of successes and failures is recorded. +func TestProbabilityEstimatorMix(t *testing.T) { + ctx := newEstimatorTestContext(t) + + ctx.results = map[int]TimedPairResult{ + node1: { + SuccessAmt: lnwire.MilliSatoshi(1000), + }, + node2: { + FailTime: testTime.Add(-2 * time.Hour), + FailAmt: lnwire.MilliSatoshi(50), + }, + node3: { + FailTime: testTime.Add(-3 * time.Hour), + FailAmt: lnwire.MilliSatoshi(50), + }, + } + + // We expect the probability for a previously successful channel to + // remain high. + ctx.assertPairProbability(testTime, node1, 100, prevSuccessProbability) + + // For an untried node, we expected the node probability to be returned. + // This is a weighted average of the results above and the a priori + // probability: 0.62. 
+ expectedNodeProb := (3*aprioriHopProb + 1*prevSuccessProbability) / + (3 + 1 + 0.25 + 0.125) + + ctx.assertPairProbability(testTime, untriedNode, 100, expectedNodeProb) + + // For the previously failed connection with node 1, we expect 0.75 * + // the node probability = 0.47. + ctx.assertPairProbability(testTime, node2, 100, expectedNodeProb*0.75) +} diff --git a/routing/result_interpretation.go b/routing/result_interpretation.go index 74c740149b..70cc08f6a0 100644 --- a/routing/result_interpretation.go +++ b/routing/result_interpretation.go @@ -11,28 +11,46 @@ import ( // Instantiate variables to allow taking a reference from the failure reason. var ( reasonError = channeldb.FailureReasonError - reasonIncorrectDetails = channeldb.FailureReasonIncorrectPaymentDetails + reasonIncorrectDetails = channeldb.FailureReasonPaymentDetails ) // pairResult contains the result of the interpretation of a payment attempt for // a specific node pair. type pairResult struct { - // minPenalizeAmt is the minimum amount for which a penalty should be - // applied based on this result. Only applies to fail results. - minPenalizeAmt lnwire.MilliSatoshi + // amt is the amount that was forwarded for this pair. Can be set to + // zero for failures that are amount independent. + amt lnwire.MilliSatoshi // success indicates whether the payment attempt was successful through // this pair. success bool } +// failPairResult creates a new result struct for a failure. +func failPairResult(minPenalizeAmt lnwire.MilliSatoshi) pairResult { + return pairResult{ + amt: minPenalizeAmt, + } +} + +// newSuccessPairResult creates a new result struct for a success. +func successPairResult(successAmt lnwire.MilliSatoshi) pairResult { + return pairResult{ + success: true, + amt: successAmt, + } +} + // String returns the human-readable representation of a pair result. 
func (p pairResult) String() string { + var resultType string if p.success { - return "success" + resultType = "success" + } else { + resultType = "failed" } - return fmt.Sprintf("failed (minPenalizeAmt=%v)", p.minPenalizeAmt) + return fmt.Sprintf("%v (amt=%v)", resultType, p.amt) } // interpretedResult contains the result of the interpretation of a payment @@ -173,10 +191,8 @@ func (i *interpretedResult) processPaymentOutcomeFinal( i.failPair(route, n-1) // The other hops relayed corectly, so assign those pairs a - // success result. - if n > 2 { - i.successPairRange(route, 0, n-2) - } + // success result. At this point, n >= 2. + i.successPairRange(route, 0, n-2) // We are using wrong payment hash or amount, fail the payment. case *lnwire.FailIncorrectPaymentAmount, @@ -198,6 +214,11 @@ func (i *interpretedResult) processPaymentOutcomeFinal( // deliberately. What to penalize? i.finalFailureReason = &reasonIncorrectDetails + case *lnwire.FailMPPTimeout: + // Assign all pairs a success result, as the payment reached the + // destination correctly. Continue the payment process. + i.successPairRange(route, 0, n-1) + default: // All other errors are considered terminal if coming from the // final hop. They indicate that something is wrong at the @@ -205,7 +226,7 @@ func (i *interpretedResult) processPaymentOutcomeFinal( i.failNode(route, n) // Other channels in the route forwarded correctly. - if n > 2 { + if n >= 2 { i.successPairRange(route, 0, n-2) } @@ -251,7 +272,17 @@ func (i *interpretedResult) processPaymentOutcomeIntermediate( // All nodes up to the failing pair must have forwarded // successfully. - if errorSourceIdx > 2 { + if errorSourceIdx > 1 { + i.successPairRange(route, 0, errorSourceIdx-2) + } + } + + reportNode := func() { + // Fail only the node that reported the failure. + i.failNode(route, errorSourceIdx) + + // Other preceding channels in the route forwarded correctly. 
+ if errorSourceIdx > 1 { i.successPairRange(route, 0, errorSourceIdx-2) } } @@ -288,6 +319,14 @@ func (i *interpretedResult) processPaymentOutcomeIntermediate( reportOutgoing() + // If InvalidOnionPayload is received, we penalize only the reporting + // node. We know the preceding hop didn't corrupt the onion, since the + // reporting node is able to send the failure. We assume that we + // constructed a valid onion payload and that the failure is most likely + // an unknown required type or a bug in their implementation. + case *lnwire.InvalidOnionPayload: + reportNode() + // If the next hop in the route wasn't known or offline, we'll only // penalize the channel set which we attempted to route over. This is // conservative, and it can handle faulty channels between nodes @@ -364,10 +403,33 @@ func (i *interpretedResult) processPaymentOutcomeUnknown(route *route.Route) { i.failPairRange(route, 0, n-1) } -// failNode marks the node indicated by idx in the route as failed. This -// function intentionally panics when the self node is failed. +// failNode marks the node indicated by idx in the route as failed. It also +// marks the incoming and outgoing channels of the node as failed. This function +// intentionally panics when the self node is failed. func (i *interpretedResult) failNode(rt *route.Route, idx int) { + // Mark the node as failing. i.nodeFailure = &rt.Hops[idx-1].PubKeyBytes + + // Mark the incoming connection as failed for the node. We intent to + // penalize as much as we can for a node level failure, including future + // outgoing traffic for this connection. The pair as it is returned by + // getPair is penalized in the original and the reversed direction. Note + // that this will also affect the score of the failing node's peers. + // This is necessary to prevent future routes from keep going into the + // same node again. 
+ incomingChannelIdx := idx - 1 + inPair, _ := getPair(rt, incomingChannelIdx) + i.pairResults[inPair] = failPairResult(0) + i.pairResults[inPair.Reverse()] = failPairResult(0) + + // If not the ultimate node, mark the outgoing connection as failed for + // the node. + if idx < len(rt.Hops) { + outgoingChannelIdx := idx + outPair, _ := getPair(rt, outgoingChannelIdx) + i.pairResults[outPair] = failPairResult(0) + i.pairResults[outPair.Reverse()] = failPairResult(0) + } } // failPairRange marks the node pairs from node fromIdx to node toIdx as failed @@ -387,8 +449,8 @@ func (i *interpretedResult) failPair( pair, _ := getPair(rt, idx) // Report pair in both directions without a minimum penalization amount. - i.pairResults[pair] = pairResult{} - i.pairResults[pair.Reverse()] = pairResult{} + i.pairResults[pair] = failPairResult(0) + i.pairResults[pair.Reverse()] = failPairResult(0) } // failPairBalance marks a pair as failed with a minimum penalization amount. @@ -397,9 +459,7 @@ func (i *interpretedResult) failPairBalance( pair, amt := getPair(rt, channelIdx) - i.pairResults[pair] = pairResult{ - minPenalizeAmt: amt, - } + i.pairResults[pair] = failPairResult(amt) } // successPairRange marks the node pairs from node fromIdx to node toIdx as @@ -408,11 +468,9 @@ func (i *interpretedResult) successPairRange( rt *route.Route, fromIdx, toIdx int) { for idx := fromIdx; idx <= toIdx; idx++ { - pair, _ := getPair(rt, idx) + pair, amt := getPair(rt, idx) - i.pairResults[pair] = pairResult{ - success: true, - } + i.pairResults[pair] = successPairResult(amt) } } diff --git a/routing/result_interpretation_test.go b/routing/result_interpretation_test.go index 8c4498b064..2fc0a7d90a 100644 --- a/routing/result_interpretation_test.go +++ b/routing/result_interpretation_test.go @@ -4,6 +4,7 @@ import ( "reflect" "testing" + "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing/route" @@ -31,6 +32,16 @@ var ( }, } + 
routeThreeHop = route.Route{ + SourcePubKey: hops[0], + TotalAmount: 100, + Hops: []*route.Hop{ + {PubKeyBytes: hops[1], AmtToForward: 99}, + {PubKeyBytes: hops[2], AmtToForward: 97}, + {PubKeyBytes: hops[3], AmtToForward: 94}, + }, + } + routeFourHop = route.Route{ SourcePubKey: hops[0], TotalAmount: 100, @@ -47,6 +58,11 @@ func getTestPair(from, to int) DirectedNodePair { return NewDirectedNodePair(hops[from], hops[to]) } +func getPolicyFailure(from, to int) *DirectedNodePair { + pair := getTestPair(from, to) + return &pair +} + type resultTestCase struct { name string route *route.Route @@ -68,12 +84,8 @@ var resultTestCases = []resultTestCase{ expectedResult: &interpretedResult{ pairResults: map[DirectedNodePair]pairResult{ - getTestPair(0, 1): { - success: true, - }, - getTestPair(1, 2): { - minPenalizeAmt: 99, - }, + getTestPair(0, 1): successPairResult(100), + getTestPair(1, 2): failPairResult(99), }, }, }, @@ -87,12 +99,12 @@ var resultTestCases = []resultTestCase{ expectedResult: &interpretedResult{ pairResults: map[DirectedNodePair]pairResult{ - getTestPair(0, 1): {}, - getTestPair(1, 0): {}, - getTestPair(1, 2): {}, - getTestPair(2, 1): {}, - getTestPair(2, 3): {}, - getTestPair(3, 2): {}, + getTestPair(0, 1): failPairResult(0), + getTestPair(1, 0): failPairResult(0), + getTestPair(1, 2): failPairResult(0), + getTestPair(2, 1): failPairResult(0), + getTestPair(2, 3): failPairResult(0), + getTestPair(3, 2): failPairResult(0), }, }, }, @@ -107,12 +119,8 @@ var resultTestCases = []resultTestCase{ expectedResult: &interpretedResult{ pairResults: map[DirectedNodePair]pairResult{ - getTestPair(0, 1): { - success: true, - }, - getTestPair(1, 2): { - success: true, - }, + getTestPair(0, 1): successPairResult(100), + getTestPair(1, 2): successPairResult(99), }, finalFailureReason: &reasonIncorrectDetails, }, @@ -126,9 +134,7 @@ var resultTestCases = []resultTestCase{ expectedResult: &interpretedResult{ pairResults: map[DirectedNodePair]pairResult{ - 
getTestPair(0, 1): { - success: true, - }, + getTestPair(0, 1): successPairResult(100), }, }, }, @@ -141,12 +147,8 @@ var resultTestCases = []resultTestCase{ expectedResult: &interpretedResult{ pairResults: map[DirectedNodePair]pairResult{ - getTestPair(0, 1): { - success: true, - }, - getTestPair(1, 2): { - success: true, - }, + getTestPair(0, 1): successPairResult(100), + getTestPair(1, 2): successPairResult(99), }, }, }, @@ -160,6 +162,12 @@ var resultTestCases = []resultTestCase{ expectedResult: &interpretedResult{ nodeFailure: &hops[1], + pairResults: map[DirectedNodePair]pairResult{ + getTestPair(1, 0): failPairResult(0), + getTestPair(1, 2): failPairResult(0), + getTestPair(0, 1): failPairResult(0), + getTestPair(2, 1): failPairResult(0), + }, }, }, @@ -174,6 +182,171 @@ var resultTestCases = []resultTestCase{ expectedResult: &interpretedResult{ finalFailureReason: &reasonError, nodeFailure: &hops[1], + pairResults: map[DirectedNodePair]pairResult{ + getTestPair(1, 0): failPairResult(0), + getTestPair(0, 1): failPairResult(0), + }, + }, + }, + + // Tests that a fee insufficient failure to an intermediate hop with + // index 2 results in the first hop marked as success, and then a + // bidirectional failure for the incoming channel. It should also result + // in a policy failure for the outgoing hop. + { + name: "fail fee insufficient intermediate", + route: &routeFourHop, + failureSrcIdx: 2, + failure: lnwire.NewFeeInsufficient(0, lnwire.ChannelUpdate{}), + + expectedResult: &interpretedResult{ + pairResults: map[DirectedNodePair]pairResult{ + getTestPair(0, 1): { + success: true, + amt: 100, + }, + getTestPair(1, 2): {}, + getTestPair(2, 1): {}, + }, + policyFailure: getPolicyFailure(2, 3), + }, + }, + + // Tests an invalid onion payload from a final hop. The final hop should + // be failed while the proceeding hops are reproed as successes. The + // failure is terminal since the receiver can't process our onion. 
+ { + name: "fail invalid onion payload final hop four", + route: &routeFourHop, + failureSrcIdx: 4, + failure: lnwire.NewInvalidOnionPayload(0, 0), + + expectedResult: &interpretedResult{ + pairResults: map[DirectedNodePair]pairResult{ + getTestPair(0, 1): { + success: true, + amt: 100, + }, + getTestPair(1, 2): { + success: true, + amt: 99, + }, + getTestPair(2, 3): { + success: true, + amt: 97, + }, + getTestPair(4, 3): {}, + getTestPair(3, 4): {}, + }, + finalFailureReason: &reasonError, + nodeFailure: &hops[4], + }, + }, + + // Tests an invalid onion payload from a final hop on a three hop route. + { + name: "fail invalid onion payload final hop three", + route: &routeThreeHop, + failureSrcIdx: 3, + failure: lnwire.NewInvalidOnionPayload(0, 0), + + expectedResult: &interpretedResult{ + pairResults: map[DirectedNodePair]pairResult{ + getTestPair(0, 1): { + success: true, + amt: 100, + }, + getTestPair(1, 2): { + success: true, + amt: 99, + }, + getTestPair(3, 2): {}, + getTestPair(2, 3): {}, + }, + finalFailureReason: &reasonError, + nodeFailure: &hops[3], + }, + }, + + // Tests an invalid onion payload from an intermediate hop. Only the + // reporting node should be failed. The failure is non-terminal since we + // can still try other paths. + { + name: "fail invalid onion payload intermediate", + route: &routeFourHop, + failureSrcIdx: 3, + failure: lnwire.NewInvalidOnionPayload(0, 0), + + expectedResult: &interpretedResult{ + pairResults: map[DirectedNodePair]pairResult{ + getTestPair(0, 1): { + success: true, + amt: 100, + }, + getTestPair(1, 2): { + success: true, + amt: 99, + }, + getTestPair(3, 2): {}, + getTestPair(3, 4): {}, + getTestPair(2, 3): {}, + getTestPair(4, 3): {}, + }, + nodeFailure: &hops[3], + }, + }, + + // Tests an invalid onion payload in a direct peer that is also the + // final hop. The final node should be failed and the error is terminal + // since the remote node can't process our onion. 
+ { + name: "fail invalid onion payload direct", + route: &routeOneHop, + failureSrcIdx: 1, + failure: lnwire.NewInvalidOnionPayload(0, 0), + + expectedResult: &interpretedResult{ + pairResults: map[DirectedNodePair]pairResult{ + getTestPair(1, 0): {}, + getTestPair(0, 1): {}, + }, + finalFailureReason: &reasonError, + nodeFailure: &hops[1], + }, + }, + + // Tests a single hop mpp timeout. Test that final node is not + // penalized. This is a temporary measure while we decide how to + // penalize mpp timeouts. + { + name: "one hop mpp timeout", + route: &routeOneHop, + failureSrcIdx: 1, + failure: &lnwire.FailMPPTimeout{}, + + expectedResult: &interpretedResult{ + pairResults: map[DirectedNodePair]pairResult{ + getTestPair(0, 1): successPairResult(100), + }, + nodeFailure: nil, + }, + }, + + // Tests a two hop mpp timeout. Test that final node is not penalized + // and the intermediate hop is attributed the success. This is a + // temporary measure while we decide how to penalize mpp timeouts. + { + name: "two hop mpp timeout", + route: &routeTwoHop, + failureSrcIdx: 2, + failure: &lnwire.FailMPPTimeout{}, + + expectedResult: &interpretedResult{ + pairResults: map[DirectedNodePair]pairResult{ + getTestPair(0, 1): successPairResult(100), + getTestPair(1, 2): successPairResult(99), + }, + nodeFailure: nil, }, }, } @@ -199,7 +372,8 @@ func TestResultInterpretation(t *testing.T) { } if !reflect.DeepEqual(i, expected) { - t.Fatal("unexpected result") + t.Fatalf("unexpected result\nwant: %v\ngot: %v", + spew.Sdump(expected), spew.Sdump(i)) } }) } diff --git a/routing/route/route.go b/routing/route/route.go index ed51d7d022..63944af18a 100644 --- a/routing/route/route.go +++ b/routing/route/route.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "encoding/hex" + "errors" "fmt" "io" "strconv" @@ -19,9 +20,25 @@ import ( // VertexSize is the size of the array to store a vertex. 
const VertexSize = 33 -// ErrNoRouteHopsProvided is returned when a caller attempts to construct a new -// sphinx packet, but provides an empty set of hops for each route. -var ErrNoRouteHopsProvided = fmt.Errorf("empty route hops provided") +var ( + // ErrNoRouteHopsProvided is returned when a caller attempts to + // construct a new sphinx packet, but provides an empty set of hops for + // each route. + ErrNoRouteHopsProvided = fmt.Errorf("empty route hops provided") + + // ErrMaxRouteHopsExceeded is returned when a caller attempts to + // construct a new sphinx packet, but provides too many hops. + ErrMaxRouteHopsExceeded = fmt.Errorf("route has too many hops") + + // ErrIntermediateMPPHop is returned when a hop tries to deliver an MPP + // record to an intermediate hop, only final hops can receive MPP + // records. + ErrIntermediateMPPHop = errors.New("cannot send MPP to intermediate") + + // ErrAMPMissingMPP is returned when the caller tries to attach an AMP + // record but no MPP record is presented for the final hop. + ErrAMPMissingMPP = errors.New("cannot send AMP without MPP record") +) // Vertex is a simple alias for the serialization of a compressed Bitcoin // public key. @@ -94,9 +111,17 @@ type Hop struct { // carries as a fee will be subtracted by the hop. AmtToForward lnwire.MilliSatoshi - // TLVRecords if non-nil are a set of additional TLV records that + // MPP encapsulates the data required for option_mpp. This field should + // only be set for the final hop. + MPP *record.MPP + + // AMP encapsulates the data required for option_amp. This field should + // only be set for the final hop. + AMP *record.AMP + + // CustomRecords if non-nil are a set of additional TLV records that // should be included in the forwarding instructions for this node. 
- TLVRecords []tlv.Record + CustomRecords record.CustomSet // LegacyPayload if true, then this signals that this node doesn't // understand the new TLV payload, so we must instead use the legacy @@ -104,6 +129,23 @@ type Hop struct { LegacyPayload bool } +// Copy returns a deep copy of the Hop. +func (h *Hop) Copy() *Hop { + c := *h + + if h.MPP != nil { + m := *h.MPP + c.MPP = &m + } + + if h.AMP != nil { + a := *h.AMP + c.AMP = &a + } + + return &c +} + // PackHopPayload writes to the passed io.Writer, the series of byes that can // be placed directly into the per-hop payload (EOB) for this hop. This will // include the required routing fields, as well as serializing any of the @@ -140,8 +182,32 @@ func (h *Hop) PackHopPayload(w io.Writer, nextChanID uint64) error { ) } + // If an MPP record is destined for this hop, ensure that we only ever + // attach it to the final hop. Otherwise the route was constructed + // incorrectly. + if h.MPP != nil { + if nextChanID == 0 { + records = append(records, h.MPP.Record()) + } else { + return ErrIntermediateMPPHop + } + } + + // If an AMP record is destined for this hop, ensure that we only ever + // attach it if we also have an MPP record. We can infer that this is + // already a final hop if MPP is non-nil otherwise we would have exited + // above. + if h.AMP != nil { + if h.MPP != nil { + records = append(records, h.AMP.Record()) + } else { + return ErrAMPMissingMPP + } + } + // Append any custom types destined for this hop. - records = append(records, h.TLVRecords...) + tlvRecords := tlv.MapToRecords(h.CustomRecords) + records = append(records, tlvRecords...) // To ensure we produce a canonical stream, we'll sort the records // before encoding them as a stream in the hop payload. @@ -155,6 +221,58 @@ func (h *Hop) PackHopPayload(w io.Writer, nextChanID uint64) error { return tlvStream.Encode(w) } +// Size returns the total size this hop's payload would take up in the onion +// packet. 
+func (h *Hop) PayloadSize(nextChanID uint64) uint64 { + if h.LegacyPayload { + return sphinx.LegacyHopDataSize + } + + var payloadSize uint64 + + addRecord := func(tlvType tlv.Type, length uint64) { + payloadSize += tlv.VarIntSize(uint64(tlvType)) + + tlv.VarIntSize(length) + length + } + + // Add amount size. + addRecord(record.AmtOnionType, tlv.SizeTUint64(uint64(h.AmtToForward))) + + // Add lock time size. + addRecord( + record.LockTimeOnionType, + tlv.SizeTUint64(uint64(h.OutgoingTimeLock)), + ) + + // Add next hop if present. + if nextChanID != 0 { + addRecord(record.NextHopOnionType, 8) + } + + // Add mpp if present. + if h.MPP != nil { + addRecord(record.MPPOnionType, h.MPP.PayloadSize()) + } + + // Add amp if present. + if h.AMP != nil { + addRecord(record.AMPOnionType, h.AMP.PayloadSize()) + } + + // Add custom records. + for k, v := range h.CustomRecords { + addRecord(tlv.Type(k), uint64(len(v))) + } + + // Add the size required to encode the payload length. + payloadSize += tlv.VarIntSize(payloadSize) + + // Add HMAC. + payloadSize += sphinx.HMACSize + + return payloadSize +} + // Route represents a path through the channel graph which runs over one or // more channels in succession. This struct carries all the information // required to craft the Sphinx onion packet, and send the payment along the @@ -186,6 +304,18 @@ type Route struct { Hops []*Hop } +// Copy returns a deep copy of the Route. +func (r *Route) Copy() *Route { + c := *r + + c.Hops = make([]*Hop, len(r.Hops)) + for i := range r.Hops { + c.Hops[i] = r.Hops[i].Copy() + } + + return &c +} + // HopFee returns the fee charged by the route hop indicated by hopIndex. 
func (r *Route) HopFee(hopIndex int) lnwire.MilliSatoshi { var incomingAmt lnwire.MilliSatoshi @@ -207,7 +337,25 @@ func (r *Route) TotalFees() lnwire.MilliSatoshi { return 0 } - return r.TotalAmount - r.Hops[len(r.Hops)-1].AmtToForward + return r.TotalAmount - r.ReceiverAmt() +} + +// ReceiverAmt is the amount received by the final hop of this route. +func (r *Route) ReceiverAmt() lnwire.MilliSatoshi { + if len(r.Hops) == 0 { + return 0 + } + + return r.Hops[len(r.Hops)-1].AmtToForward +} + +// FinalHop returns the last hop of the route, or nil if the route is empty. +func (r *Route) FinalHop() *Hop { + if len(r.Hops) == 0 { + return nil + } + + return r.Hops[len(r.Hops)-1] } // NewRouteFromHops creates a new Route structure from the minimally required @@ -242,6 +390,16 @@ func NewRouteFromHops(amtToSend lnwire.MilliSatoshi, timeLock uint32, func (r *Route) ToSphinxPath() (*sphinx.PaymentPath, error) { var path sphinx.PaymentPath + // We can only construct a route if there are hops provided. + if len(r.Hops) == 0 { + return nil, ErrNoRouteHopsProvided + } + + // Check maximum route length. + if len(r.Hops) > sphinx.NumMaxHops { + return nil, ErrMaxRouteHopsExceeded + } + // For each hop encoded within the route, we'll convert the hop struct // to an OnionHop with matching per-hop payload within the path as used // by the sphinx package. 
@@ -316,15 +474,19 @@ func (r *Route) ToSphinxPath() (*sphinx.PaymentPath, error) { func (r *Route) String() string { var b strings.Builder + amt := r.TotalAmount for i, hop := range r.Hops { if i > 0 { - b.WriteString(",") + b.WriteString(" -> ") } - b.WriteString(strconv.FormatUint(hop.ChannelID, 10)) + b.WriteString(fmt.Sprintf("%v (%v)", + strconv.FormatUint(hop.ChannelID, 10), + amt, + )) + amt = hop.AmtToForward } - return fmt.Sprintf("amt=%v, fees=%v, tl=%v, chans=%v", - r.TotalAmount-r.TotalFees(), r.TotalFees(), r.TotalTimeLock, - b.String(), + return fmt.Sprintf("%v, cltv %v", + b.String(), r.TotalTimeLock, ) } diff --git a/routing/route/route_test.go b/routing/route/route_test.go index 92b0ee0df1..991175f493 100644 --- a/routing/route/route_test.go +++ b/routing/route/route_test.go @@ -1,24 +1,43 @@ package route import ( + "bytes" + "encoding/hex" "testing" + "github.com/btcsuite/btcd/btcec" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" +) + +var ( + testPrivKeyBytes, _ = hex.DecodeString("e126f68f7eafcc8b74f54d269fe206be715000f94dac067d1c04a8ca3b2db734") + _, testPubKey = btcec.PrivKeyFromBytes(btcec.S256(), testPrivKeyBytes) + testPubKeyBytes, _ = NewVertexFromBytes(testPubKey.SerializeCompressed()) ) // TestRouteTotalFees checks that a route reports the expected total fee. func TestRouteTotalFees(t *testing.T) { t.Parallel() - // Make sure empty route returns a 0 fee. + // Make sure empty route returns a 0 fee, and zero amount. r := &Route{} if r.TotalFees() != 0 { t.Fatalf("expected 0 fees, got %v", r.TotalFees()) } + if r.ReceiverAmt() != 0 { + t.Fatalf("expected 0 amt, got %v", r.ReceiverAmt()) + } + + // Make sure empty route won't be allowed in the constructor. 
+ amt := lnwire.MilliSatoshi(1000) + _, err := NewRouteFromHops(amt, 100, Vertex{}, []*Hop{}) + if err != ErrNoRouteHopsProvided { + t.Fatalf("expected ErrNoRouteHopsProvided, got %v", err) + } // For one-hop routes the fee should be 0, since the last node will // receive the full amount. - amt := lnwire.MilliSatoshi(1000) hops := []*Hop{ { PubKeyBytes: Vertex{}, @@ -27,7 +46,7 @@ func TestRouteTotalFees(t *testing.T) { AmtToForward: amt, }, } - r, err := NewRouteFromHops(amt, 100, Vertex{}, hops) + r, err = NewRouteFromHops(amt, 100, Vertex{}, hops) if err != nil { t.Fatal(err) } @@ -36,6 +55,10 @@ func TestRouteTotalFees(t *testing.T) { t.Fatalf("expected 0 fees, got %v", r.TotalFees()) } + if r.ReceiverAmt() != amt { + t.Fatalf("expected %v amt, got %v", amt, r.ReceiverAmt()) + } + // Append the route with a node, making the first one take a fee. fee := lnwire.MilliSatoshi(100) hops = append(hops, &Hop{ @@ -55,4 +78,138 @@ func TestRouteTotalFees(t *testing.T) { t.Fatalf("expected %v fees, got %v", fee, r.TotalFees()) } + if r.ReceiverAmt() != amt-fee { + t.Fatalf("expected %v amt, got %v", amt-fee, r.ReceiverAmt()) + } +} + +var ( + testAmt = lnwire.MilliSatoshi(1000) + testAddr = [32]byte{0x01, 0x02} +) + +// TestMPPHop asserts that a Hop will encode a non-nil MPP to final nodes, and +// fail when trying to send to intermediaries. +func TestMPPHop(t *testing.T) { + t.Parallel() + + hop := Hop{ + ChannelID: 1, + OutgoingTimeLock: 44, + AmtToForward: testAmt, + LegacyPayload: false, + MPP: record.NewMPP(testAmt, testAddr), + } + + // Encoding an MPP record to an intermediate hop should result in a + // failure. + var b bytes.Buffer + err := hop.PackHopPayload(&b, 2) + if err != ErrIntermediateMPPHop { + t.Fatalf("expected err: %v, got: %v", + ErrIntermediateMPPHop, err) + } + + // Encoding an MPP record to a final hop should be successful. 
+ b.Reset() + err = hop.PackHopPayload(&b, 0) + if err != nil { + t.Fatalf("expected err: %v, got: %v", nil, err) + } +} + +// TestAMPHop asserts that a Hop will encode a non-nil AMP to final nodes of an +// MPP record is also present, and fail otherwise. +func TestAMPHop(t *testing.T) { + t.Parallel() + + hop := Hop{ + ChannelID: 1, + OutgoingTimeLock: 44, + AmtToForward: testAmt, + LegacyPayload: false, + AMP: record.NewAMP([32]byte{}, [32]byte{}, 3), + } + + // Encoding an AMP record to an intermediate hop w/o an MPP record + // should result in a failure. + var b bytes.Buffer + err := hop.PackHopPayload(&b, 2) + if err != ErrAMPMissingMPP { + t.Fatalf("expected err: %v, got: %v", + ErrAMPMissingMPP, err) + } + + // Encoding an AMP record to a final hop w/o an MPP record should result + // in a failure. + b.Reset() + err = hop.PackHopPayload(&b, 0) + if err != ErrAMPMissingMPP { + t.Fatalf("expected err: %v, got: %v", + ErrAMPMissingMPP, err) + } + + // Encoding an AMP record to a final hop w/ an MPP record should be + // successful. + hop.MPP = record.NewMPP(testAmt, testAddr) + b.Reset() + err = hop.PackHopPayload(&b, 0) + if err != nil { + t.Fatalf("expected err: %v, got: %v", nil, err) + } +} + +// TestPayloadSize tests the payload size calculation that is provided by Hop +// structs. 
+func TestPayloadSize(t *testing.T) { + hops := []*Hop{ + { + PubKeyBytes: testPubKeyBytes, + AmtToForward: 1000, + OutgoingTimeLock: 600000, + ChannelID: 3432483437438, + LegacyPayload: true, + }, + { + PubKeyBytes: testPubKeyBytes, + AmtToForward: 1200, + OutgoingTimeLock: 700000, + ChannelID: 63584534844, + }, + { + PubKeyBytes: testPubKeyBytes, + AmtToForward: 1200, + OutgoingTimeLock: 700000, + MPP: record.NewMPP(500, [32]byte{}), + AMP: record.NewAMP([32]byte{}, [32]byte{}, 8), + CustomRecords: map[uint64][]byte{ + 100000: {1, 2, 3}, + 1000000: {4, 5}, + }, + }, + } + + rt := Route{ + Hops: hops, + } + path, err := rt.ToSphinxPath() + if err != nil { + t.Fatal(err) + } + + for i, onionHop := range path[:path.TrueRouteLength()] { + hop := hops[i] + var nextChan uint64 + if i < len(hops)-1 { + nextChan = hops[i+1].ChannelID + } + + expected := uint64(onionHop.HopPayload.NumBytes()) + actual := hop.PayloadSize(nextChan) + if expected != actual { + t.Fatalf("unexpected payload size at hop %v: "+ + "expected %v, got %v", + i, expected, actual) + } + } } diff --git a/routing/router.go b/routing/router.go index 3908a93d89..c3773fe6a6 100644 --- a/routing/router.go +++ b/routing/router.go @@ -11,12 +11,13 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lntypes" @@ -24,10 +25,10 @@ import ( "github.com/lightningnetwork/lnd/lnwallet/chanvalidate" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/multimutex" + "github.com/lightningnetwork/lnd/record" 
"github.com/lightningnetwork/lnd/routing/chainview" "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/ticker" - "github.com/lightningnetwork/lnd/tlv" "github.com/lightningnetwork/lnd/zpay32" ) @@ -158,13 +159,7 @@ type PaymentSessionSource interface { // routes to the given target. An optional set of routing hints can be // provided in order to populate additional edges to explore when // finding a path to the payment's destination. - NewPaymentSession(routeHints [][]zpay32.HopHint, - target route.Vertex) (PaymentSession, error) - - // NewPaymentSessionForRoute creates a new paymentSession instance that - // is just used for failure reporting to missioncontrol, and will only - // attempt the given route. - NewPaymentSessionForRoute(preBuiltRoute *route.Route) PaymentSession + NewPaymentSession(p *LightningPayment) (PaymentSession, error) // NewPaymentSessionEmpty creates a new paymentSession instance that is // empty, and will be exhausted immediately. Used for failure reporting @@ -224,6 +219,10 @@ type ChannelPolicy struct { // MaxHTLC is the maximum HTLC size including fees we are allowed to // forward over this channel. MaxHTLC lnwire.MilliSatoshi + + // MinHTLC is the minimum HTLC size including fees we are allowed to + // forward over this channel. + MinHTLC *lnwire.MilliSatoshi } // Config defines the configuration for the ChannelRouter. ALL elements within @@ -298,6 +297,9 @@ type Config struct { // PathFindingConfig defines global path finding parameters. PathFindingConfig PathFindingConfig + + // Clock is mockable time provider. + Clock clock.Clock } // EdgeLocator is a struct used to identify a specific edge. @@ -524,16 +526,17 @@ func (r *ChannelRouter) Start() error { // We create a dummy, empty payment session such that // we won't make another payment attempt when the // result for the in-flight attempt is received. - // - // PayAttemptTime doesn't need to be set, as there is - // only a single attempt. 
paySession := r.cfg.SessionSource.NewPaymentSessionEmpty() - lPayment := &LightningPayment{ - PaymentHash: payment.Info.PaymentHash, - } - - _, _, err = r.sendPayment(payment.Attempt, lPayment, paySession) + // We pass in a zero timeout value, to indicate we + // don't need it to timeout. It will stop immediately + // after the existing attempt has finished anyway. We + // also set a zero fee limit, as no more routes should + // be tried. + _, _, err := r.sendPayment( + payment.Info.Value, 0, + payment.Info.PaymentHash, 0, paySession, + ) if err != nil { log.Errorf("Resuming payment with hash %v "+ "failed: %v.", payment.Info.PaymentHash, err) @@ -852,7 +855,6 @@ func (r *ChannelRouter) networkHandler() { graphPruneTicker := time.NewTicker(r.cfg.GraphPruneInterval) defer graphPruneTicker.Stop() - r.statTicker.Resume() defer r.statTicker.Stop() r.stats.Reset() @@ -862,6 +864,12 @@ func (r *ChannelRouter) networkHandler() { validationBarrier := NewValidationBarrier(runtime.NumCPU()*4, r.quit) for { + + // If there are stats, resume the statTicker. + if !r.stats.Empty() { + r.statTicker.Resume() + } + select { // A new fully validated network update has just arrived. As a // result we'll modify the channel graph accordingly depending @@ -1344,8 +1352,6 @@ func (r *ChannelRouter) processUpdate(msg interface{}) error { return errors.Errorf("wrong routing update message type") } - r.statTicker.Resume() - return nil } @@ -1394,26 +1400,11 @@ type routingMsg struct { // factoring in channel capacities and cumulative fees along the route. 
func (r *ChannelRouter) FindRoute(source, target route.Vertex, amt lnwire.MilliSatoshi, restrictions *RestrictParams, - destTlvRecords []tlv.Record, - finalExpiry ...uint16) (*route.Route, error) { + destCustomRecords record.CustomSet, + routeHints map[route.Vertex][]*channeldb.ChannelEdgePolicy, + finalExpiry uint16) (*route.Route, error) { - var finalCLTVDelta uint16 - if len(finalExpiry) == 0 { - finalCLTVDelta = zpay32.DefaultFinalCLTVDelta - } else { - finalCLTVDelta = finalExpiry[0] - } - - log.Debugf("Searching for path to %x, sending %v", target, amt) - - // We can short circuit the routing by opportunistically checking to - // see if the target vertex event exists in the current graph. - if _, exists, err := r.cfg.Graph.HasLightningNode(target); err != nil { - return nil, err - } else if !exists { - log.Debugf("Target %x is not in known graph", target) - return nil, newErrf(ErrTargetNotInNetwork, "target not found") - } + log.Debugf("Searching for path to %v, sending %v", target, amt) // We'll attempt to obtain a set of bandwidth hints that can help us // eliminate certain routes early on in the path finding process. @@ -1424,31 +1415,51 @@ func (r *ChannelRouter) FindRoute(source, target route.Vertex, return nil, err } + // We'll fetch the current block height so we can properly calculate the + // required HTLC time locks within the route. + _, currentHeight, err := r.cfg.Chain.GetBestBlock() + if err != nil { + return nil, err + } + // Now that we know the destination is reachable within the graph, we'll // execute our path finding algorithm. 
- path, err := findPath( - &graphParams{ - graph: r.cfg.Graph, - bandwidthHints: bandwidthHints, - }, - restrictions, &r.cfg.PathFindingConfig, - source, target, amt, - ) + finalHtlcExpiry := currentHeight + int32(finalExpiry) + + routingTx, err := newDbRoutingTx(r.cfg.Graph) if err != nil { return nil, err } + defer func() { + err := routingTx.close() + if err != nil { + log.Errorf("Error closing db tx: %v", err) + } + }() - // We'll fetch the current block height so we can properly calculate the - // required HTLC time locks within the route. - _, currentHeight, err := r.cfg.Chain.GetBestBlock() + path, err := findPath( + &graphParams{ + additionalEdges: routeHints, + bandwidthHints: bandwidthHints, + graph: routingTx, + }, + restrictions, + &r.cfg.PathFindingConfig, + source, target, amt, finalHtlcExpiry, + ) if err != nil { return nil, err } // Create the route with absolute time lock values. route, err := newRoute( - amt, source, path, uint32(currentHeight), finalCLTVDelta, - destTlvRecords, + source, path, uint32(currentHeight), + finalHopParams{ + amt: amt, + totalAmt: amt, + cltvDelta: finalExpiry, + records: destCustomRecords, + }, ) if err != nil { return nil, err @@ -1480,13 +1491,6 @@ func generateNewSessionKey() (*btcec.PrivateKey, error) { func generateSphinxPacket(rt *route.Route, paymentHash []byte, sessionKey *btcec.PrivateKey) ([]byte, *sphinx.Circuit, error) { - // As a sanity check, we'll ensure that the set of hops has been - // properly filled in, otherwise, we won't actually be able to - // construct a route. - if len(rt.Hops) == 0 { - return nil, nil, route.ErrNoRouteHopsProvided - } - // Now that we know we have an actual route, we'll map the route into a // sphinx payument path which includes per-hop paylods for each hop // that give each node within the route the necessary information @@ -1512,6 +1516,7 @@ func generateSphinxPacket(rt *route.Route, paymentHash []byte, // privacy preserving source routing across the network. 
sphinxPacket, err := sphinx.NewOnionPacket( sphinxPath, sessionKey, paymentHash, + sphinx.DeterministicPacketFiller, ) if err != nil { return nil, nil, err @@ -1593,15 +1598,36 @@ type LightningPayment struct { // hop. If nil, any channel may be used. OutgoingChannelID *uint64 + // LastHop is the pubkey of the last node before the final destination + // is reached. If nil, any node may be used. + LastHop *route.Vertex + + // DestFeatures specifies the set of features we assume the final node + // has for pathfinding. Typically these will be taken directly from an + // invoice, but they can also be manually supplied or assumed by the + // sender. If a nil feature vector is provided, the router will try to + // fallback to the graph in order to load a feature vector for a node in + // the public graph. + DestFeatures *lnwire.FeatureVector + + // PaymentAddr is the payment address specified by the receiver. This + // field should be a random 32-byte nonce presented in the receiver's + // invoice to prevent probing of the destination. + PaymentAddr *[32]byte + // PaymentRequest is an optional payment request that this payment is // attempting to complete. PaymentRequest []byte - // FinalDestRecords are TLV records that are to be sent to the final + // DestCustomRecords are TLV records that are to be sent to the final // hop in the new onion payload format. If the destination does not // understand this new onion payload format, then the payment will // fail. - FinalDestRecords []tlv.Record + DestCustomRecords record.CustomSet + + // MaxParts is the maximum number of partial payments that may be used + // to complete the full amount. 
+ MaxParts uint32 } // SendPayment attempts to send a payment as described within the passed @@ -1619,9 +1645,15 @@ func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, return [32]byte{}, nil, err } + log.Tracef("Dispatching SendPayment for lightning payment: %v", + spewPayment(payment)) + // Since this is the first time this payment is being made, we pass nil // for the existing attempt. - return r.sendPayment(nil, payment, paySession) + return r.sendPayment( + payment.Amount, payment.FeeLimit, payment.PaymentHash, + payment.PayAttemptTimeout, paySession, + ) } // SendPaymentAsync is the non-blocking version of SendPayment. The payment @@ -1638,7 +1670,13 @@ func (r *ChannelRouter) SendPaymentAsync(payment *LightningPayment) error { go func() { defer r.wg.Done() - _, _, err := r.sendPayment(nil, payment, paySession) + log.Tracef("Dispatching SendPayment for lightning payment: %v", + spewPayment(payment)) + + _, _, err := r.sendPayment( + payment.Amount, payment.FeeLimit, payment.PaymentHash, + payment.PayAttemptTimeout, paySession, + ) if err != nil { log.Errorf("Payment with hash %x failed: %v", payment.PaymentHash, err) @@ -1648,6 +1686,28 @@ func (r *ChannelRouter) SendPaymentAsync(payment *LightningPayment) error { return nil } +// spewPayment returns a log closures that provides a spewed string +// representation of the passed payment. +func spewPayment(payment *LightningPayment) logClosure { + return newLogClosure(func() string { + // Make a copy of the payment with a nilled Curve + // before spewing. 
+ var routeHints [][]zpay32.HopHint + for _, routeHint := range payment.RouteHints { + var hopHints []zpay32.HopHint + for _, hopHint := range routeHint { + h := hopHint.Copy() + h.NodeID.Curve = nil + hopHints = append(hopHints, h) + } + routeHints = append(routeHints, hopHints) + } + p := *payment + p.RouteHints = routeHints + return spew.Sdump(p) + }) +} + // preparePayment creates the payment session and registers the payment with the // control tower. func (r *ChannelRouter) preparePayment(payment *LightningPayment) ( @@ -1656,9 +1716,7 @@ func (r *ChannelRouter) preparePayment(payment *LightningPayment) ( // Before starting the HTLC routing attempt, we'll create a fresh // payment session which will report our errors back to mission // control. - paySession, err := r.cfg.SessionSource.NewPaymentSession( - payment.RouteHints, payment.Target, - ) + paySession, err := r.cfg.SessionSource.NewPaymentSession(payment) if err != nil { return nil, err } @@ -1670,7 +1728,7 @@ func (r *ChannelRouter) preparePayment(payment *LightningPayment) ( info := &channeldb.PaymentCreationInfo{ PaymentHash: payment.PaymentHash, Value: payment.Amount, - CreationDate: time.Now(), + CreationTime: r.cfg.Clock.Now(), PaymentRequest: payment.PaymentRequest, } @@ -1685,69 +1743,124 @@ func (r *ChannelRouter) preparePayment(payment *LightningPayment) ( // SendToRoute attempts to send a payment with the given hash through the // provided route. This function is blocking and will return the obtained // preimage if the payment is successful or the full error in case of a failure. -func (r *ChannelRouter) SendToRoute(hash lntypes.Hash, route *route.Route) ( +func (r *ChannelRouter) SendToRoute(hash lntypes.Hash, rt *route.Route) ( lntypes.Preimage, error) { - // Create a payment session for just this route. - paySession := r.cfg.SessionSource.NewPaymentSessionForRoute(route) - // Calculate amount paid to receiver. 
- amt := route.TotalAmount - route.TotalFees() + amt := rt.ReceiverAmt() + + // If this is meant as a MP payment shard, we set the amount + // for the creating info to the total amount of the payment. + finalHop := rt.Hops[len(rt.Hops)-1] + mpp := finalHop.MPP + if mpp != nil { + amt = mpp.TotalMsat() + } // Record this payment hash with the ControlTower, ensuring it is not // already in-flight. info := &channeldb.PaymentCreationInfo{ PaymentHash: hash, Value: amt, - CreationDate: time.Now(), + CreationTime: r.cfg.Clock.Now(), PaymentRequest: nil, } err := r.cfg.Control.InitPayment(hash, info) - if err != nil { + switch { + // If this is an MPP attempt and the hash is already registered with + // the database, we can go on to launch the shard. + case err == channeldb.ErrPaymentInFlight && mpp != nil: + + // Any other error is not tolerated. + case err != nil: return [32]byte{}, err } - // Create a (mostly) dummy payment, as the created payment session is - // not going to do path finding. - // TODO(halseth): sendPayment doesn't really need LightningPayment, make - // it take just needed fields instead. - // - // PayAttemptTime doesn't need to be set, as there is only a single - // attempt. - payment := &LightningPayment{ - PaymentHash: hash, + log.Tracef("Dispatching SendToRoute for hash %v: %v", + hash, newLogClosure(func() string { + return spew.Sdump(rt) + }), + ) + + // Launch a shard along the given route. + sh := &shardHandler{ + router: r, + paymentHash: hash, } - // Since this is the first time this payment is being made, we pass nil - // for the existing attempt. - preimage, _, err := r.sendPayment(nil, payment, paySession) + var shardError error + attempt, outcome, err := sh.launchShard(rt) + + // With SendToRoute, it can happen that the route exceeds protocol + // constraints. Mark the payment as failed with an internal error. 
+ if err == route.ErrMaxRouteHopsExceeded || + err == sphinx.ErrMaxRoutingInfoSizeExceeded { + + log.Debugf("Invalid route provided for payment %x: %v", + hash, err) + + controlErr := r.cfg.Control.Fail( + hash, channeldb.FailureReasonError, + ) + if controlErr != nil { + return [32]byte{}, controlErr + } + } + + // In any case, don't continue if there is an error. if err != nil { - // SendToRoute should return a structured error. In case the - // provided route fails, payment lifecycle will return a - // noRouteError with the structured error embedded. - if noRouteError, ok := err.(errNoRoute); ok { - if noRouteError.lastError == nil { - return lntypes.Preimage{}, - errors.New("failure message missing") - } + return lntypes.Preimage{}, err + } + + switch { + // Failed to launch shard. + case outcome.err != nil: + shardError = outcome.err + + // Shard successfully launched, wait for the result to be available. + default: + result, err := sh.collectResult(attempt) + if err != nil { + return lntypes.Preimage{}, err + } - return lntypes.Preimage{}, noRouteError.lastError + // We got a successful result. + if result.err == nil { + return result.preimage, nil } + // The shard failed, break switch to handle it. + shardError = result.err + } + + // Since for SendToRoute we won't retry in case the shard fails, we'll + // mark the payment failed with the control tower immediately. Process + // the error to check if it maps into a terminal error code, if not use + // a generic NO_ROUTE error. + reason := r.processSendError( + attempt.AttemptID, &attempt.Route, shardError, + ) + if reason == nil { + r := channeldb.FailureReasonNoRoute + reason = &r + } + + err = r.cfg.Control.Fail(hash, *reason) + if err != nil { return lntypes.Preimage{}, err } - return preimage, nil + return lntypes.Preimage{}, shardError } -// sendPayment attempts to send a payment as described within the passed -// LightningPayment. 
This function is blocking and will return either: when the -// payment is successful, or all candidates routes have been attempted and -// resulted in a failed payment. If the payment succeeds, then a non-nil Route -// will be returned which describes the path the successful payment traversed -// within the network to reach the destination. Additionally, the payment -// preimage will also be returned. +// sendPayment attempts to send a payment to the passed payment hash. This +// function is blocking and will return either: when the payment is successful, +// or all candidates routes have been attempted and resulted in a failed +// payment. If the payment succeeds, then a non-nil Route will be returned +// which describes the path the successful payment traversed within the network +// to reach the destination. Additionally, the payment preimage will also be +// returned. // // The existing attempt argument should be set to nil if this is a payment that // haven't had any payment attempt sent to the switch yet. If it has had an @@ -1758,29 +1871,9 @@ func (r *ChannelRouter) SendToRoute(hash lntypes.Hash, route *route.Route) ( // router will call this method for every payment still in-flight according to // the ControlTower. func (r *ChannelRouter) sendPayment( - existingAttempt *channeldb.PaymentAttemptInfo, - payment *LightningPayment, paySession PaymentSession) ( - [32]byte, *route.Route, error) { - - log.Tracef("Dispatching route for lightning payment: %v", - newLogClosure(func() string { - // Make a copy of the payment with a nilled Curve - // before spewing. 
- var routeHints [][]zpay32.HopHint - for _, routeHint := range payment.RouteHints { - var hopHints []zpay32.HopHint - for _, hopHint := range routeHint { - h := hopHint.Copy() - h.NodeID.Curve = nil - hopHints = append(hopHints, h) - } - routeHints = append(routeHints, hopHints) - } - p := *payment - p.RouteHints = routeHints - return spew.Sdump(p) - }), - ) + totalAmt, feeLimit lnwire.MilliSatoshi, paymentHash lntypes.Hash, + timeout time.Duration, + paySession PaymentSession) ([32]byte, *route.Route, error) { // We'll also fetch the current block height so we can properly // calculate the required HTLC time locks within the route. @@ -1792,21 +1885,19 @@ func (r *ChannelRouter) sendPayment( // Now set up a paymentLifecycle struct with these params, such that we // can resume the payment from the current state. p := &paymentLifecycle{ - router: r, - payment: payment, - paySession: paySession, - currentHeight: currentHeight, - finalCLTVDelta: uint16(payment.FinalCLTVDelta), - attempt: existingAttempt, - circuit: nil, - lastError: nil, + router: r, + totalAmount: totalAmt, + feeLimit: feeLimit, + paymentHash: paymentHash, + paySession: paySession, + currentHeight: currentHeight, } // If a timeout is specified, create a timeout channel. If no timeout is // specified, the channel is left nil and will never abort the payment // loop. - if payment.PayAttemptTimeout != 0 { - p.timeoutChan = time.After(payment.PayAttemptTimeout) + if timeout != 0 { + p.timeoutChan = time.After(timeout) } return p.resumePayment() @@ -1847,7 +1938,7 @@ func (r *ChannelRouter) tryApplyChannelUpdate(rt *route.Route, // Apply channel update. 
if !r.applyChannelUpdate(update, errSource) { - log.Debugf("Invalid channel update received: node=%x", + log.Debugf("Invalid channel update received: node=%v", errVertex) } @@ -1886,18 +1977,33 @@ func (r *ChannelRouter) processSendError(paymentID uint64, rt *route.Route, return reportFail(nil, nil) } - // If an internal, non-forwarding error occurred, we can stop - // trying. - fErr, ok := sendErr.(*htlcswitch.ForwardingError) + + // If the error is a ClearTextError, we have received a valid wire + // failure message, either from our own outgoing link or from a node + // down the route. If the error is not related to the propagation of + // our payment, we can stop trying because an internal error has + // occurred. + rtErr, ok := sendErr.(htlcswitch.ClearTextError) if !ok { return &internalErrorReason } - failureMessage := fErr.FailureMessage - failureSourceIdx := fErr.FailureSourceIdx + // failureSourceIdx is the index of the node that the failure occurred + // at. If the ClearTextError received is not a ForwardingError the + // payment error occurred at our node, so we leave this value as 0 + // to indicate that the failure occurred locally. If the error is a + // ForwardingError, it did not originate at our node, so we set + // failureSourceIdx to the index of the node where the failure occurred. + failureSourceIdx := 0 + source, ok := rtErr.(*htlcswitch.ForwardingError) + if ok { + failureSourceIdx = source.FailureSourceIdx + } - // Apply channel update if the error contains one. For unknown - // failures, failureMessage is nil. + // Extract the wire failure and apply channel update if it contains one. + // If we received an unknown failure message from a node along the + // route, the failure message will be nil. 
+ failureMessage := rtErr.WireMessage() if failureMessage != nil { err := r.tryApplyChannelUpdate( rt, failureSourceIdx, failureMessage, @@ -2068,18 +2174,14 @@ func (r *ChannelRouter) GetChannelByID(chanID lnwire.ShortChannelID) ( // // NOTE: This method is part of the ChannelGraphSource interface. func (r *ChannelRouter) FetchLightningNode(node route.Vertex) (*channeldb.LightningNode, error) { - pubKey, err := btcec.ParsePubKey(node[:], btcec.S256()) - if err != nil { - return nil, fmt.Errorf("unable to parse raw public key: %v", err) - } - return r.cfg.Graph.FetchLightningNode(pubKey) + return r.cfg.Graph.FetchLightningNode(nil, node) } // ForEachNode is used to iterate over every node in router topology. // // NOTE: This method is part of the ChannelGraphSource interface. func (r *ChannelRouter) ForEachNode(cb func(*channeldb.LightningNode) error) error { - return r.cfg.Graph.ForEachNode(nil, func(_ *bbolt.Tx, n *channeldb.LightningNode) error { + return r.cfg.Graph.ForEachNode(nil, func(_ kvdb.ReadTx, n *channeldb.LightningNode) error { return cb(n) }) } @@ -2091,7 +2193,7 @@ func (r *ChannelRouter) ForEachNode(cb func(*channeldb.LightningNode) error) err func (r *ChannelRouter) ForAllOutgoingChannels(cb func(*channeldb.ChannelEdgeInfo, *channeldb.ChannelEdgePolicy) error) error { - return r.selfNode.ForEachChannel(nil, func(_ *bbolt.Tx, c *channeldb.ChannelEdgeInfo, + return r.selfNode.ForEachChannel(nil, func(_ kvdb.ReadTx, c *channeldb.ChannelEdgeInfo, e, _ *channeldb.ChannelEdgePolicy) error { if e == nil { @@ -2232,7 +2334,7 @@ func generateBandwidthHints(sourceNode *channeldb.LightningNode, // First, we'll collect the set of outbound edges from the target // source node. 
var localChans []*channeldb.ChannelEdgeInfo - err := sourceNode.ForEachChannel(nil, func(tx *bbolt.Tx, + err := sourceNode.ForEachChannel(nil, func(tx kvdb.ReadTx, edgeInfo *channeldb.ChannelEdgeInfo, _, _ *channeldb.ChannelEdgePolicy) error { @@ -2254,90 +2356,6 @@ func generateBandwidthHints(sourceNode *channeldb.LightningNode, return bandwidthHints, nil } -// runningAmounts keeps running amounts while the route is traversed. -type runningAmounts struct { - // amt is the intended amount to send via the route. - amt lnwire.MilliSatoshi - - // max is the running maximum that the route can carry. - max lnwire.MilliSatoshi -} - -// prependChannel returns a new set of running amounts that would result from -// prepending the given channel to the route. If canIncreaseAmt is set, the -// amount may be increased if it is too small to satisfy the channel's minimum -// htlc amount. -func (r *runningAmounts) prependChannel(policy *channeldb.ChannelEdgePolicy, - capacity btcutil.Amount, localChan bool, canIncreaseAmt bool) ( - runningAmounts, error) { - - // Determine max htlc value. - maxHtlc := lnwire.NewMSatFromSatoshis(capacity) - if policy.MessageFlags.HasMaxHtlc() { - maxHtlc = policy.MaxHTLC - } - - amt := r.amt - - // If we have a specific amount for which we are building the route, - // validate it against the channel constraints and return the new - // running amount. - if !canIncreaseAmt { - if amt < policy.MinHTLC || amt > maxHtlc { - return runningAmounts{}, fmt.Errorf("channel htlc "+ - "constraints [%v - %v] violated with amt %v", - policy.MinHTLC, maxHtlc, amt) - } - - // Update running amount by adding the fee for non-local - // channels. - if !localChan { - amt += policy.ComputeFee(amt) - } - - return runningAmounts{ - amt: amt, - }, nil - } - - // Adapt the minimum amount to what this channel allows. - if policy.MinHTLC > r.amt { - amt = policy.MinHTLC - } - - // Update the maximum amount too to be able to detect incompatible - // channels. 
- max := r.max - if maxHtlc < r.max { - max = maxHtlc - } - - // If we get in the situation that the minimum amount exceeds the - // maximum amount (enforced further down stream), we have incompatible - // channel policies. - // - // There is possibility with pubkey addressing that we should have - // selected a different channel downstream, but we don't backtrack to - // try to fix that. It would complicate path finding while we expect - // this situation to be rare. The spec recommends to keep all policies - // towards a peer identical. If that is the case, there isn't a better - // channel that we should have selected. - if amt > max { - return runningAmounts{}, - fmt.Errorf("incompatible channel policies: %v "+ - "exceeds %v", amt, max) - } - - // Add fees to the running amounts. Skip the source node fees as - // those do not need to be paid. - if !localChan { - amt += policy.ComputeFee(amt) - max += policy.ComputeFee(max) - } - - return runningAmounts{amt: amt, max: max}, nil -} - // ErrNoChannel is returned when a route cannot be built because there are no // channels that satisfy all requirements. type ErrNoChannel struct { @@ -2374,25 +2392,41 @@ func (r *ChannelRouter) BuildRoute(amt *lnwire.MilliSatoshi, return nil, err } - // Allocate a list that will contain the selected channels for this + // Fetch the current block height outside the routing transaction, to + // prevent the rpc call blocking the database. + _, height, err := r.cfg.Chain.GetBestBlock() + if err != nil { + return nil, err + } + + // Allocate a list that will contain the unified policies for this // route. - edges := make([]*channeldb.ChannelEdgePolicy, len(hops)) + edges := make([]*unifiedPolicy, len(hops)) - // Keep a running amount and the maximum for this route. - amts := runningAmounts{ - max: lnwire.MilliSatoshi(^uint64(0)), - } + var runningAmt lnwire.MilliSatoshi if useMinAmt { // For minimum amount routes, aim to deliver at least 1 msat to // the destination. 
There are nodes in the wild that have a // min_htlc channel policy of zero, which could lead to a zero // amount payment being made. - amts.amt = 1 + runningAmt = 1 } else { // If an amount is specified, we need to build a route that // delivers exactly this amount to the final destination. - amts.amt = *amt + runningAmt = *amt + } + + // Open a transaction to execute the graph queries in. + routingTx, err := newDbRoutingTx(r.cfg.Graph) + if err != nil { + return nil, err } + defer func() { + err := routingTx.close() + if err != nil { + log.Errorf("Error closing db tx: %v", err) + } + }() // Traverse hops backwards to accumulate fees in the running amounts. source := r.selfNode.PubKeyBytes @@ -2408,142 +2442,85 @@ func (r *ChannelRouter) BuildRoute(amt *lnwire.MilliSatoshi, localChan := i == 0 - // Iterate over candidate channels to select the channel - // to use for the final route. - var ( - bestEdge *channeldb.ChannelEdgePolicy - bestAmts *runningAmounts - bestBandwidth lnwire.MilliSatoshi - ) - - cb := func(tx *bbolt.Tx, - edgeInfo *channeldb.ChannelEdgeInfo, - _, inEdge *channeldb.ChannelEdgePolicy) error { - - chanID := edgeInfo.ChannelID + // Build unified policies for this hop based on the channels + // known in the graph. + u := newUnifiedPolicies(source, toNode, outgoingChan) - // Apply outgoing channel restriction is active. - if localChan && outgoingChan != nil && - chanID != *outgoingChan { - - return nil - } - - // No unknown policy channels. - if inEdge == nil { - return nil - } - - // Before we can process the edge, we'll need to - // fetch the node on the _other_ end of this - // channel as we may later need to iterate over - // the incoming edges of this node if we explore - // it further. - chanFromNode, err := edgeInfo.FetchOtherNode( - tx, toNode[:], - ) - if err != nil { - return err - } - - // Continue searching if this channel doesn't - // connect with the previous hop. 
- if chanFromNode.PubKeyBytes != fromNode { - return nil - } - - // Validate whether this channel's policy is satisfied - // and obtain the new running amounts if this channel - // was to be selected. - newAmts, err := amts.prependChannel( - inEdge, edgeInfo.Capacity, localChan, - useMinAmt, - ) - if err != nil { - log.Tracef("Skipping chan %v: %v", - inEdge.ChannelID, err) - - return nil - } + err := u.addGraphPolicies(routingTx) + if err != nil { + return nil, err + } - // If we already have a best edge, check whether this - // edge is better. - bandwidth := bandwidthHints[chanID] - if bestEdge != nil { - if localChan { - // For local channels, better is defined - // as having more bandwidth. We try to - // maximize the chance that the returned - // route succeeds. - if bandwidth < bestBandwidth { - return nil - } - } else { - // For other channels, better is defined - // as lower fees for the amount to send. - // Normally all channels between two - // nodes should have the same policy, - // but in case not we minimize our cost - // here. Regular path finding would do - // the same. - if newAmts.amt > bestAmts.amt { - return nil - } - } + // Exit if there are no channels. + unifiedPolicy, ok := u.policies[fromNode] + if !ok { + return nil, ErrNoChannel{ + fromNode: fromNode, + position: i, } - - // If we get here, the current edge is better. Replace - // the best. - bestEdge = inEdge - bestAmts = &newAmts - bestBandwidth = bandwidth - - return nil } - err := r.cfg.Graph.ForEachNodeChannel(nil, toNode[:], cb) - if err != nil { - return nil, err + // If using min amt, increase amt if needed. + if useMinAmt { + min := unifiedPolicy.minAmt() + if min > runningAmt { + runningAmt = min + } } - // There is no matching channel. Stop building the route here. - if bestEdge == nil { + // Get a forwarding policy for the specific amount that we want + // to forward. 
+ policy := unifiedPolicy.getPolicy(runningAmt, bandwidthHints) + if policy == nil { return nil, ErrNoChannel{ fromNode: fromNode, position: i, } } - log.Tracef("Select channel %v at position %v", bestEdge.ChannelID, i) + // Add fee for this hop. + if !localChan { + runningAmt += policy.ComputeFee(runningAmt) + } - edges[i] = bestEdge - amts = *bestAmts - } + log.Tracef("Select channel %v at position %v", policy.ChannelID, i) - _, height, err := r.cfg.Chain.GetBestBlock() - if err != nil { - return nil, err + edges[i] = unifiedPolicy } - var receiverAmt lnwire.MilliSatoshi - if useMinAmt { - // We've calculated the minimum amount for the htlc that the - // source node hands out. The newRoute call below expects the - // amount that must reach the receiver after subtraction of fees - // along the way. Iterate over all edges to calculate the - // receiver amount. - receiverAmt = amts.amt - for _, edge := range edges[1:] { - receiverAmt -= edge.ComputeFeeFromIncoming(receiverAmt) + // Now that we arrived at the start of the route and found out the route + // total amount, we make a forward pass. Because the amount may have + // been increased in the backward pass, fees need to be recalculated and + // amount ranges re-checked. + var pathEdges []*channeldb.ChannelEdgePolicy + receiverAmt := runningAmt + for i, edge := range edges { + policy := edge.getPolicy(receiverAmt, bandwidthHints) + if policy == nil { + return nil, ErrNoChannel{ + fromNode: hops[i-1], + position: i, + } } - } else { - // Deliver the specified amount to the receiver. - receiverAmt = *amt + + if i > 0 { + // Decrease the amount to send while going forward. + receiverAmt -= policy.ComputeFeeFromIncoming( + receiverAmt, + ) + } + + pathEdges = append(pathEdges, policy) } // Build and return the final route. 
return newRoute( - receiverAmt, source, edges, uint32(height), - uint16(finalCltvDelta), nil, + source, pathEdges, uint32(height), + finalHopParams{ + amt: receiverAmt, + totalAmt: receiverAmt, + cltvDelta: uint16(finalCltvDelta), + records: nil, + }, ) } diff --git a/routing/router_test.go b/routing/router_test.go index 0aae52cafb..d8b0950f0b 100644 --- a/routing/router_test.go +++ b/routing/router_test.go @@ -6,7 +6,6 @@ import ( "image/color" "math" "math/rand" - "strings" "sync/atomic" "testing" "time" @@ -18,9 +17,11 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/zpay32" ) @@ -78,11 +79,6 @@ func createTestCtxFromGraphInstance(startingHeight uint32, graphInstance *testGr chain := newMockChain(startingHeight) chainView := newMockChainView(chain) - selfNode, err := graphInstance.graph.SourceNode() - if err != nil { - return nil, nil, err - } - pathFindingConfig := PathFindingConfig{ MinProbability: 0.01, PaymentAttemptPenalty: 100, @@ -91,10 +87,11 @@ func createTestCtxFromGraphInstance(startingHeight uint32, graphInstance *testGr mcConfig := &MissionControlConfig{ PenaltyHalfLife: time.Hour, AprioriHopProbability: 0.9, + AprioriWeight: 0.5, } mc, err := NewMissionControl( - graphInstance.graph.Database().DB, + graphInstance.graph.Database(), mcConfig, ) if err != nil { @@ -102,8 +99,7 @@ func createTestCtxFromGraphInstance(startingHeight uint32, graphInstance *testGr } sessionSource := &SessionSource{ - Graph: graphInstance.graph, - SelfNode: selfNode, + Graph: graphInstance.graph, QueryBandwidth: func(e *channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi { return lnwire.NewMSatFromSatoshis(e.Capacity) }, @@ -129,6 +125,7 @@ func 
createTestCtxFromGraphInstance(startingHeight uint32, graphInstance *testGr return next, nil }, PathFindingConfig: pathFindingConfig, + Clock: clock.NewTestClock(time.Unix(1, 0)), }) if err != nil { return nil, nil, fmt.Errorf("unable to create router %v", err) @@ -225,7 +222,7 @@ func TestFindRoutesWithFeeLimit(t *testing.T) { route, err := ctx.router.FindRoute( ctx.router.selfNode.PubKeyBytes, - target, paymentAmt, restrictions, nil, + target, paymentAmt, restrictions, nil, nil, zpay32.DefaultFinalCLTVDelta, ) if err != nil { @@ -286,11 +283,12 @@ func TestSendPaymentRouteFailureFallback(t *testing.T) { roasbeefSongoku := lnwire.NewShortChanIDFromInt(12345) if firstHop == roasbeefSongoku { - return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - // TODO(roasbeef): temp node failure should be? - FailureMessage: &lnwire.FailTemporaryChannelFailure{}, - } + return [32]byte{}, htlcswitch.NewForwardingError( + // TODO(roasbeef): temp node failure + // should be? + &lnwire.FailTemporaryChannelFailure{}, + 1, + ) } return preImage, nil @@ -418,12 +416,12 @@ func TestChannelUpdateValidation(t *testing.T) { // The unsigned channel update is attached to the failure message. ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcher).setPaymentResult( func(firstHop lnwire.ShortChannelID) ([32]byte, error) { - return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailFeeInsufficient{ + return [32]byte{}, htlcswitch.NewForwardingError( + &lnwire.FailFeeInsufficient{ Update: errChanUpdate, }, - } + 1, + ) }) // The payment parameter is mostly redundant in SendToRoute. 
Can be left @@ -540,16 +538,15 @@ func TestSendPaymentErrorRepeatedFeeInsufficient(t *testing.T) { roasbeefSongoku := lnwire.NewShortChanIDFromInt(chanID) if firstHop == roasbeefSongoku { - return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - - // Within our error, we'll add a channel update - // which is meant to reflect he new fee - // schedule for the node/channel. - FailureMessage: &lnwire.FailFeeInsufficient{ + return [32]byte{}, htlcswitch.NewForwardingError( + // Within our error, we'll add a + // channel update which is meant to + // reflect the new fee schedule for the + // node/channel. + &lnwire.FailFeeInsufficient{ Update: errChanUpdate, - }, - } + }, 1, + ) } return preImage, nil @@ -644,12 +641,11 @@ func TestSendPaymentErrorNonFinalTimeLockErrors(t *testing.T) { func(firstHop lnwire.ShortChannelID) ([32]byte, error) { if firstHop == roasbeefSongoku { - return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailExpiryTooSoon{ + return [32]byte{}, htlcswitch.NewForwardingError( + &lnwire.FailExpiryTooSoon{ Update: errChanUpdate, - }, - } + }, 1, + ) } return preImage, nil @@ -698,12 +694,11 @@ func TestSendPaymentErrorNonFinalTimeLockErrors(t *testing.T) { func(firstHop lnwire.ShortChannelID) ([32]byte, error) { if firstHop == roasbeefSongoku { - return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailIncorrectCltvExpiry{ + return [32]byte{}, htlcswitch.NewForwardingError( + &lnwire.FailIncorrectCltvExpiry{ Update: errChanUpdate, - }, - } + }, 1, + ) } return preImage, nil @@ -761,20 +756,19 @@ func TestSendPaymentErrorPathPruning(t *testing.T) { // We'll first simulate an error from the first // hop to simulate the channel from songoku to // sophon not having enough capacity. 
- return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailTemporaryChannelFailure{}, - } + return [32]byte{}, htlcswitch.NewForwardingError( + &lnwire.FailTemporaryChannelFailure{}, + 1, + ) } // Next, we'll create an error from phan nuwen to // indicate that the sophon node is not longer online, // which should prune out the rest of the routes. if firstHop == roasbeefPhanNuwen { - return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailUnknownNextPeer{}, - } + return [32]byte{}, htlcswitch.NewForwardingError( + &lnwire.FailUnknownNextPeer{}, 1, + ) } return preImage, nil @@ -791,8 +785,30 @@ func TestSendPaymentErrorPathPruning(t *testing.T) { // The final error returned should also indicate that the peer wasn't // online (the last error we returned). - if !strings.Contains(err.Error(), "UnknownNextPeer") { - t.Fatalf("expected UnknownNextPeer instead got: %v", err) + if err != channeldb.FailureReasonNoRoute { + t.Fatalf("expected no route instead got: %v", err) + } + + // Inspect the two attempts that were made before the payment failed. + p, err := ctx.router.cfg.Control.FetchPayment(payHash) + if err != nil { + t.Fatal(err) + } + + if len(p.HTLCs) != 2 { + t.Fatalf("expected two attempts got %v", len(p.HTLCs)) + } + + // We expect the first attempt to have failed with a + // TemporaryChannelFailure, the second with UnknownNextPeer. 
+ msg := p.HTLCs[0].Failure.Message + if _, ok := msg.(*lnwire.FailTemporaryChannelFailure); !ok { + t.Fatalf("unexpected fail message: %T", msg) + } + + msg = p.HTLCs[1].Failure.Message + if _, ok := msg.(*lnwire.FailUnknownNextPeer); !ok { + t.Fatalf("unexpected fail message: %T", msg) } ctx.router.cfg.MissionControl.(*MissionControl).ResetHistory() @@ -803,10 +819,10 @@ func TestSendPaymentErrorPathPruning(t *testing.T) { func(firstHop lnwire.ShortChannelID) ([32]byte, error) { if firstHop == roasbeefSongoku { - return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailUnknownNextPeer{}, - } + failure := htlcswitch.NewForwardingError( + &lnwire.FailUnknownNextPeer{}, 1, + ) + return [32]byte{}, failure } return preImage, nil @@ -849,10 +865,10 @@ func TestSendPaymentErrorPathPruning(t *testing.T) { // We'll first simulate an error from the first // outgoing link to simulate the channel from luo ji to // roasbeef not having enough capacity. - return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailTemporaryChannelFailure{}, - } + return [32]byte{}, htlcswitch.NewForwardingError( + &lnwire.FailTemporaryChannelFailure{}, + 1, + ) } return preImage, nil }) @@ -1267,7 +1283,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) { copy(targetPubKeyBytes[:], targetNode.SerializeCompressed()) _, err = ctx.router.FindRoute( ctx.router.selfNode.PubKeyBytes, - targetPubKeyBytes, paymentAmt, noRestrictions, nil, + targetPubKeyBytes, paymentAmt, noRestrictions, nil, nil, zpay32.DefaultFinalCLTVDelta, ) if err != nil { @@ -1310,14 +1326,14 @@ func TestAddEdgeUnknownVertexes(t *testing.T) { // updated. 
_, err = ctx.router.FindRoute( ctx.router.selfNode.PubKeyBytes, - targetPubKeyBytes, paymentAmt, noRestrictions, nil, + targetPubKeyBytes, paymentAmt, noRestrictions, nil, nil, zpay32.DefaultFinalCLTVDelta, ) if err != nil { t.Fatalf("unable to find any routes: %v", err) } - copy1, err := ctx.graph.FetchLightningNode(priv1.PubKey()) + copy1, err := ctx.graph.FetchLightningNode(nil, pub1) if err != nil { t.Fatalf("unable to fetch node: %v", err) } @@ -1326,7 +1342,7 @@ func TestAddEdgeUnknownVertexes(t *testing.T) { t.Fatalf("fetched node not equal to original") } - copy2, err := ctx.graph.FetchLightningNode(priv2.PubKey()) + copy2, err := ctx.graph.FetchLightningNode(nil, pub2) if err != nil { t.Fatalf("unable to fetch node: %v", err) } @@ -2166,13 +2182,11 @@ func TestFindPathFeeWeighting(t *testing.T) { // We'll now attempt a path finding attempt using this set up. Due to // the edge weighting, we should select the direct path over the 2 hop // path even though the direct path has a higher potential time lock. - path, err := findPath( - &graphParams{ - graph: ctx.graph, - }, + path, err := dbFindPath( + ctx.graph, nil, nil, noRestrictions, testPathFindingConfig, - sourceNode.PubKeyBytes, target, amt, + sourceNode.PubKeyBytes, target, amt, 0, ) if err != nil { t.Fatalf("unable to find path: %v", err) @@ -2537,9 +2551,7 @@ func TestUnknownErrorSource(t *testing.T) { // couldn't be decoded (FailureMessage is nil). if firstHop.ToUint64() == 2 { return [32]byte{}, - &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - } + htlcswitch.NewUnknownForwardingError(1) } // Otherwise the payment succeeds. @@ -2596,17 +2608,12 @@ func assertChannelsPruned(t *testing.T, graph *channeldb.ChannelGraph, } } -// TestRouterPaymentStateMachine tests that the router interacts as expected -// with the ControlTower during a payment lifecycle, such that it payment -// attempts are not sent twice to the switch, and results are handled after a -// restart. 
-func TestRouterPaymentStateMachine(t *testing.T) { +// TestSendToRouteStructuredError asserts that SendToRoute returns a structured +// error. +func TestSendToRouteStructuredError(t *testing.T) { t.Parallel() - const startingBlockHeight = 101 - - // Setup two simple channels such that we can mock sending along this - // route. + // Setup a three node network. chanCapSat := btcutil.Amount(100000) testChannels := []*testChannel{ symmetricTestChannel("a", "b", chanCapSat, &testChannelPolicy{ @@ -2629,605 +2636,226 @@ func TestRouterPaymentStateMachine(t *testing.T) { } defer testGraph.cleanUp() - hop1 := testGraph.aliasMap["b"] - hop2 := testGraph.aliasMap["c"] + const startingBlockHeight = 101 + + ctx, cleanUp, err := createTestCtxFromGraphInstance( + startingBlockHeight, testGraph, + ) + if err != nil { + t.Fatalf("unable to create router: %v", err) + } + defer cleanUp() + + // Set up an init channel for the control tower, such that we can make + // sure the payment is initiated correctly. + init := make(chan initArgs, 1) + ctx.router.cfg.Control.(*mockControlTower).init = init + + // Setup a route from source a to destination c. The route will be used + // in a call to SendToRoute. SendToRoute also applies channel updates, + // but it saves us from including RequestRoute in the test scope too. + const payAmt = lnwire.MilliSatoshi(10000) + hop1 := ctx.aliases["b"] + hop2 := ctx.aliases["c"] hops := []*route.Hop{ { ChannelID: 1, PubKeyBytes: hop1, + AmtToForward: payAmt, LegacyPayload: true, }, { ChannelID: 2, PubKeyBytes: hop2, + AmtToForward: payAmt, LegacyPayload: true, }, } - // We create a simple route that we will supply every time the router - // requests one. 
- rt, err := route.NewRouteFromHops( - lnwire.MilliSatoshi(10000), 100, testGraph.aliasMap["a"], hops, - ) + rt, err := route.NewRouteFromHops(payAmt, 100, ctx.aliases["a"], hops) if err != nil { t.Fatalf("unable to create route: %v", err) } - // A payment state machine test case consists of several ordered steps, - // that we use for driving the scenario. - type testCase struct { - // steps is a list of steps to perform during the testcase. - steps []string - - // routes is the sequence of routes we will provide to the - // router when it requests a new route. - routes []*route.Route - } - - const ( - // routerInitPayment is a test step where we expect the router - // to call the InitPayment method on the control tower. - routerInitPayment = "Router:init-payment" - - // routerRegisterAttempt is a test step where we expect the - // router to call the RegisterAttempt method on the control - // tower. - routerRegisterAttempt = "Router:register-attempt" - - // routerSuccess is a test step where we expect the router to - // call the Success method on the control tower. - routerSuccess = "Router:success" - - // routerFail is a test step where we expect the router to call - // the Fail method on the control tower. - routerFail = "Router:fail" - - // sendToSwitchSuccess is a step where we expect the router to - // call send the payment attempt to the switch, and we will - // respond with a non-error, indicating that the payment - // attempt was successfully forwarded. - sendToSwitchSuccess = "SendToSwitch:success" - - // sendToSwitchResultFailure is a step where we expect the - // router to send the payment attempt to the switch, and we - // will respond with a forwarding error. This can happen when - // forwarding fail on our local links. - sendToSwitchResultFailure = "SendToSwitch:failure" - - // getPaymentResultSuccess is a test step where we expect the - // router to call the GetPaymentResult method, and we will - // respond with a successful payment result. 
- getPaymentResultSuccess = "GetPaymentResult:success" - - // getPaymentResultFailure is a test step where we expect the - // router to call the GetPaymentResult method, and we will - // respond with a forwarding error. - getPaymentResultFailure = "GetPaymentResult:failure" - - // resendPayment is a test step where we manually try to resend - // the same payment, making sure the router responds with an - // error indicating that it is alreayd in flight. - resendPayment = "ResendPayment" - - // startRouter is a step where we manually start the router, - // used to test that it automatically will resume payments at - // startup. - startRouter = "StartRouter" - - // stopRouter is a test step where we manually make the router - // shut down. - stopRouter = "StopRouter" - - // paymentSuccess is a step where assert that we receive a - // successful result for the original payment made. - paymentSuccess = "PaymentSuccess" - - // paymentError is a step where assert that we receive an error - // for the original payment made. - paymentError = "PaymentError" - - // resentPaymentSuccess is a step where assert that we receive - // a successful result for a payment that was resent. - resentPaymentSuccess = "ResentPaymentSuccess" - - // resentPaymentError is a step where assert that we receive an - // error for a payment that was resent. - resentPaymentError = "ResentPaymentError" - ) + // We'll modify the SendToSwitch method so that it simulates a failed + // payment with an error originating from the first hop of the route. + // The unsigned channel update is attached to the failure message. + ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcher).setPaymentResult( + func(firstHop lnwire.ShortChannelID) ([32]byte, error) { + return [32]byte{}, htlcswitch.NewForwardingError( + &lnwire.FailFeeInsufficient{ + Update: lnwire.ChannelUpdate{}, + }, 1, + ) + }) - tests := []testCase{ - { - // Tests a normal payment flow that succeeds. 
- steps: []string{ - routerInitPayment, - routerRegisterAttempt, - sendToSwitchSuccess, - getPaymentResultSuccess, - routerSuccess, - paymentSuccess, - }, - routes: []*route.Route{rt}, - }, - { - // A payment flow with a failure on the first attempt, - // but that succeeds on the second attempt. - steps: []string{ - routerInitPayment, - routerRegisterAttempt, - sendToSwitchSuccess, - - // Make the first sent attempt fail. - getPaymentResultFailure, - - // The router should retry. - routerRegisterAttempt, - sendToSwitchSuccess, - - // Make the second sent attempt succeed. - getPaymentResultSuccess, - routerSuccess, - paymentSuccess, - }, - routes: []*route.Route{rt, rt}, - }, - { - // A payment flow with a forwarding failure first time - // sending to the switch, but that succeeds on the - // second attempt. - steps: []string{ - routerInitPayment, - routerRegisterAttempt, - - // Make the first sent attempt fail. - sendToSwitchResultFailure, - - // The router should retry. - routerRegisterAttempt, - sendToSwitchSuccess, - - // Make the second sent attempt succeed. - getPaymentResultSuccess, - routerSuccess, - paymentSuccess, - }, - routes: []*route.Route{rt, rt}, - }, - { - // A payment that fails on the first attempt, and has - // only one route available to try. It will therefore - // fail permanently. - steps: []string{ - routerInitPayment, - routerRegisterAttempt, - sendToSwitchSuccess, - - // Make the first sent attempt fail. - getPaymentResultFailure, - - // Since there are no more routes to try, the - // payment should fail. - routerFail, - paymentError, - }, - routes: []*route.Route{rt}, - }, - { - // We expect the payment to fail immediately if we have - // no routes to try. - steps: []string{ - routerInitPayment, - routerFail, - paymentError, - }, - routes: []*route.Route{}, - }, - { - // A normal payment flow, where we attempt to resend - // the same payment after each step. 
This ensures that - // the router don't attempt to resend a payment already - // in flight. - steps: []string{ - routerInitPayment, - routerRegisterAttempt, - - // Manually resend the payment, the router - // should attempt to init with the control - // tower, but fail since it is already in - // flight. - resendPayment, - routerInitPayment, - resentPaymentError, - - // The original payment should proceed as - // normal. - sendToSwitchSuccess, - - // Again resend the payment and assert it's not - // allowed. - resendPayment, - routerInitPayment, - resentPaymentError, - - // Notify about a success for the original - // payment. - getPaymentResultSuccess, - routerSuccess, - - // Now that the original payment finished, - // resend it again to ensure this is not - // allowed. - resendPayment, - routerInitPayment, - resentPaymentError, - paymentSuccess, - }, - routes: []*route.Route{rt}, - }, - { - // Tests that the router is able to handle the - // receieved payment result after a restart. - steps: []string{ - routerInitPayment, - routerRegisterAttempt, - sendToSwitchSuccess, - - // Shut down the router. The original caller - // should get notified about this. - stopRouter, - paymentError, - - // Start the router again, and ensure the - // router registers the success with the - // control tower. - startRouter, - getPaymentResultSuccess, - routerSuccess, - }, - routes: []*route.Route{rt}, - }, - { - // Tests that we are allowed to resend a payment after - // it has permanently failed. - steps: []string{ - routerInitPayment, - routerRegisterAttempt, - sendToSwitchSuccess, - - // Resending the payment at this stage should - // not be allowed. - resendPayment, - routerInitPayment, - resentPaymentError, - - // Make the first attempt fail. - getPaymentResultFailure, - routerFail, - - // Since we have no more routes to try, the - // original payment should fail. - paymentError, - - // Now resend the payment again. This should be - // allowed, since the payment has failed. 
- resendPayment, - routerInitPayment, - routerRegisterAttempt, - sendToSwitchSuccess, - getPaymentResultSuccess, - routerSuccess, - resentPaymentSuccess, - }, - routes: []*route.Route{rt}, - }, + // The payment parameter is mostly redundant in SendToRoute. Can be left + // empty for this test. + var payment lntypes.Hash + + // Send off the payment request to the router. The specified route + // should be attempted and the channel update should be received by + // router and ignored because it is missing a valid signature. + _, err = ctx.router.SendToRoute(payment, rt) + + fErr, ok := err.(*htlcswitch.ForwardingError) + if !ok { + t.Fatalf("expected forwarding error") } - // Create a mock control tower with channels set up, that we use to - // synchronize and listen for events. - control := makeMockControlTower() - control.init = make(chan initArgs) - control.register = make(chan registerArgs) - control.success = make(chan successArgs) - control.fail = make(chan failArgs) - control.fetchInFlight = make(chan struct{}) - - quit := make(chan struct{}) - defer close(quit) - - // setupRouter is a helper method that creates and starts the router in - // the desired configuration for this test. - setupRouter := func() (*ChannelRouter, chan error, - chan *htlcswitch.PaymentResult, chan error) { - - chain := newMockChain(startingBlockHeight) - chainView := newMockChainView(chain) - - // We set uo the use the following channels and a mock Payer to - // synchonize with the interaction to the Switch. 
- sendResult := make(chan error) - paymentResultErr := make(chan error) - paymentResult := make(chan *htlcswitch.PaymentResult) - - payer := &mockPayer{ - sendResult: sendResult, - paymentResult: paymentResult, - paymentResultErr: paymentResultErr, - } + if _, ok := fErr.WireMessage().(*lnwire.FailFeeInsufficient); !ok { + t.Fatalf("expected fee insufficient error") + } - router, err := New(Config{ - Graph: testGraph.graph, - Chain: chain, - ChainView: chainView, - Control: control, - SessionSource: &mockPaymentSessionSource{}, - MissionControl: &mockMissionControl{}, - Payer: payer, - ChannelPruneExpiry: time.Hour * 24, - GraphPruneInterval: time.Hour * 2, - QueryBandwidth: func(e *channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi { - return lnwire.NewMSatFromSatoshis(e.Capacity) - }, - NextPaymentID: func() (uint64, error) { - next := atomic.AddUint64(&uniquePaymentID, 1) - return next, nil - }, - }) - if err != nil { - t.Fatalf("unable to create router %v", err) + // Check that the correct values were used when initiating the payment. + select { + case initVal := <-init: + if initVal.c.Value != payAmt { + t.Fatalf("expected %v, got %v", payAmt, initVal.c.Value) } + case <-time.After(100 * time.Millisecond): + t.Fatalf("initPayment not called") + } +} - // On startup, the router should fetch all pending payments - // from the ControlTower, so assert that here. - didFetch := make(chan struct{}) - go func() { - select { - case <-control.fetchInFlight: - close(didFetch) - case <-time.After(1 * time.Second): - t.Fatalf("router did not fetch in flight " + - "payments") - } - }() +// TestSendToRouteMultiShardSend checks that a 3-shard payment can be executed +// using SendToRoute. 
+func TestSendToRouteMultiShardSend(t *testing.T) { + t.Parallel() - if err := router.Start(); err != nil { - t.Fatalf("unable to start router: %v", err) - } + ctx, cleanup, err := createTestCtxSingleNode(0) + if err != nil { + t.Fatal(err) + } + defer cleanup() - select { - case <-didFetch: - case <-time.After(1 * time.Second): - t.Fatalf("did not fetch in flight payments at startup") - } + const numShards = 3 + const payAmt = lnwire.MilliSatoshi(numShards * 10000) + node, err := createTestNode() + if err != nil { + t.Fatal(err) + } - return router, sendResult, paymentResult, paymentResultErr + // Create a simple 1-hop route that we will use for all three shards. + hops := []*route.Hop{ + { + ChannelID: 1, + PubKeyBytes: node.PubKeyBytes, + AmtToForward: payAmt / numShards, + MPP: record.NewMPP(payAmt, [32]byte{}), + }, } - router, sendResult, getPaymentResult, getPaymentResultErr := setupRouter() - defer router.Stop() + sourceNode, err := ctx.graph.SourceNode() + if err != nil { + t.Fatal(err) + } - for _, test := range tests { - // Craft a LightningPayment struct. - var preImage lntypes.Preimage - if _, err := rand.Read(preImage[:]); err != nil { - t.Fatalf("unable to generate preimage") - } + rt, err := route.NewRouteFromHops( + payAmt, 100, sourceNode.PubKeyBytes, hops, + ) + if err != nil { + t.Fatalf("unable to create route: %v", err) + } - payHash := preImage.Hash() + // The first shard we send we'll fail immediately, to check that we are + // still allowed to retry with other shards after a failed one. 
+ ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcher).setPaymentResult( + func(firstHop lnwire.ShortChannelID) ([32]byte, error) { + return [32]byte{}, htlcswitch.NewForwardingError( + &lnwire.FailFeeInsufficient{ + Update: lnwire.ChannelUpdate{}, + }, 1, + ) + }) - paymentAmt := lnwire.NewMSatFromSatoshis(1000) - payment := LightningPayment{ - Target: testGraph.aliasMap["c"], - Amount: paymentAmt, - FeeLimit: noFeeLimit, - PaymentHash: payHash, - } + // The payment parameter is mostly redundant in SendToRoute. Can be left + // empty for this test. + var payment lntypes.Hash - copy(preImage[:], bytes.Repeat([]byte{9}, 32)) + // Send the shard using the created route, and expect an error to be + // returned. + _, err = ctx.router.SendToRoute(payment, rt) + if err == nil { + t.Fatalf("expected forwarding error") + } - router.cfg.SessionSource = &mockPaymentSessionSource{ - routes: test.routes, - } + // Now we'll modify the SendToSwitch method again to wait until all + // three shards are initiated before returning a result. We do this by + // signalling when the method has been called, and then stop to wait + // for the test to deliver the final result on the channel below. + waitForResultSignal := make(chan struct{}, numShards) + results := make(chan lntypes.Preimage, numShards) + + ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcher).setPaymentResult( + func(firstHop lnwire.ShortChannelID) ([32]byte, error) { + + // Signal that the shard has been initiated and is + // waiting for a result. + waitForResultSignal <- struct{}{} - router.cfg.MissionControl = &mockMissionControl{} + // Wait for a result before returning it. + res, ok := <-results + if !ok { + return [32]byte{}, fmt.Errorf("failure") + } + return res, nil + }) - // Send the payment. Since this is new payment hash, the - // information should be registered with the ControlTower. 
- paymentResult := make(chan error) + // Launch three shards by calling SendToRoute in three goroutines, + // returning their final error on the channel. + errChan := make(chan error) + successes := make(chan lntypes.Preimage) + + for i := 0; i < numShards; i++ { go func() { - _, _, err := router.SendPayment(&payment) - paymentResult <- err + preimg, err := ctx.router.SendToRoute(payment, rt) + if err != nil { + errChan <- err + return + } + + successes <- preimg }() + } - var resendResult chan error - for _, step := range test.steps { - switch step { - - case routerInitPayment: - var args initArgs - select { - case args = <-control.init: - case <-time.After(1 * time.Second): - t.Fatalf("no init payment with control") - } - - if args.c == nil { - t.Fatalf("expected non-nil CreationInfo") - } - - // In this step we expect the router to make a call to - // register a new attempt with the ControlTower. - case routerRegisterAttempt: - var args registerArgs - select { - case args = <-control.register: - case <-time.After(1 * time.Second): - t.Fatalf("not registered with control") - } - - if args.a == nil { - t.Fatalf("expected non-nil AttemptInfo") - } - - // In this step we expect the router to call the - // ControlTower's Succcess method with the preimage. - case routerSuccess: - select { - case _ = <-control.success: - case <-time.After(1 * time.Second): - t.Fatalf("not registered with control") - } - - // In this step we expect the router to call the - // ControlTower's Fail method, to indicate that the - // payment failed. - case routerFail: - select { - case _ = <-control.fail: - case <-time.After(1 * time.Second): - t.Fatalf("not registered with control") - } - - // In this step we expect the SendToSwitch method to be - // called, and we respond with a nil-error. 
- case sendToSwitchSuccess: - select { - case sendResult <- nil: - case <-time.After(1 * time.Second): - t.Fatalf("unable to send result") - } - - // In this step we expect the SendToSwitch method to be - // called, and we respond with a forwarding error - case sendToSwitchResultFailure: - select { - case sendResult <- &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailTemporaryChannelFailure{}, - }: - case <-time.After(1 * time.Second): - t.Fatalf("unable to send result") - } - - // In this step we expect the GetPaymentResult method - // to be called, and we respond with the preimage to - // complete the payment. - case getPaymentResultSuccess: - select { - case getPaymentResult <- &htlcswitch.PaymentResult{ - Preimage: preImage, - }: - case <-time.After(1 * time.Second): - t.Fatalf("unable to send result") - } - - // In this state we expect the GetPaymentResult method - // to be called, and we respond with a forwarding - // error, indicating that the router should retry. - case getPaymentResultFailure: - select { - case getPaymentResult <- &htlcswitch.PaymentResult{ - Error: &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailTemporaryChannelFailure{}, - }, - }: - case <-time.After(1 * time.Second): - t.Fatalf("unable to get result") - } - - // In this step we manually try to resend the same - // payment, making sure the router responds with an - // error indicating that it is alreayd in flight. - case resendPayment: - resendResult = make(chan error) - go func() { - _, _, err := router.SendPayment(&payment) - resendResult <- err - }() - - // In this step we manually stop the router. 
- case stopRouter: - select { - case getPaymentResultErr <- fmt.Errorf( - "shutting down"): - case <-time.After(1 * time.Second): - t.Fatalf("unable to send payment " + - "result error") - } - - if err := router.Stop(); err != nil { - t.Fatalf("unable to restart: %v", err) - } - - // In this step we manually start the router. - case startRouter: - router, sendResult, getPaymentResult, - getPaymentResultErr = setupRouter() - - // In this state we expect to receive an error for the - // original payment made. - case paymentError: - select { - case err := <-paymentResult: - if err == nil { - t.Fatalf("expected error") - } - - case <-time.After(1 * time.Second): - t.Fatalf("got no payment result") - } - - // In this state we expect the original payment to - // succeed. - case paymentSuccess: - select { - case err := <-paymentResult: - if err != nil { - t.Fatalf("did not expecte error %v", err) - } - - case <-time.After(1 * time.Second): - t.Fatalf("got no payment result") - } - - // In this state we expect to receive an error for the - // resent payment made. - case resentPaymentError: - select { - case err := <-resendResult: - if err == nil { - t.Fatalf("expected error") - } - - case <-time.After(1 * time.Second): - t.Fatalf("got no payment result") - } - - // In this state we expect the resent payment to - // succeed. - case resentPaymentSuccess: - select { - case err := <-resendResult: - if err != nil { - t.Fatalf("did not expect error %v", err) - } - - case <-time.After(1 * time.Second): - t.Fatalf("got no payment result") - } - - default: - t.Fatalf("unknown step %v", step) + // Wait for all shards to signal they have been initiated. + for i := 0; i < numShards; i++ { + select { + case <-waitForResultSignal: + case <-time.After(5 * time.Second): + t.Fatalf("not waiting for results") + } + } + + // Deliver a dummy preimage to all the shard handlers. 
+ preimage := lntypes.Preimage{} + preimage[4] = 42 + for i := 0; i < numShards; i++ { + results <- preimage + } + + // Finally expect all shards to return with the above preimage. + for i := 0; i < numShards; i++ { + select { + case p := <-successes: + if p != preimage { + t.Fatalf("preimage mismatch") } + case err := <-errChan: + t.Fatalf("unexpected error from SendToRoute: %v", err) + case <-time.After(5 * time.Second): + t.Fatalf("result not received") } } } -// TestSendToRouteStructuredError asserts that SendToRoute returns a structured -// error. -func TestSendToRouteStructuredError(t *testing.T) { +// TestSendToRouteMaxHops asserts that SendToRoute fails when using a route that +// exceeds the maximum number of hops. +func TestSendToRouteMaxHops(t *testing.T) { t.Parallel() - // Setup a three node network. + // Setup a two node network. chanCapSat := btcutil.Amount(100000) testChannels := []*testChannel{ symmetricTestChannel("a", "b", chanCapSat, &testChannelPolicy{ @@ -3236,12 +2864,6 @@ func TestSendToRouteStructuredError(t *testing.T) { MinHTLC: 1, MaxHTLC: lnwire.NewMSatFromSatoshis(chanCapSat), }, 1), - symmetricTestChannel("b", "c", chanCapSat, &testChannelPolicy{ - Expiry: 144, - FeeRate: 400, - MinHTLC: 1, - MaxHTLC: lnwire.NewMSatFromSatoshis(chanCapSat), - }, 2), } testGraph, err := createTestGraphFromChannels(testChannels, "a") @@ -3260,30 +2882,26 @@ func TestSendToRouteStructuredError(t *testing.T) { } defer cleanUp() - // Set up an init channel for the control tower, such that we can make - // sure the payment is initiated correctly. - init := make(chan initArgs, 1) - ctx.router.cfg.Control.(*mockControlTower).init = init - - // Setup a route from source a to destination c. The route will be used - // in a call to SendToRoute. SendToRoute also applies channel updates, - // but it saves us from including RequestRoute in the test scope too. + // Create a 30 hop route that exceeds the maximum hop limit. 
const payAmt = lnwire.MilliSatoshi(10000) - hop1 := ctx.aliases["b"] - hop2 := ctx.aliases["c"] - hops := []*route.Hop{ - { + hopA := ctx.aliases["a"] + hopB := ctx.aliases["b"] + + var hops []*route.Hop + for i := 0; i < 15; i++ { + hops = append(hops, &route.Hop{ ChannelID: 1, - PubKeyBytes: hop1, + PubKeyBytes: hopB, AmtToForward: payAmt, LegacyPayload: true, - }, - { - ChannelID: 2, - PubKeyBytes: hop2, + }) + + hops = append(hops, &route.Hop{ + ChannelID: 1, + PubKeyBytes: hopA, AmtToForward: payAmt, LegacyPayload: true, - }, + }) } rt, err := route.NewRouteFromHops(payAmt, 100, ctx.aliases["a"], hops) @@ -3291,45 +2909,12 @@ func TestSendToRouteStructuredError(t *testing.T) { t.Fatalf("unable to create route: %v", err) } - // We'll modify the SendToSwitch method so that it simulates a failed - // payment with an error originating from the first hop of the route. - // The unsigned channel update is attached to the failure message. - ctx.router.cfg.Payer.(*mockPaymentAttemptDispatcher).setPaymentResult( - func(firstHop lnwire.ShortChannelID) ([32]byte, error) { - return [32]byte{}, &htlcswitch.ForwardingError{ - FailureSourceIdx: 1, - FailureMessage: &lnwire.FailFeeInsufficient{ - Update: lnwire.ChannelUpdate{}, - }, - } - }) - - // The payment parameter is mostly redundant in SendToRoute. Can be left - // empty for this test. + // Send off the payment request to the router. We expect an error back + // indicating that the route is too long. var payment lntypes.Hash - - // Send off the payment request to the router. The specified route - // should be attempted and the channel update should be received by - // router and ignored because it is missing a valid signature. 
_, err = ctx.router.SendToRoute(payment, rt) - - fErr, ok := err.(*htlcswitch.ForwardingError) - if !ok { - t.Fatalf("expected forwarding error") - } - - if _, ok := fErr.FailureMessage.(*lnwire.FailFeeInsufficient); !ok { - t.Fatalf("expected fee insufficient error") - } - - // Check that the correct values were used when initiating the payment. - select { - case initVal := <-init: - if initVal.c.Value != payAmt { - t.Fatalf("expected %v, got %v", payAmt, initVal.c.Value) - } - case <-time.After(100 * time.Millisecond): - t.Fatalf("initPayment not called") + if err != route.ErrMaxRouteHopsExceeded { + t.Fatalf("expected ErrMaxRouteHopsExceeded, but got %v", err) } } @@ -3404,6 +2989,8 @@ func TestBuildRoute(t *testing.T) { defer cleanUp() checkHops := func(rt *route.Route, expected []uint64) { + t.Helper() + if len(rt.Hops) != len(expected) { t.Fatal("hop count mismatch") } @@ -3431,10 +3018,10 @@ func TestBuildRoute(t *testing.T) { } // Check that we get the expected route back. The total amount should be - // the amount to deliver to hop c (100 sats) plus the fee for hop b (5 - // sats). - checkHops(rt, []uint64{1, 2}) - if rt.TotalAmount != 105000 { + // the amount to deliver to hop c (100 sats) plus the max fee for the + // connection b->c (6 sats). + checkHops(rt, []uint64{1, 7}) + if rt.TotalAmount != 106000 { t.Fatalf("unexpected total amount %v", rt.TotalAmount) } @@ -3447,11 +3034,11 @@ func TestBuildRoute(t *testing.T) { } // Check that we get the expected route back. The minimum that we can - // send from b to c is 20 sats. Hop b charges 1 sat for the forwarding. - // The channel between hop a and b can carry amounts in the range [5, - // 100], so 21 sats is the minimum amount for this route. - checkHops(rt, []uint64{1, 2}) - if rt.TotalAmount != 21000 { + // send from b to c is 20 sats. Hop b charges 1200 msat for the + // forwarding. 
The channel between hop a and b can carry amounts in the + // range [5, 100], so 21200 msats is the minimum amount for this route. + checkHops(rt, []uint64{1, 7}) + if rt.TotalAmount != 21200 { t.Fatalf("unexpected total amount %v", rt.TotalAmount) } diff --git a/routing/testdata/excessive_hops.json b/routing/testdata/excessive_hops.json deleted file mode 100644 index 3094f1b47c..0000000000 --- a/routing/testdata/excessive_hops.json +++ /dev/null @@ -1,410 +0,0 @@ -{ - "nodes": [ - { - "source": true, - "pubkey": "021b96642e723592ee0b095983fe3a26c8b40b8926968d8b7510e51c9429d4562c", - "alias": "alice" - }, - { - "source": false, - "pubkey": "022096b2b0ac083e708074a5ab57288bc821b6bef7b964185b307e073772c3748f", - "alias": "bob" - }, - { - "source": false, - "pubkey": "022a190ce901ab2b6f349483f18b28a1d72c64a7bccb8057291f25784c0899840f", - "alias": "carol" - }, - { - "source": false, - "pubkey": "022d855d09971dd047b7ecf929b23c6f147b568d4668af67fb2226eb8c15c4660d", - "alias": "dave" - }, - { - "source": false, - "pubkey": "024ca436834b0d38d9dc7ee4d95aa21db321c45598dc5921a4a52304a8e0dd2952", - "alias": "eve" - }, - { - "source": false, - "pubkey": "025234a0c44cbf1b20c18e2c397107ad731376831e1c43ddb360b41dbb98c10266", - "alias": "fez" - }, - { - "source": false, - "pubkey": "0253e9d03030f2ff08d3a7f1d824ad6d8c0dae422f324e72d5bb313e3f2a2d45a8", - "alias": "gabby" - }, - { - "source": false, - "pubkey": "0263d4f2baca258ff3bd5bce86c2754e95daaea27f268ae1a048c1253ff20de56e", - "alias": "harold" - }, - { - "source": false, - "pubkey": "02650db8e44302f75e265e9427264bc0d7e2337831d6b9ceb7c58ed1e725d4576a", - "alias": "inez" - }, - { - "source": false, - "pubkey": "02727bfd298aa055a6419404931dfc1ccb4f0eb7c9660a7df346b93d0025df3ba1", - "alias": "jake" - }, - { - "source": false, - "pubkey": "0280c83b3eded413dcec12f7952410e2738f079bd9cbc9a7c462e32ed4d74bd5b7", - "alias": "karen" - }, - { - "source": false, - "pubkey": "0290bf454f4b95baf9227801301b331e35d477c6b6e7f36a599983ae58747b3828", 
- "alias": "liam" - }, - { - "source": false, - "pubkey": "0297c8de635d17e3dd5775edfa2797be0874c53b0026f69009787cecd2fa577de8", - "alias": "maggie" - }, - { - "source": false, - "pubkey": "02a27227113c71eab0c8609ac0cdc7e76791fc3163c16e643cb4658d1080c7e336", - "alias": "nick" - }, - { - "source": false, - "pubkey": "02f5f6bb6373fc60528118003f803557b916fbecd90c3a0c5df4c86c6a6e962fd1", - "alias": "ophelia" - }, - { - "source": false, - "pubkey": "02fd7a5f04d550cf0ba8af6053a20e0080d956f41b1221357a35fab3a363e5f78e", - "alias": "patrick" - }, - { - "source": false, - "pubkey": "030da942ed7cfc7d3096811b3264e15115778e692eaacb2b7a76fb27a58cbb5359", - "alias": "quinn" - }, - { - "source": false, - "pubkey": "0319d6b038e26ac89802e856d7e78f293e9d109c414614f98e3fa5c626f20934be", - "alias": "rick" - }, - { - "source": false, - "pubkey": "03384439e78e87d168fecabe8d88218dfd5983c5e14fd8fa6dc89caeb3cc0fb171", - "alias": "sarah" - }, - { - "source": false, - "pubkey": "0362002b8fbc1a799c839c8bcea43fce38a147467a00bc450414bbeab5c7a19efe", - "alias": "tim" - }, - { - "source": false, - "pubkey": "0369bca64993fce966745d32c09b882f668958d9bd7aabb60ba35ef1884013be1d", - "alias": "ursula" - }, - { - "source": false, - "pubkey": "0367cec75158a4129177bfb8b269cb586efe93d751b43800d456485e81c2620ca6", - "alias": "vincent" - } - ], - "edges": [ - { - "node_1": "021b96642e723592ee0b095983fe3a26c8b40b8926968d8b7510e51c9429d4562c", - "node_2": "022096b2b0ac083e708074a5ab57288bc821b6bef7b964185b307e073772c3748f", - "channel_id": 12345, - "channel_point": "99dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "022096b2b0ac083e708074a5ab57288bc821b6bef7b964185b307e073772c3748f", - "node_2": "022a190ce901ab2b6f349483f18b28a1d72c64a7bccb8057291f25784c0899840f", - "channel_id": 12346, - "channel_point": 
"79dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "022a190ce901ab2b6f349483f18b28a1d72c64a7bccb8057291f25784c0899840f", - "node_2": "022d855d09971dd047b7ecf929b23c6f147b568d4668af67fb2226eb8c15c4660d", - "channel_id": 12347, - "channel_point": "69dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "022d855d09971dd047b7ecf929b23c6f147b568d4668af67fb2226eb8c15c4660d", - "node_2": "024ca436834b0d38d9dc7ee4d95aa21db321c45598dc5921a4a52304a8e0dd2952", - "channel_id": 12348, - "channel_point": "59dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "024ca436834b0d38d9dc7ee4d95aa21db321c45598dc5921a4a52304a8e0dd2952", - "node_2": "025234a0c44cbf1b20c18e2c397107ad731376831e1c43ddb360b41dbb98c10266", - "channel_id": 12349, - "channel_point": "49dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "025234a0c44cbf1b20c18e2c397107ad731376831e1c43ddb360b41dbb98c10266", - "node_2": "0253e9d03030f2ff08d3a7f1d824ad6d8c0dae422f324e72d5bb313e3f2a2d45a8", - "channel_id": 12340, - "channel_point": "39dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - 
"fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "0253e9d03030f2ff08d3a7f1d824ad6d8c0dae422f324e72d5bb313e3f2a2d45a8", - "node_2": "0263d4f2baca258ff3bd5bce86c2754e95daaea27f268ae1a048c1253ff20de56e", - "channel_id": 12344, - "channel_point": "29dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "0263d4f2baca258ff3bd5bce86c2754e95daaea27f268ae1a048c1253ff20de56e", - "node_2": "02650db8e44302f75e265e9427264bc0d7e2337831d6b9ceb7c58ed1e725d4576a", - "channel_id": 12343, - "channel_point": "19dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "02650db8e44302f75e265e9427264bc0d7e2337831d6b9ceb7c58ed1e725d4576a", - "node_2": "02727bfd298aa055a6419404931dfc1ccb4f0eb7c9660a7df346b93d0025df3ba1", - "channel_id": 12342, - "channel_point": "88dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "02727bfd298aa055a6419404931dfc1ccb4f0eb7c9660a7df346b93d0025df3ba1", - "node_2": "0280c83b3eded413dcec12f7952410e2738f079bd9cbc9a7c462e32ed4d74bd5b7", - "channel_id": 12341, - "channel_point": "87dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "0280c83b3eded413dcec12f7952410e2738f079bd9cbc9a7c462e32ed4d74bd5b7", - "node_2": 
"0290bf454f4b95baf9227801301b331e35d477c6b6e7f36a599983ae58747b3828", - "channel_id": 12355, - "channel_point": "86dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "0290bf454f4b95baf9227801301b331e35d477c6b6e7f36a599983ae58747b3828", - "node_2": "0297c8de635d17e3dd5775edfa2797be0874c53b0026f69009787cecd2fa577de8", - "channel_id": 12365, - "channel_point": "85dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "0297c8de635d17e3dd5775edfa2797be0874c53b0026f69009787cecd2fa577de8", - "node_2": "02a27227113c71eab0c8609ac0cdc7e76791fc3163c16e643cb4658d1080c7e336", - "channel_id": 12375, - "channel_point": "84dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "02a27227113c71eab0c8609ac0cdc7e76791fc3163c16e643cb4658d1080c7e336", - "node_2": "02f5f6bb6373fc60528118003f803557b916fbecd90c3a0c5df4c86c6a6e962fd1", - "channel_id": 12385, - "channel_point": "83dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "02f5f6bb6373fc60528118003f803557b916fbecd90c3a0c5df4c86c6a6e962fd1", - "node_2": "02fd7a5f04d550cf0ba8af6053a20e0080d956f41b1221357a35fab3a363e5f78e", - "channel_id": 12395, - "channel_point": "82dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - 
"channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "02fd7a5f04d550cf0ba8af6053a20e0080d956f41b1221357a35fab3a363e5f78e", - "node_2": "030da942ed7cfc7d3096811b3264e15115778e692eaacb2b7a76fb27a58cbb5359", - "channel_id": 12305, - "channel_point": "81dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "030da942ed7cfc7d3096811b3264e15115778e692eaacb2b7a76fb27a58cbb5359", - "node_2": "0319d6b038e26ac89802e856d7e78f293e9d109c414614f98e3fa5c626f20934be", - "channel_id": 12335, - "channel_point": "80dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "0319d6b038e26ac89802e856d7e78f293e9d109c414614f98e3fa5c626f20934be", - "node_2": "03384439e78e87d168fecabe8d88218dfd5983c5e14fd8fa6dc89caeb3cc0fb171", - "channel_id": 12325, - "channel_point": "89ec56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "03384439e78e87d168fecabe8d88218dfd5983c5e14fd8fa6dc89caeb3cc0fb171", - "node_2": "0362002b8fbc1a799c839c8bcea43fce38a147467a00bc450414bbeab5c7a19efe", - "channel_id": 12315, - "channel_point": "89fc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": 
"0362002b8fbc1a799c839c8bcea43fce38a147467a00bc450414bbeab5c7a19efe", - "node_2": "0369bca64993fce966745d32c09b882f668958d9bd7aabb60ba35ef1884013be1d", - "channel_id": 12445, - "channel_point": "89cc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - }, - { - "node_1": "0369bca64993fce966745d32c09b882f668958d9bd7aabb60ba35ef1884013be1d", - "node_2": "0367cec75158a4129177bfb8b269cb586efe93d751b43800d456485e81c2620ca6", - "channel_id": 12545, - "channel_point": "89bc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0", - "channel_flags": 0, - "message_flags": 1, - "expiry": 1, - "min_htlc": 1, - "max_htlc": 100000000, - "fee_base_msat": 10, - "fee_rate": 0.001, - "capacity": 100000 - } - ] -} diff --git a/routing/unified_policies.go b/routing/unified_policies.go new file mode 100644 index 0000000000..3759175a6e --- /dev/null +++ b/routing/unified_policies.go @@ -0,0 +1,278 @@ +package routing + +import ( + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +// unifiedPolicies holds all unified policies for connections towards a node. +type unifiedPolicies struct { + // policies contains a unified policy for every from node. + policies map[route.Vertex]*unifiedPolicy + + // sourceNode is the sender of a payment. The rules to pick the final + // policy are different for local channels. + sourceNode route.Vertex + + // toNode is the node for which the unified policies are instantiated. + toNode route.Vertex + + // outChanRestr is an optional outgoing channel restriction for the + // local channel to use. + outChanRestr *uint64 +} + +// newUnifiedPolicies instantiates a new unifiedPolicies object. Channel +// policies can be added to this object. 
+func newUnifiedPolicies(sourceNode, toNode route.Vertex, + outChanRestr *uint64) *unifiedPolicies { + + return &unifiedPolicies{ + policies: make(map[route.Vertex]*unifiedPolicy), + toNode: toNode, + sourceNode: sourceNode, + outChanRestr: outChanRestr, + } +} + +// addPolicy adds a single channel policy. Capacity may be zero if unknown +// (light clients). +func (u *unifiedPolicies) addPolicy(fromNode route.Vertex, + edge *channeldb.ChannelEdgePolicy, capacity btcutil.Amount) { + + localChan := fromNode == u.sourceNode + + // Skip channels if there is an outgoing channel restriction. + if localChan && u.outChanRestr != nil && + *u.outChanRestr != edge.ChannelID { + + return + } + + // Update the policies map. + policy, ok := u.policies[fromNode] + if !ok { + policy = &unifiedPolicy{ + localChan: localChan, + } + u.policies[fromNode] = policy + } + + policy.edges = append(policy.edges, &unifiedPolicyEdge{ + policy: edge, + capacity: capacity, + }) +} + +// addGraphPolicies adds all policies that are known for the toNode in the +// graph. +func (u *unifiedPolicies) addGraphPolicies(g routingGraph) error { + cb := func(edgeInfo *channeldb.ChannelEdgeInfo, _, + inEdge *channeldb.ChannelEdgePolicy) error { + + // If there is no edge policy for this candidate node, skip. + // Note that we are searching backwards so this node would have + // come prior to the pivot node in the route. + if inEdge == nil { + return nil + } + + // The node on the other end of this channel is the from node. + fromNode, err := edgeInfo.OtherNodeKeyBytes(u.toNode[:]) + if err != nil { + return err + } + + // Add this policy to the unified policies map. + u.addPolicy(fromNode, inEdge, edgeInfo.Capacity) + + return nil + } + + // Iterate over all channels of the to node. + return g.forEachNodeChannel(u.toNode, cb) +} + +// unifiedPolicyEdge is the individual channel data that is kept inside an +// unifiedPolicy object. 
+type unifiedPolicyEdge struct { + policy *channeldb.ChannelEdgePolicy + capacity btcutil.Amount +} + +// amtInRange checks whether an amount falls within the valid range for a +// channel. +func (u *unifiedPolicyEdge) amtInRange(amt lnwire.MilliSatoshi) bool { + // If the capacity is available (non-light clients), skip channels that + // are too small. + if u.capacity > 0 && + amt > lnwire.NewMSatFromSatoshis(u.capacity) { + + return false + } + + // Skip channels for which this htlc is too large. + if u.policy.MessageFlags.HasMaxHtlc() && + amt > u.policy.MaxHTLC { + + return false + } + + // Skip channels for which this htlc is too small. + if amt < u.policy.MinHTLC { + return false + } + + return true +} + +// unifiedPolicy is the unified policy that covers all channels between a pair +// of nodes. +type unifiedPolicy struct { + edges []*unifiedPolicyEdge + localChan bool +} + +// getPolicy returns the optimal policy to use for this connection given a +// specific amount to send. It differentiates between local and network +// channels. +func (u *unifiedPolicy) getPolicy(amt lnwire.MilliSatoshi, + bandwidthHints map[uint64]lnwire.MilliSatoshi) *channeldb.ChannelEdgePolicy { + + if u.localChan { + return u.getPolicyLocal(amt, bandwidthHints) + } + + return u.getPolicyNetwork(amt) +} + +// getPolicyLocal returns the optimal policy to use for this local connection +// given a specific amount to send. +func (u *unifiedPolicy) getPolicyLocal(amt lnwire.MilliSatoshi, + bandwidthHints map[uint64]lnwire.MilliSatoshi) *channeldb.ChannelEdgePolicy { + + var ( + bestPolicy *channeldb.ChannelEdgePolicy + maxBandwidth lnwire.MilliSatoshi + ) + + for _, edge := range u.edges { + // Check valid amount range for the channel. + if !edge.amtInRange(amt) { + continue + } + + // For local channels, there is no fee to pay or an extra time + // lock. We only consider the currently available bandwidth for + // channel selection. The disabled flag is ignored for local + // channels. 
+ + // Retrieve bandwidth for this local channel. If not + // available, assume this channel has enough bandwidth. + // + // TODO(joostjager): Possibly change to skipping this + // channel. The bandwidth hint is expected to be + // available. + bandwidth, ok := bandwidthHints[edge.policy.ChannelID] + if !ok { + bandwidth = lnwire.MaxMilliSatoshi + } + + // Skip channels that can't carry the payment. + if amt > bandwidth { + continue + } + + // We pick the local channel with the highest available + // bandwidth, to maximize the success probability. It + // can be that the channel state changes between + // querying the bandwidth hints and sending out the + // htlc. + if bandwidth < maxBandwidth { + continue + } + maxBandwidth = bandwidth + + // Update best policy. + bestPolicy = edge.policy + } + + return bestPolicy +} + +// getPolicyNetwork returns the optimal policy to use for this connection given +// a specific amount to send. The goal is to return a policy that maximizes the +// probability of a successful forward in a non-strict forwarding context. +func (u *unifiedPolicy) getPolicyNetwork( + amt lnwire.MilliSatoshi) *channeldb.ChannelEdgePolicy { + + var ( + bestPolicy *channeldb.ChannelEdgePolicy + maxFee lnwire.MilliSatoshi + maxTimelock uint16 + ) + + for _, edge := range u.edges { + // Check valid amount range for the channel. + if !edge.amtInRange(amt) { + continue + } + + // For network channels, skip the disabled ones. + edgeFlags := edge.policy.ChannelFlags + isDisabled := edgeFlags&lnwire.ChanUpdateDisabled != 0 + if isDisabled { + continue + } + + // Track the maximum time lock of all channels that are + // candidate for non-strict forwarding at the routing node. + if edge.policy.TimeLockDelta > maxTimelock { + maxTimelock = edge.policy.TimeLockDelta + } + + // Use the policy that results in the highest fee for this + // specific amount. 
+ fee := edge.policy.ComputeFee(amt) + if fee < maxFee { + continue + } + maxFee = fee + + bestPolicy = edge.policy + } + + // Return early if no channel matches. + if bestPolicy == nil { + return nil + } + + // We have already picked the highest fee that could be required for + // non-strict forwarding. To also cover the case where a lower fee + // channel requires a longer time lock, we modify the policy by setting + // the maximum encountered time lock. Note that this results in a + // synthetic policy that is not actually present on the routing node. + // + // The reason we do this, is that we try to maximize the chance that we + // get forwarded. Because we penalize pair-wise, there won't be a second + // chance for this node pair. But this is all only needed for nodes that + // have distinct policies for channels to the same peer. + modifiedPolicy := *bestPolicy + modifiedPolicy.TimeLockDelta = maxTimelock + + return &modifiedPolicy +} + +// minAmt returns the minimum amount that can be forwarded on this connection. +func (u *unifiedPolicy) minAmt() lnwire.MilliSatoshi { + min := lnwire.MaxMilliSatoshi + for _, edge := range u.edges { + if edge.policy.MinHTLC < min { + min = edge.policy.MinHTLC + } + } + + return min +} diff --git a/routing/unified_policies_test.go b/routing/unified_policies_test.go new file mode 100644 index 0000000000..e89a3cb122 --- /dev/null +++ b/routing/unified_policies_test.go @@ -0,0 +1,91 @@ +package routing + +import ( + "testing" + + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/routing/route" +) + +// TestUnifiedPolicies tests the composition of unified policies for nodes that +// have multiple channels between them. 
+func TestUnifiedPolicies(t *testing.T) {
+	source := route.Vertex{1}
+	toNode := route.Vertex{2}
+	fromNode := route.Vertex{3}
+
+	bandwidthHints := map[uint64]lnwire.MilliSatoshi{}
+
+	u := newUnifiedPolicies(source, toNode, nil)
+
+	// Add two channels between the pair of nodes.
+	p1 := channeldb.ChannelEdgePolicy{
+		FeeProportionalMillionths: 100000,
+		FeeBaseMSat:               30,
+		TimeLockDelta:             60,
+		MessageFlags:              lnwire.ChanUpdateOptionMaxHtlc,
+		MaxHTLC:                   500,
+		MinHTLC:                   100,
+	}
+	p2 := channeldb.ChannelEdgePolicy{
+		FeeProportionalMillionths: 190000,
+		FeeBaseMSat:               10,
+		TimeLockDelta:             40,
+		MessageFlags:              lnwire.ChanUpdateOptionMaxHtlc,
+		MaxHTLC:                   400,
+		MinHTLC:                   100,
+	}
+	u.addPolicy(fromNode, &p1, 7)
+	u.addPolicy(fromNode, &p2, 7)
+
+	checkPolicy := func(policy *channeldb.ChannelEdgePolicy,
+		feeBase lnwire.MilliSatoshi, feeRate lnwire.MilliSatoshi,
+		timeLockDelta uint16) {
+
+		t.Helper()
+
+		if policy.FeeBaseMSat != feeBase {
+			t.Fatalf("expected fee base %v, got %v",
+				feeBase, policy.FeeBaseMSat)
+		}
+
+		if policy.TimeLockDelta != timeLockDelta {
+			t.Fatalf("expected time lock delta %v, got %v",
+				timeLockDelta, policy.TimeLockDelta)
+		}
+
+		if policy.FeeProportionalMillionths != feeRate {
+			t.Fatalf("expected fee rate %v, got %v",
+				feeRate, policy.FeeProportionalMillionths)
+		}
+	}
+
+	policy := u.policies[fromNode].getPolicy(50, bandwidthHints)
+	if policy != nil {
+		t.Fatal("expected no policy for amt below min htlc")
+	}
+
+	policy = u.policies[fromNode].getPolicy(550, bandwidthHints)
+	if policy != nil {
+		t.Fatal("expected no policy for amt above max htlc")
+	}
+
+	// For 200 sat, p1 yields the highest fee. Use that policy to forward,
+	// because it will also match p2 in case p1 does not have enough
+	// balance.
+	policy = u.policies[fromNode].getPolicy(200, bandwidthHints)
+	checkPolicy(
+		policy, p1.FeeBaseMSat, p1.FeeProportionalMillionths,
+		p1.TimeLockDelta,
+	)
+
+	// For 400 sat, p2 yields the highest fee. 
Use that policy to forward, + // because it will also match p1 in case p2 does not have enough + // balance. In order to match p1, it needs to have p1's time lock delta. + policy = u.policies[fromNode].getPolicy(400, bandwidthHints) + checkPolicy( + policy, p2.FeeBaseMSat, p2.FeeProportionalMillionths, + p1.TimeLockDelta, + ) +} diff --git a/rpcserver.go b/rpcserver.go index 437735db87..2f824de89c 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -9,51 +9,59 @@ import ( "fmt" "io" "math" - "net" "net/http" + "runtime" "sort" "strings" "sync" "sync/atomic" "time" - "github.com/lightningnetwork/lnd/chanacceptor" - "github.com/lightningnetwork/lnd/lnrpc/routerrpc" - "github.com/lightningnetwork/lnd/routing/route" - "github.com/lightningnetwork/lnd/tlv" - "github.com/lightningnetwork/lnd/watchtower" - "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/psbt" "github.com/btcsuite/btcwallet/wallet/txauthor" - "github.com/coreos/bbolt" "github.com/davecgh/go-spew/spew" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" proxy "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/lightningnetwork/lnd/autopilot" "github.com/lightningnetwork/lnd/build" + "github.com/lightningnetwork/lnd/chanacceptor" "github.com/lightningnetwork/lnd/chanbackup" + "github.com/lightningnetwork/lnd/chanfitness" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/channelnotifier" + "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/discovery" + "github.com/lightningnetwork/lnd/feature" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/invoices" + 
"github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" + "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/macaroons" "github.com/lightningnetwork/lnd/monitoring" + "github.com/lightningnetwork/lnd/peernotifier" + "github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/routing" + "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/signal" "github.com/lightningnetwork/lnd/sweep" + "github.com/lightningnetwork/lnd/watchtower" "github.com/lightningnetwork/lnd/zpay32" "github.com/tv42/zbase32" "google.golang.org/grpc" @@ -114,6 +122,10 @@ var ( Entity: "invoices", Action: "read", }, + { + Entity: "signer", + Action: "read", + }, } // writePermissions is a slice of all entities that allow write @@ -151,6 +163,10 @@ var ( Entity: "signer", Action: "generate", }, + { + Entity: "macaroon", + Action: "generate", + }, } // invoicePermissions is a slice of all the entities that allows a user @@ -173,9 +189,33 @@ var ( Entity: "address", Action: "write", }, + { + Entity: "onchain", + Action: "read", + }, + } + + // TODO(guggero): Refactor into constants that are used for all + // permissions in this file. Also expose the list of possible + // permissions in an RPC when per RPC permissions are + // implemented. + validActions = []string{"read", "write", "generate"} + validEntities = []string{ + "onchain", "offchain", "address", "message", + "peers", "info", "invoices", "signer", "macaroon", } ) +// stringInSlice returns true if a string is contained in the given slice. 
+func stringInSlice(a string, slice []string) bool { + for _, b := range slice { + if b == a { + return true + } + } + return false +} + // mainRPCServerPermissions returns a mapping of the main RPC server calls to // the permissions they require. func mainRPCServerPermissions() map[string][]bakery.Op { @@ -317,6 +357,10 @@ func mainRPCServerPermissions() map[string][]bakery.Op { Entity: "info", Action: "read", }}, + "/lnrpc.Lightning/GetNodeMetrics": {{ + Entity: "info", + Action: "read", + }}, "/lnrpc.Lightning/GetChanInfo": {{ Entity: "info", Action: "read", @@ -396,6 +440,21 @@ func mainRPCServerPermissions() map[string][]bakery.Op { Entity: "offchain", Action: "write", }}, + "/lnrpc.Lightning/BakeMacaroon": {{ + Entity: "macaroon", + Action: "generate", + }}, + "/lnrpc.Lightning/SubscribePeerEvents": {{ + Entity: "peers", + Action: "read", + }}, + "/lnrpc.Lightning/FundingStateStep": {{ + Entity: "onchain", + Action: "write", + }, { + Entity: "offchain", + Action: "write", + }}, } } @@ -422,7 +481,7 @@ type rpcServer struct { // listeners is a list of listeners to use when starting the grpc // server. We make it configurable such that the grpc server can listen // on custom interfaces. - listeners []net.Listener + listeners []*ListenerWithSignal // listenerCleanUp are a set of closures functions that will allow this // main RPC server to clean up all the listening socket created for the @@ -449,6 +508,13 @@ type rpcServer struct { chanPredicate *chanacceptor.ChainedAcceptor quit chan struct{} + + // macService is the macaroon service that we need to mint new + // macaroons. + macService *macaroons.Service + + // selfNode is our own pubkey. + selfNode route.Vertex } // A compile time check to ensure that rpcServer fully implements the @@ -461,10 +527,11 @@ var _ lnrpc.LightningServer = (*rpcServer)(nil) // base level options passed to the grPC server. This typically includes things // like requiring TLS, etc. 
func newRPCServer(s *server, macService *macaroons.Service, - subServerCgs *subRPCServerConfigs, restDialOpts []grpc.DialOption, - restProxyDest string, atpl *autopilot.Manager, - invoiceRegistry *invoices.InvoiceRegistry, tower *watchtower.Standalone, - tlsCfg *tls.Config, getListeners rpcListeners, + subServerCgs *subRPCServerConfigs, serverOpts []grpc.ServerOption, + restDialOpts []grpc.DialOption, restProxyDest string, + atpl *autopilot.Manager, invoiceRegistry *invoices.InvoiceRegistry, + tower *watchtower.Standalone, tlsCfg *tls.Config, + getListeners rpcListeners, chanPredicate *chanacceptor.ChainedAcceptor) (*rpcServer, error) { // Set up router rpc backend. @@ -501,11 +568,17 @@ func newRPCServer(s *server, macService *macaroons.Service, return info.NodeKey1Bytes, info.NodeKey2Bytes, nil }, - FindRoute: s.chanRouter.FindRoute, - MissionControl: s.missionControl, - ActiveNetParams: activeNetParams.Params, - Tower: s.controlTower, - MaxTotalTimelock: cfg.MaxOutgoingCltvExpiry, + FindRoute: s.chanRouter.FindRoute, + MissionControl: s.missionControl, + ActiveNetParams: activeNetParams.Params, + Tower: s.controlTower, + MaxTotalTimelock: cfg.MaxOutgoingCltvExpiry, + DefaultFinalCltvDelta: uint16(cfg.Bitcoin.TimeLockDelta), + SubscribeHtlcEvents: s.htlcNotifier.SubscribeHtlcEvents, + } + + genInvoiceFeatures := func() *lnwire.FeatureVector { + return s.featureMgr.Get(feature.SetInvoice) } var ( @@ -520,7 +593,7 @@ func newRPCServer(s *server, macService *macaroons.Service, s.cc, networkDir, macService, atpl, invoiceRegistry, s.htlcSwitch, activeNetParams.Params, s.chanRouter, routerBackend, s.nodeSigner, s.chanDB, s.sweeper, tower, - s.towerClient, cfg.net.ResolveTCPAddr, + s.towerClient, cfg.net.ResolveTCPAddr, genInvoiceFeatures, ) if err != nil { return nil, err @@ -592,7 +665,7 @@ func newRPCServer(s *server, macService *macaroons.Service, ) // Get the listeners and server options to use for this rpc server. 
- listeners, cleanup, serverOpts, err := getListeners() + listeners, cleanup, err := getListeners() if err != nil { return nil, err } @@ -623,6 +696,8 @@ func newRPCServer(s *server, macService *macaroons.Service, routerBackend: routerBackend, chanPredicate: chanPredicate, quit: make(chan struct{}, 1), + macService: macService, + selfNode: selfNode.PubKeyBytes, } lnrpc.RegisterLightningServer(grpcServer, rootRPCServer) @@ -663,8 +738,11 @@ func (r *rpcServer) Start() error { // With all the sub-servers started, we'll spin up the listeners for // the main RPC server itself. for _, lis := range r.listeners { - go func(lis net.Listener) { + go func(lis *ListenerWithSignal) { rpcsLog.Infof("RPC server listening on %s", lis.Addr()) + + // Close the ready chan to indicate we are listening. + close(lis.Ready) r.grpcServer.Serve(lis) }(lis) } @@ -679,6 +757,17 @@ func (r *rpcServer) Start() error { } } + // The default JSON marshaler of the REST proxy only sets OrigName to + // true, which instructs it to use the same field names as specified in + // the proto file and not switch to camel case. What we also want is + // that the marshaler prints all values, even if they are falsey. + customMarshalerOption := proxy.WithMarshalerOption( + proxy.MIMEWildcard, &proxy.JSONPb{ + OrigName: true, + EmitDefaults: true, + }, + ) + // Finally, start the REST proxy for our gRPC server above. We'll ensure // we direct LND to connect to its loopback address rather than a // wildcard to prevent certificate issues when accessing the proxy @@ -686,7 +775,7 @@ func (r *rpcServer) Start() error { // // TODO(roasbeef): eventually also allow the sub-servers to themselves // have a REST proxy. 
- mux := proxy.NewServeMux() + mux := proxy.NewServeMux(customMarshalerOption) err := lnrpc.RegisterLightningHandlerFromEndpoint( context.Background(), mux, r.restProxyDest, @@ -778,7 +867,7 @@ func addrPairsToOutputs(addrPairs map[string]int64) ([]*wire.TxOut, error) { // more addresses specified in the passed payment map. The payment map maps an // address to a specified output value to be sent to that address. func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64, - feeRate lnwallet.SatPerKWeight) (*chainhash.Hash, error) { + feeRate chainfee.SatPerKWeight) (*chainhash.Hash, error) { outputs, err := addrPairsToOutputs(paymentMap) if err != nil { @@ -862,7 +951,7 @@ func (r *rpcServer) ListUnspent(ctx context.Context, } utxoResp := lnrpc.Utxo{ - Type: addrType, + AddressType: addrType, AmountSat: int64(utxo.Value), PkScript: hex.EncodeToString(utxo.PkScript), Outpoint: outpoint, @@ -963,7 +1052,7 @@ func (r *rpcServer) SendCoins(ctx context.Context, // Based on the passed fee related parameters, we'll determine an // appropriate fee rate for this transaction. - satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() + satPerKw := chainfee.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() feePerKw, err := sweep.DetermineFeePerKw( r.server.cc.feeEstimator, sweep.FeePreference{ ConfTarget: uint32(in.TargetConf), @@ -1085,7 +1174,7 @@ func (r *rpcServer) SendMany(ctx context.Context, // Based on the passed fee related parameters, we'll determine an // appropriate fee rate for this transaction. 
- satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() + satPerKw := chainfee.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() feePerKw, err := sweep.DetermineFeePerKw( r.server.cc.feeEstimator, sweep.FeePreference{ ConfTarget: uint32(in.TargetConf), @@ -1257,8 +1346,7 @@ func (r *rpcServer) ConnectPeer(ctx context.Context, // The server hasn't yet started, so it won't be able to service any of // our requests, so we'll bail early here. if !r.server.Started() { - return nil, fmt.Errorf("chain backend is still syncing, server " + - "not active yet") + return nil, ErrServerNotActive } if in.Addr == nil { @@ -1311,8 +1399,7 @@ func (r *rpcServer) DisconnectPeer(ctx context.Context, rpcsLog.Debugf("[disconnectpeer] from peer(%s)", in.PubKey) if !r.server.Started() { - return nil, fmt.Errorf("chain backend is still syncing, server " + - "not active yet") + return nil, ErrServerNotActive } // First we'll validate the string passed in within the request to @@ -1388,23 +1475,169 @@ func extractOpenChannelMinConfs(in *lnrpc.OpenChannelRequest) (int32, error) { } } -// OpenChannel attempts to open a singly funded channel specified in the -// request to a remote peer. -func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest, - updateStream lnrpc.Lightning_OpenChannelServer) error { +// newFundingShimAssembler returns a new fully populated +// chanfunding.CannedAssembler using a FundingShim obtained from an RPC caller. +func newFundingShimAssembler(chanPointShim *lnrpc.ChanPointShim, initiator bool, + keyRing keychain.KeyRing) (chanfunding.Assembler, error) { - rpcsLog.Tracef("[openchannel] request to NodeKey(%v) "+ - "allocation(us=%v, them=%v)", in.NodePubkeyString, - in.LocalFundingAmount, in.PushSat) + // Perform some basic sanity checks to ensure that all the expected + // fields are populated. 
+ switch { + case chanPointShim.RemoteKey == nil: + return nil, fmt.Errorf("remote key not set") + + case chanPointShim.LocalKey == nil: + return nil, fmt.Errorf("local key desc not set") + + case chanPointShim.LocalKey.RawKeyBytes == nil: + return nil, fmt.Errorf("local raw key bytes not set") + + case chanPointShim.LocalKey.KeyLoc == nil: + return nil, fmt.Errorf("local key loc not set") + + case chanPointShim.ChanPoint == nil: + return nil, fmt.Errorf("chan point not set") + + case len(chanPointShim.PendingChanId) != 32: + return nil, fmt.Errorf("pending chan ID not set") + } + // First, we'll map the RPC's channel point to one we can actually use. + index := chanPointShim.ChanPoint.OutputIndex + txid, err := GetChanPointFundingTxid(chanPointShim.ChanPoint) + if err != nil { + return nil, err + } + chanPoint := wire.NewOutPoint(txid, index) + + // Next we'll parse out the remote party's funding key, as well as our + // full key descriptor. + remoteKey, err := btcec.ParsePubKey( + chanPointShim.RemoteKey, btcec.S256(), + ) + if err != nil { + return nil, err + } + + shimKeyDesc := chanPointShim.LocalKey + localKey, err := btcec.ParsePubKey( + shimKeyDesc.RawKeyBytes, btcec.S256(), + ) + if err != nil { + return nil, err + } + localKeyDesc := keychain.KeyDescriptor{ + PubKey: localKey, + KeyLocator: keychain.KeyLocator{ + Family: keychain.KeyFamily( + shimKeyDesc.KeyLoc.KeyFamily, + ), + Index: uint32(shimKeyDesc.KeyLoc.KeyIndex), + }, + } + + // Verify that if we re-derive this key according to the passed + // KeyLocator, that we get the exact same key back. Otherwise, we may + // end up in a situation where we aren't able to actually sign for this + // newly created channel. 
+	derivedKey, err := keyRing.DeriveKey(localKeyDesc.KeyLocator)
+	if err != nil {
+		return nil, err
+	}
+	if !derivedKey.PubKey.IsEqual(localKey) {
+		return nil, fmt.Errorf("KeyLocator does not match attached " +
+			"raw pubkey")
+	}
+
+	// With all the parts assembled, we can now make the canned assembler
+	// to pass into the wallet.
+	return chanfunding.NewCannedAssembler(
+		chanPointShim.ThawHeight, *chanPoint,
+		btcutil.Amount(chanPointShim.Amt), &localKeyDesc,
+		remoteKey, initiator,
+	), nil
+}
+
+// newPsbtAssembler returns a new fully populated
+// chanfunding.PsbtAssembler using a FundingShim obtained from an RPC caller.
+func newPsbtAssembler(req *lnrpc.OpenChannelRequest, normalizedMinConfs int32,
+	psbtShim *lnrpc.PsbtShim, netParams *chaincfg.Params) (
+	chanfunding.Assembler, error) {
+
+	var (
+		packet *psbt.Packet
+		err    error
+	)
+
+	// Perform some basic sanity checks to ensure that all the expected
+	// fields are populated and none of the incompatible fields are.
+	if len(psbtShim.PendingChanId) != 32 {
+		return nil, fmt.Errorf("pending chan ID not set")
+	}
+	if normalizedMinConfs != 1 {
+		return nil, fmt.Errorf("setting non-default values for " +
+			"minimum confirmation is not supported for PSBT " +
+			"funding")
+	}
+	if req.SatPerByte != 0 || req.TargetConf != 0 {
+		return nil, fmt.Errorf("specifying fee estimation parameters " +
+			"is not supported for PSBT funding")
+	}
+
+	// The base PSBT is optional. But if it's set, it has to be a valid,
+	// binary serialized PSBT.
+	if len(psbtShim.BasePsbt) > 0 {
+		packet, err = psbt.NewFromRawBytes(
+			bytes.NewReader(psbtShim.BasePsbt), false,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing base PSBT: %v",
+				err)
+		}
+	}
+
+	// With all the parts assembled, we can now make the canned assembler
+	// to pass into the wallet.
+ return chanfunding.NewPsbtAssembler( + btcutil.Amount(req.LocalFundingAmount), packet, netParams, + ), nil +} + +// canOpenChannel returns an error if the necessary subsystems for channel +// funding are not ready. +func (r *rpcServer) canOpenChannel() error { + // We can't open a channel until the main server has started. if !r.server.Started() { - return fmt.Errorf("chain backend is still syncing, server " + - "not active yet") + return ErrServerNotActive + } + + // Creation of channels before the wallet syncs up is currently + // disallowed. + isSynced, _, err := r.server.cc.wallet.IsSynced() + if err != nil { + return err + } + if !isSynced { + return errors.New("channels cannot be created before the " + + "wallet is fully synced") } + return nil +} + +// praseOpenChannelReq parses an OpenChannelRequest message into the server's +// native openChanReq struct. The logic is abstracted so that it can be shared +// between OpenChannel and OpenChannelSync. +func (r *rpcServer) parseOpenChannelReq(in *lnrpc.OpenChannelRequest, + isSync bool) (*openChanReq, error) { + + rpcsLog.Debugf("[openchannel] request to NodeKey(%x) "+ + "allocation(us=%v, them=%v)", in.NodePubkey, + in.LocalFundingAmount, in.PushSat) + localFundingAmt := btcutil.Amount(in.LocalFundingAmount) remoteInitialBalance := btcutil.Amount(in.PushSat) - minHtlc := lnwire.MilliSatoshi(in.MinHtlcMsat) + minHtlcIn := lnwire.MilliSatoshi(in.MinHtlcMsat) remoteCsvDelay := uint16(in.RemoteCsvDelay) // Ensure that the initial balance of the remote party (if pushing @@ -1413,15 +1646,15 @@ func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest, // // TODO(roasbeef): incorporate base fee? 
if remoteInitialBalance >= localFundingAmt { - return fmt.Errorf("amount pushed to remote peer for initial " + - "state must be below the local funding amount") + return nil, fmt.Errorf("amount pushed to remote peer for " + + "initial state must be below the local funding amount") } // Ensure that the user doesn't exceed the current soft-limit for // channel size. If the funding amount is above the soft-limit, then // we'll reject the request. if localFundingAmt > MaxFundingAmount { - return fmt.Errorf("funding amount is too large, the max "+ + return nil, fmt.Errorf("funding amount is too large, the max "+ "channel size is: %v", MaxFundingAmount) } @@ -1429,8 +1662,8 @@ func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest, // level, we'll ensure that the output we create after accounting for // fees that a dust output isn't created. if localFundingAmt < minChanFundingSize { - return fmt.Errorf("channel is too small, the minimum channel "+ - "size is: %v SAT", int64(minChanFundingSize)) + return nil, fmt.Errorf("channel is too small, the minimum "+ + "channel size is: %v SAT", int64(minChanFundingSize)) } // Then, we'll extract the minimum number of confirmations that each @@ -1438,39 +1671,53 @@ func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest, // satisfy. minConfs, err := extractOpenChannelMinConfs(in) if err != nil { - return err + return nil, err } - var ( - nodePubKey *btcec.PublicKey - nodePubKeyBytes []byte - ) - // TODO(roasbeef): also return channel ID? - // Ensure that the NodePubKey is set before attempting to use it - if len(in.NodePubkey) == 0 { - return fmt.Errorf("NodePubKey is not set") - } + var nodePubKey *btcec.PublicKey - // Parse the raw bytes of the node key into a pubkey object so we - // can easily manipulate it. - nodePubKey, err = btcec.ParsePubKey(in.NodePubkey, btcec.S256()) - if err != nil { - return err + // Parse the remote pubkey the NodePubkey field of the request. 
If it's + // not present, we'll fallback to the deprecated version that parses the + // key from a hex string if this is for REST for backwards compatibility. + switch { + + // Parse the raw bytes of the node key into a pubkey object so we can + // easily manipulate it. + case len(in.NodePubkey) > 0: + nodePubKey, err = btcec.ParsePubKey(in.NodePubkey, btcec.S256()) + if err != nil { + return nil, err + } + + // Decode the provided target node's public key, parsing it into a pub + // key object. For all sync call, byte slices are expected to be encoded + // as hex strings. + case isSync: + keyBytes, err := hex.DecodeString(in.NodePubkeyString) + if err != nil { + return nil, err + } + + nodePubKey, err = btcec.ParsePubKey(keyBytes, btcec.S256()) + if err != nil { + return nil, err + } + + default: + return nil, fmt.Errorf("NodePubkey is not set") } // Making a channel to ourselves wouldn't be of any use, so we // explicitly disallow them. if nodePubKey.IsEqual(r.server.identityPriv.PubKey()) { - return fmt.Errorf("cannot open channel to self") + return nil, fmt.Errorf("cannot open channel to self") } - nodePubKeyBytes = nodePubKey.SerializeCompressed() - // Based on the passed fee related parameters, we'll determine an // appropriate fee rate for the funding transaction. 
- satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() + satPerKw := chainfee.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() feeRate, err := sweep.DetermineFeePerKw( r.server.cc.feeEstimator, sweep.FeePreference{ ConfTarget: uint32(in.TargetConf), @@ -1478,25 +1725,89 @@ func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest, }, ) if err != nil { - return err + return nil, err } rpcsLog.Debugf("[openchannel]: using fee of %v sat/kw for funding tx", int64(feeRate)) + script, err := parseUpfrontShutdownAddress(in.CloseAddress) + if err != nil { + return nil, fmt.Errorf("error parsing upfront shutdown: %v", + err) + } + // Instruct the server to trigger the necessary events to attempt to // open a new channel. A stream is returned in place, this stream will // be used to consume updates of the state of the pending channel. - req := &openChanReq{ + return &openChanReq{ targetPubkey: nodePubKey, chainHash: *activeNetParams.GenesisHash, localFundingAmt: localFundingAmt, pushAmt: lnwire.NewMSatFromSatoshis(remoteInitialBalance), - minHtlc: minHtlc, + minHtlcIn: minHtlcIn, fundingFeePerKw: feeRate, private: in.Private, remoteCsvDelay: remoteCsvDelay, minConfs: minConfs, + shutdownScript: script, + }, nil +} + +// OpenChannel attempts to open a singly funded channel specified in the +// request to a remote peer. +func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest, + updateStream lnrpc.Lightning_OpenChannelServer) error { + + if err := r.canOpenChannel(); err != nil { + return err + } + + req, err := r.parseOpenChannelReq(in, false) + if err != nil { + return err + } + + // If the user has provided a shim, then we'll now augment the based + // open channel request with this additional logic. + if in.FundingShim != nil { + switch { + // If we have a chan point shim, then this means the funding + // transaction was crafted externally. In this case we only + // need to hand a channel point down into the wallet. 
+ case in.FundingShim.GetChanPointShim() != nil: + chanPointShim := in.FundingShim.GetChanPointShim() + + // Map the channel point shim into a new + // chanfunding.CannedAssembler that the wallet will use + // to obtain the channel point details. + copy(req.pendingChanID[:], chanPointShim.PendingChanId) + req.chanFunder, err = newFundingShimAssembler( + chanPointShim, true, r.server.cc.keyRing, + ) + if err != nil { + return err + } + + // If we have a PSBT shim, then this means the funding + // transaction will be crafted outside of the wallet, once the + // funding multisig output script is known. We'll create an + // intent that will supervise the multi-step process. + case in.FundingShim.GetPsbtShim() != nil: + psbtShim := in.FundingShim.GetPsbtShim() + + // Instruct the wallet to use the new + // chanfunding.PsbtAssembler to construct the funding + // transaction. + copy(req.pendingChanID[:], psbtShim.PendingChanId) + req.chanFunder, err = newPsbtAssembler( + in, req.minConfs, psbtShim, + &r.server.cc.wallet.Cfg.NetParams, + ) + if err != nil { + return err + } + } } updateChan, errChan := r.server.OpenChannel(req) @@ -1507,7 +1818,7 @@ out: select { case err := <-errChan: rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v", - nodePubKeyBytes, err) + req.targetPubkey.SerializeCompressed(), err) return err case fundingUpdate := <-updateChan: rpcsLog.Tracef("[openchannel] sending update: %v", @@ -1539,7 +1850,7 @@ out: } rpcsLog.Tracef("[openchannel] success NodeKey(%x), ChannelPoint(%v)", - nodePubKeyBytes, outpoint) + req.targetPubkey.SerializeCompressed(), outpoint) return nil } @@ -1550,104 +1861,21 @@ out: func (r *rpcServer) OpenChannelSync(ctx context.Context, in *lnrpc.OpenChannelRequest) (*lnrpc.ChannelPoint, error) { - rpcsLog.Tracef("[openchannel] request to NodeKey(%v) "+ - "allocation(us=%v, them=%v)", in.NodePubkeyString, - in.LocalFundingAmount, in.PushSat) - - // We don't allow new channels to be open while the server is still - // 
syncing, as otherwise we may not be able to obtain the relevant - // notifications. - if !r.server.Started() { - return nil, fmt.Errorf("chain backend is still syncing, server " + - "not active yet") - } - - // Creation of channels before the wallet syncs up is currently - // disallowed. - isSynced, _, err := r.server.cc.wallet.IsSynced() - if err != nil { + if err := r.canOpenChannel(); err != nil { return nil, err } - if !isSynced { - return nil, errors.New("channels cannot be created before the " + - "wallet is fully synced") - } - // Decode the provided target node's public key, parsing it into a pub - // key object. For all sync call, byte slices are expected to be - // encoded as hex strings. - keyBytes, err := hex.DecodeString(in.NodePubkeyString) + req, err := r.parseOpenChannelReq(in, true) if err != nil { return nil, err } - nodepubKey, err := btcec.ParsePubKey(keyBytes, btcec.S256()) - if err != nil { - return nil, err - } - - localFundingAmt := btcutil.Amount(in.LocalFundingAmount) - remoteInitialBalance := btcutil.Amount(in.PushSat) - minHtlc := lnwire.MilliSatoshi(in.MinHtlcMsat) - remoteCsvDelay := uint16(in.RemoteCsvDelay) - - // Ensure that the initial balance of the remote party (if pushing - // satoshis) does not exceed the amount the local party has requested - // for funding. - if remoteInitialBalance >= localFundingAmt { - return nil, fmt.Errorf("amount pushed to remote peer for " + - "initial state must be below the local funding amount") - } - - // Restrict the size of the channel we'll actually open. At a later - // level, we'll ensure that the output we create after accounting for - // fees that a dust output isn't created. - if localFundingAmt < minChanFundingSize { - return nil, fmt.Errorf("channel is too small, the minimum channel "+ - "size is: %v SAT", int64(minChanFundingSize)) - } - - // Then, we'll extract the minimum number of confirmations that each - // output we use to fund the channel's funding transaction should - // satisfy. 
- minConfs, err := extractOpenChannelMinConfs(in) - if err != nil { - return nil, err - } - - // Based on the passed fee related parameters, we'll determine an - // appropriate fee rate for the funding transaction. - satPerKw := lnwallet.SatPerKVByte(in.SatPerByte * 1000).FeePerKWeight() - feeRate, err := sweep.DetermineFeePerKw( - r.server.cc.feeEstimator, sweep.FeePreference{ - ConfTarget: uint32(in.TargetConf), - FeeRate: satPerKw, - }, - ) - if err != nil { - return nil, err - } - - rpcsLog.Tracef("[openchannel] target sat/kw for funding tx: %v", - int64(feeRate)) - - req := &openChanReq{ - targetPubkey: nodepubKey, - chainHash: *activeNetParams.GenesisHash, - localFundingAmt: localFundingAmt, - pushAmt: lnwire.NewMSatFromSatoshis(remoteInitialBalance), - minHtlc: minHtlc, - fundingFeePerKw: feeRate, - private: in.Private, - remoteCsvDelay: remoteCsvDelay, - minConfs: minConfs, - } updateChan, errChan := r.server.OpenChannel(req) select { // If an error occurs them immediately return the error to the client. case err := <-errChan: rpcsLog.Errorf("unable to open channel to NodeKey(%x): %v", - nodepubKey, err) + req.targetPubkey.SerializeCompressed(), err) return nil, err // Otherwise, wait for the first channel update. The first update sent @@ -1673,6 +1901,24 @@ func (r *rpcServer) OpenChannelSync(ctx context.Context, } } +// parseUpfrontShutdownScript attempts to parse an upfront shutdown address. +// If the address is empty, it returns nil. If it successfully decoded the +// address, it returns a script that pays out to the address. +func parseUpfrontShutdownAddress(address string) (lnwire.DeliveryAddress, error) { + if len(address) == 0 { + return nil, nil + } + + addr, err := btcutil.DecodeAddress( + address, activeNetParams.Params, + ) + if err != nil { + return nil, fmt.Errorf("invalid address: %v", err) + } + + return txscript.PayToAddrScript(addr) +} + // GetChanPointFundingTxid returns the given channel point's funding txid in // raw bytes. 
func GetChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) (*chainhash.Hash, error) { @@ -1702,6 +1948,10 @@ func GetChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) (*chainhash.Hash, er func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, updateStream lnrpc.Lightning_CloseChannelServer) error { + if !r.server.Started() { + return ErrServerNotActive + } + // If the user didn't specify a channel point, then we'll reject this // request all together. if in.GetChannelPoint() == nil { @@ -1735,7 +1985,14 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, // First, we'll fetch the channel as is, as we'll need to examine it // regardless of if this is a force close or not. - channel, err := r.fetchActiveChannel(*chanPoint) + channel, err := r.server.chanDB.FetchChannel(*chanPoint) + if err != nil { + return err + } + + // Retrieve the best height of the chain, which we'll use to complete + // either closing flow. + _, bestHeight, err := r.server.cc.chainIO.GetBestBlock() if err != nil { return err } @@ -1745,23 +2002,19 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, // transaction here rather than going to the switch as we don't require // interaction from the peer. if force { - _, bestHeight, err := r.server.cc.chainIO.GetBestBlock() - if err != nil { - return err - } // As we're force closing this channel, as a precaution, we'll // ensure that the switch doesn't continue to see this channel // as eligible for forwarding HTLC's. If the peer is online, // then we'll also purge all of its indexes. - remotePub := &channel.StateSnapshot().RemoteIdentity + remotePub := channel.IdentityPub if peer, err := r.server.FindPeer(remotePub); err == nil { // TODO(roasbeef): actually get the active channel // instead too? 
// * so only need to grab from database - peer.WipeChannel(channel.ChannelPoint()) + peer.WipeChannel(&channel.FundingOutpoint) } else { - chanID := lnwire.NewChanIDFromOutPoint(channel.ChannelPoint()) + chanID := lnwire.NewChanIDFromOutPoint(&channel.FundingOutpoint) r.server.htlcSwitch.RemoveLink(chanID) } @@ -1797,6 +2050,17 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, } }) } else { + // If this is a frozen channel, then we only allow the co-op + // close to proceed if we were the responder to this channel. + if channel.ChanType.IsFrozen() && channel.IsInitiator && + uint32(bestHeight) < channel.ThawHeight { + + return fmt.Errorf("cannot co-op close frozen channel "+ + "as initiator until height=%v, "+ + "(current_height=%v)", channel.ThawHeight, + bestHeight) + } + // If the link is not known by the switch, we cannot gracefully close // the channel. channelID := lnwire.NewChanIDFromOutPoint(chanPoint) @@ -1810,7 +2074,7 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, // Based on the passed fee related parameters, we'll determine // an appropriate fee rate for the cooperative closure // transaction. - satPerKw := lnwallet.SatPerKVByte( + satPerKw := chainfee.SatPerKVByte( in.SatPerByte * 1000, ).FeePerKWeight() feeRate, err := sweep.DetermineFeePerKw( @@ -1838,8 +2102,28 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest, // cooperative channel closure. So we'll forward the request to // the htlc switch which will handle the negotiation and // broadcast details. + + var deliveryScript lnwire.DeliveryAddress + + // If a delivery address to close out to was specified, decode it. + if len(in.DeliveryAddress) > 0 { + // Decode the address provided. + addr, err := btcutil.DecodeAddress( + in.DeliveryAddress, activeNetParams.Params, + ) + if err != nil { + return fmt.Errorf("invalid delivery address: %v", err) + } + + // Create a script to pay out to the address provided. 
+ deliveryScript, err = txscript.PayToAddrScript(addr) + if err != nil { + return err + } + } + updateChan, errChan = r.server.htlcSwitch.CloseLink( - chanPoint, htlcswitch.CloseRegular, feeRate, + chanPoint, htlcswitch.CloseRegular, feeRate, deliveryScript, ) } out: @@ -1908,6 +2192,28 @@ func createRPCCloseUpdate(update interface{}) ( return nil, errors.New("unknown close status update") } +// abandonChanFromGraph attempts to remove a channel from the channel graph. If +// we can't find the chanID in the graph, then we assume it has already been +// removed, and will return a nop. +func abandonChanFromGraph(chanGraph *channeldb.ChannelGraph, + chanPoint *wire.OutPoint) error { + + // First, we'll obtain the channel ID. If we can't locate this, then + // it's the case that the channel may have already been removed from + // the graph, so we'll return a nil error. + chanID, err := chanGraph.ChannelID(chanPoint) + switch { + case err == channeldb.ErrEdgeNotFound: + return nil + case err != nil: + return err + } + + // If the channel ID is still in the graph, then that means the channel + // is still open, so we'll now move to purge it from the graph. + return chanGraph.DeleteChannelEdges(chanID) +} + // AbandonChannel removes all channel state from the database except for a // close summary. This method can be used to get rid of permanently unusable // channels due to bugs fixed in newer versions of lnd. @@ -1931,63 +2237,73 @@ func (r *rpcServer) AbandonChannel(ctx context.Context, index := in.ChannelPoint.OutputIndex chanPoint := wire.NewOutPoint(txid, index) - // With the chanPoint constructed, we'll attempt to find the target - // channel in the database. If we can't find the channel, then we'll - // return the error back to the caller. - dbChan, err := r.server.chanDB.FetchChannel(*chanPoint) + // When we remove the channel from the database, we need to set a close + // height, so we'll just use the current best known height. 
+ _, bestHeight, err := r.server.cc.chainIO.GetBestBlock() if err != nil { return nil, err } - // Now that we've found the channel, we'll populate a close summary for - // the channel, so we can store as much information for this abounded - // channel as possible. We also ensure that we set Pending to false, to - // indicate that this channel has been "fully" closed. - _, bestHeight, err := r.server.cc.chainIO.GetBestBlock() + dbChan, err := r.server.chanDB.FetchChannel(*chanPoint) + switch { + // If the channel isn't found in the set of open channels, then we can + // continue on as it can't be loaded into the link/peer. + case err == channeldb.ErrChannelNotFound: + break + + // If the channel is still known to be open, then before we modify any + // on-disk state, we'll remove the channel from the switch and peer + // state if it's been loaded in. + case err == nil: + // We'll mark the channel as borked before we remove the state + // from the switch/peer so it won't be loaded back in if the + // peer reconnects. + if err := dbChan.MarkBorked(); err != nil { + return nil, err + } + remotePub := dbChan.IdentityPub + if peer, err := r.server.FindPeer(remotePub); err == nil { + peer.WipeChannel(chanPoint) + } + + default: + return nil, err + } + + // Abandoning a channel is a three step process: remove from the open + // channel state, remove from the graph, remove from the contract + // court. Between any step it's possible that the users restarts the + // process all over again. As a result, each of the steps below are + // intended to be idempotent. 
+ err = r.server.chanDB.AbandonChannel(chanPoint, uint32(bestHeight)) if err != nil { return nil, err } - summary := &channeldb.ChannelCloseSummary{ - CloseType: channeldb.Abandoned, - ChanPoint: *chanPoint, - ChainHash: dbChan.ChainHash, - CloseHeight: uint32(bestHeight), - RemotePub: dbChan.IdentityPub, - Capacity: dbChan.Capacity, - SettledBalance: dbChan.LocalCommitment.LocalBalance.ToSatoshis(), - ShortChanID: dbChan.ShortChanID(), - RemoteCurrentRevocation: dbChan.RemoteCurrentRevocation, - RemoteNextRevocation: dbChan.RemoteNextRevocation, - LocalChanConfig: dbChan.LocalChanCfg, - } - - // Finally, we'll close the channel in the DB, and return back to the - // caller. - err = dbChan.CloseChannel(summary) + err = abandonChanFromGraph( + r.server.chanDB.ChannelGraph(), chanPoint, + ) if err != nil { return nil, err } - - return &lnrpc.AbandonChannelResponse{}, nil -} - -// fetchActiveChannel attempts to locate a channel identified by its channel -// point from the database's set of all currently opened channels and -// return it as a fully populated state machine -func (r *rpcServer) fetchActiveChannel(chanPoint wire.OutPoint) ( - *lnwallet.LightningChannel, error) { - - dbChan, err := r.server.chanDB.FetchChannel(chanPoint) + err = r.server.chainArb.ResolveContract(*chanPoint) if err != nil { return nil, err } - // If the channel is successfully fetched from the database, - // we create a fully populated channel state machine which - // uses the db channel as backing storage. - return lnwallet.NewLightningChannel( - r.server.cc.wallet.Cfg.Signer, dbChan, nil, - ) + // If this channel was in the process of being closed, but didn't fully + // close, then it's possible that the nursery is hanging on to some + // state. To err on the side of caution, we'll now attempt to wipe any + // state for this channel from the nursery. 
+ err = r.server.utxoNursery.cfg.Store.RemoveChannel(chanPoint) + if err != nil && err != ErrContractNotFound { + return nil, err + } + + // Finally, notify the backup listeners that the channel can be removed + // from any channel backups. + r.server.channelNotifier.NotifyClosedChannelEvent(*chanPoint) + + return &lnrpc.AbandonChannelResponse{}, nil } // GetInfo returns general information concerning the lightning node including @@ -2059,6 +2375,22 @@ func (r *rpcServer) GetInfo(ctx context.Context, isGraphSynced := r.server.authGossiper.SyncManager().IsGraphSynced() + features := make(map[uint32]*lnrpc.Feature) + sets := r.server.featureMgr.ListSets() + + for _, set := range sets { + // Get the a list of lnrpc features for each set we support. + featureVector := r.server.featureMgr.Get(set) + rpcFeatures := invoicesrpc.CreateRPCFeatures(featureVector) + + // Add the features to our map of features, allowing over writing of + // existing values because features in different sets with the same bit + // are duplicated across sets. 
+ for bit, feature := range rpcFeatures { + features[bit] = feature + } + } + // TODO(roasbeef): add synced height n stuff return &lnrpc.GetInfoResponse{ IdentityPubkey: encodedIDPub, @@ -2075,8 +2407,10 @@ func (r *rpcServer) GetInfo(ctx context.Context, Alias: nodeAnn.Alias.String(), Color: routing.EncodeHexColor(nodeAnn.RGBColor), BestHeaderTimestamp: int64(bestHeaderTimestamp), - Version: build.Version(), + Version: build.Version() + " commit=" + build.Commit, + CommitHash: build.CommitHash, SyncedToGraph: isGraphSynced, + Features: features, }, nil } @@ -2136,6 +2470,10 @@ func (r *rpcServer) ListPeers(ctx context.Context, } } + features := invoicesrpc.CreateRPCFeatures( + serverPeer.RemoteFeatures(), + ) + peer := &lnrpc.Peer{ PubKey: hex.EncodeToString(nodePub[:]), Address: serverPeer.conn.RemoteAddr().String(), @@ -2146,6 +2484,34 @@ func (r *rpcServer) ListPeers(ctx context.Context, SatRecv: satRecv, PingTime: serverPeer.PingTime(), SyncType: lnrpcSyncType, + Features: features, + } + + var peerErrors []interface{} + + // If we only want the most recent error, get the most recent + // error from the buffer and add it to our list of errors if + // it is non-nil. If we want all the stored errors, simply + // add the full list to our set of errors. + if in.LatestError { + latestErr := serverPeer.errorBuffer.Latest() + if latestErr != nil { + peerErrors = []interface{}{latestErr} + } + } else { + peerErrors = serverPeer.errorBuffer.List() + } + + // Add the relevant peer errors to our response. 
+ for _, error := range peerErrors { + tsError := error.(*timestampedError) + + rpcErr := &lnrpc.TimestampedError{ + Timestamp: uint64(tsError.timestamp.Unix()), + Error: tsError.error.Error(), + } + + peer.Errors = append(peer.Errors, rpcErr) } resp.Peers = append(resp.Peers, peer) @@ -2156,6 +2522,51 @@ func (r *rpcServer) ListPeers(ctx context.Context, return resp, nil } +// SubscribePeerEvents returns a uni-directional stream (server -> client) +// for notifying the client of peer online and offline events. +func (r *rpcServer) SubscribePeerEvents(req *lnrpc.PeerEventSubscription, + eventStream lnrpc.Lightning_SubscribePeerEventsServer) error { + + peerEventSub, err := r.server.peerNotifier.SubscribePeerEvents() + if err != nil { + return err + } + defer peerEventSub.Cancel() + + for { + select { + // A new update has been sent by the peer notifier, we'll + // marshal it into the form expected by the gRPC client, then + // send it off to the client. + case e := <-peerEventSub.Updates(): + var event *lnrpc.PeerEvent + + switch peerEvent := e.(type) { + case peernotifier.PeerOfflineEvent: + event = &lnrpc.PeerEvent{ + PubKey: hex.EncodeToString(peerEvent.PubKey[:]), + Type: lnrpc.PeerEvent_PEER_OFFLINE, + } + + case peernotifier.PeerOnlineEvent: + event = &lnrpc.PeerEvent{ + PubKey: hex.EncodeToString(peerEvent.PubKey[:]), + Type: lnrpc.PeerEvent_PEER_ONLINE, + } + + default: + return fmt.Errorf("unexpected peer event: %v", event) + } + + if err := eventStream.Send(event); err != nil { + return err + } + case <-r.quit: + return nil + } + } +} + // WalletBalance returns total unspent outputs(confirmed and unconfirmed), all // confirmed unspent outputs and all unconfirmed unspent outputs under control // by the wallet. 
This method can be modified by having the request specify @@ -2236,6 +2647,16 @@ func (r *rpcServer) PendingChannels(ctx context.Context, resp := &lnrpc.PendingChannelsResponse{} + // rpcInitiator returns the correct lnrpc initiator for channels where + // we have a record of the opening channel. + rpcInitiator := func(isInitiator bool) lnrpc.Initiator { + if isInitiator { + return lnrpc.Initiator_INITIATOR_LOCAL + } + + return lnrpc.Initiator_INITIATOR_REMOTE + } + // First, we'll populate the response with all the channels that are // soon to be opened. We can easily fetch this data from the database // and map the db struct to the proto response. @@ -2270,6 +2691,8 @@ func (r *rpcServer) PendingChannels(ctx context.Context, RemoteBalance: int64(localCommitment.RemoteBalance.ToSatoshis()), LocalChanReserveSat: int64(pendingChan.LocalChanCfg.ChanReserve), RemoteChanReserveSat: int64(pendingChan.RemoteChanCfg.ChanReserve), + Initiator: rpcInitiator(pendingChan.IsInitiator), + CommitmentType: rpcCommitmentType(pendingChan.ChanType), }, CommitWeight: commitWeight, CommitFee: int64(localCommitment.CommitFee), @@ -2295,33 +2718,59 @@ func (r *rpcServer) PendingChannels(ctx context.Context, // needed regardless of how this channel was closed. pub := pendingClose.RemotePub.SerializeCompressed() chanPoint := pendingClose.ChanPoint + + // Create the pending channel. If this channel was closed before + // we started storing historical channel data, we will not know + // who initiated the channel, so we set the initiator field to + // unknown. 
channel := &lnrpc.PendingChannelsResponse_PendingChannel{ - RemoteNodePub: hex.EncodeToString(pub), - ChannelPoint: chanPoint.String(), - Capacity: int64(pendingClose.Capacity), - LocalBalance: int64(pendingClose.SettledBalance), + RemoteNodePub: hex.EncodeToString(pub), + ChannelPoint: chanPoint.String(), + Capacity: int64(pendingClose.Capacity), + LocalBalance: int64(pendingClose.SettledBalance), + CommitmentType: lnrpc.CommitmentType_UNKNOWN_COMMITMENT_TYPE, + Initiator: lnrpc.Initiator_INITIATOR_UNKNOWN, + } + + // Lookup the channel in the historical channel bucket to obtain + // initiator information. If the historical channel bucket was + // not found, or the channel itself, this channel was closed + // in a version before we started persisting historical + // channels, so we silence the error. + historical, err := r.server.chanDB.FetchHistoricalChannel( + &pendingClose.ChanPoint, + ) + switch err { + // If the channel was closed in a version that did not record + // historical channels, ignore the error. + case channeldb.ErrNoHistoricalBucket: + case channeldb.ErrChannelNotFound: + + case nil: + channel.Initiator = rpcInitiator(historical.IsInitiator) + channel.CommitmentType = rpcCommitmentType( + historical.ChanType, + ) + + // If the error is non-nil, and not due to older versions of lnd + // not persisting historical channels, return it. + default: + return nil, err } closeTXID := pendingClose.ClosingTXID.String() switch pendingClose.CloseType { - // If the channel was closed cooperatively, then we'll only - // need to tack on the closing txid. - // TODO(halseth): remove. After recent changes, a coop closed - // channel should never be in the "pending close" state. - // Keeping for now to let someone that upgraded in the middle - // of a close let their closing tx confirm. + // A coop closed channel should never be in the "pending close" + // state. 
If a node upgraded from an older lnd version in the + // middle of a their channel confirming, it will be in this + // state. We log a warning that the channel will not be included + // in the now deprecated pending close channels field. case channeldb.CooperativeClose: - resp.PendingClosingChannels = append( - resp.PendingClosingChannels, - &lnrpc.PendingChannelsResponse_ClosedChannel{ - Channel: channel, - ClosingTxid: closeTXID, - }, - ) - - resp.TotalLimboBalance += channel.LocalBalance + rpcsLog.Warn("channel %v cooperatively closed and "+ + "in pending close state", + pendingClose.ChanPoint) // If the channel was force closed, then we'll need to query // the utxoNursery for additional information. @@ -2373,6 +2822,53 @@ func (r *rpcServer) PendingChannels(ctx context.Context, for _, waitingClose := range waitingCloseChans { pub := waitingClose.IdentityPub.SerializeCompressed() chanPoint := waitingClose.FundingOutpoint + + var commitments lnrpc.PendingChannelsResponse_Commitments + + // Report local commit. May not be present when DLP is active. + if waitingClose.LocalCommitment.CommitTx != nil { + commitments.LocalTxid = + waitingClose.LocalCommitment.CommitTx.TxHash(). + String() + + commitments.LocalCommitFeeSat = uint64( + waitingClose.LocalCommitment.CommitFee, + ) + } + + // Report remote commit. May not be present when DLP is active. + if waitingClose.RemoteCommitment.CommitTx != nil { + commitments.RemoteTxid = + waitingClose.RemoteCommitment.CommitTx.TxHash(). + String() + + commitments.RemoteCommitFeeSat = uint64( + waitingClose.RemoteCommitment.CommitFee, + ) + } + + // Report the remote pending commit if any. + remoteCommitDiff, err := waitingClose.RemoteCommitChainTip() + + switch { + + // Don't set hash if there is no pending remote commit. + case err == channeldb.ErrNoPendingCommit: + + // An unexpected error occurred. + case err != nil: + return nil, err + + // There is a pending remote commit. Set its hash in the + // response. 
+ default: + hash := remoteCommitDiff.Commitment.CommitTx.TxHash() + commitments.RemotePendingTxid = hash.String() + commitments.RemoteCommitFeeSat = uint64( + remoteCommitDiff.Commitment.CommitFee, + ) + } + channel := &lnrpc.PendingChannelsResponse_PendingChannel{ RemoteNodePub: hex.EncodeToString(pub), ChannelPoint: chanPoint.String(), @@ -2381,16 +2877,20 @@ func (r *rpcServer) PendingChannels(ctx context.Context, RemoteBalance: int64(waitingClose.LocalCommitment.RemoteBalance.ToSatoshis()), LocalChanReserveSat: int64(waitingClose.LocalChanCfg.ChanReserve), RemoteChanReserveSat: int64(waitingClose.RemoteChanCfg.ChanReserve), + Initiator: rpcInitiator(waitingClose.IsInitiator), + CommitmentType: rpcCommitmentType(waitingClose.ChanType), + } + + waitingCloseResp := &lnrpc.PendingChannelsResponse_WaitingCloseChannel{ + Channel: channel, + LimboBalance: channel.LocalBalance, + Commitments: &commitments, } // A close tx has been broadcasted, all our balance will be in // limbo until it confirms. resp.WaitingCloseChannels = append( - resp.WaitingCloseChannels, - &lnrpc.PendingChannelsResponse_WaitingCloseChannel{ - Channel: channel, - LimboBalance: channel.LocalBalance, - }, + resp.WaitingCloseChannels, waitingCloseResp, ) resp.TotalLimboBalance += channel.LocalBalance @@ -2413,23 +2913,72 @@ func (r *rpcServer) arbitratorPopulateForceCloseResp(chanPoint *wire.OutPoint, reports := arbitrator.Report() for _, report := range reports { - htlc := &lnrpc.PendingHTLC{ - Incoming: report.Incoming, - Amount: int64(report.Amount), - Outpoint: report.Outpoint.String(), - MaturityHeight: report.MaturityHeight, - Stage: report.Stage, - } + switch report.Type { + + // For a direct output, populate/update the top level + // response properties. + case contractcourt.ReportOutputUnencumbered: + // Populate the maturity height fields for the direct + // commitment output to us. 
+ forceClose.MaturityHeight = report.MaturityHeight + + // If the transaction has been confirmed, then we can + // compute how many blocks it has left. + if forceClose.MaturityHeight != 0 { + forceClose.BlocksTilMaturity = + int32(forceClose.MaturityHeight) - + currentHeight + } - if htlc.MaturityHeight != 0 { - htlc.BlocksTilMaturity = - int32(htlc.MaturityHeight) - currentHeight + // Add htlcs to the PendingHtlcs response property. + case contractcourt.ReportOutputIncomingHtlc, + contractcourt.ReportOutputOutgoingHtlc: + + // Don't report details on htlcs that are no longer in + // limbo. + if report.LimboBalance == 0 { + break + } + + incoming := report.Type == contractcourt.ReportOutputIncomingHtlc + htlc := &lnrpc.PendingHTLC{ + Incoming: incoming, + Amount: int64(report.Amount), + Outpoint: report.Outpoint.String(), + MaturityHeight: report.MaturityHeight, + Stage: report.Stage, + } + + if htlc.MaturityHeight != 0 { + htlc.BlocksTilMaturity = + int32(htlc.MaturityHeight) - currentHeight + } + + forceClose.PendingHtlcs = append(forceClose.PendingHtlcs, htlc) + + case contractcourt.ReportOutputAnchor: + // There are three resolution states for the anchor: + // limbo, lost and recovered. Derive the current state + // from the limbo and recovered balances. 
+ switch { + + case report.RecoveredBalance != 0: + forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_RECOVERED + + case report.LimboBalance != 0: + forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_LIMBO + + default: + forceClose.Anchor = lnrpc.PendingChannelsResponse_ForceClosedChannel_LOST + } + + default: + return fmt.Errorf("unknown report output type: %v", + report.Type) } forceClose.LimboBalance += int64(report.LimboBalance) forceClose.RecoveredBalance += int64(report.RecoveredBalance) - - forceClose.PendingHtlcs = append(forceClose.PendingHtlcs, htlc) } return nil @@ -2460,15 +3009,6 @@ func (r *rpcServer) nurseryPopulateForceCloseResp(chanPoint *wire.OutPoint, // wallet. forceClose.LimboBalance = int64(nurseryInfo.limboBalance) forceClose.RecoveredBalance = int64(nurseryInfo.recoveredBalance) - forceClose.MaturityHeight = nurseryInfo.maturityHeight - - // If the transaction has been confirmed, then we can compute how many - // blocks it has left. 
- if forceClose.MaturityHeight != 0 { - forceClose.BlocksTilMaturity = - int32(forceClose.MaturityHeight) - - currentHeight - } for _, htlcReport := range nurseryInfo.htlcs { // TODO(conner) set incoming flag appropriately after handling @@ -2551,7 +3091,11 @@ func (r *rpcServer) ClosedChannels(ctx context.Context, } } - channel := createRPCClosedChannel(dbChannel) + channel, err := r.createRPCClosedChannel(dbChannel) + if err != nil { + return nil, err + } + resp.Channels = append(resp.Channels, channel) } @@ -2573,6 +3117,11 @@ func (r *rpcServer) ListChannels(ctx context.Context, "`private_only` can be set, but not both") } + if len(in.Peer) > 0 && len(in.Peer) != 33 { + _, err := route.NewVertexFromBytes(in.Peer) + return nil, fmt.Errorf("invalid `peer` key: %v", err) + } + resp := &lnrpc.ListChannelsResponse{} graph := r.server.chanDB.ChannelGraph() @@ -2587,8 +3136,15 @@ func (r *rpcServer) ListChannels(ctx context.Context, for _, dbChannel := range dbChannels { nodePub := dbChannel.IdentityPub + nodePubBytes := nodePub.SerializeCompressed() chanPoint := dbChannel.FundingOutpoint + // If the caller requested channels for a target node, skip any + // that don't match the provided pubkey. + if len(in.Peer) > 0 && !bytes.Equal(nodePubBytes, in.Peer) { + continue + } + var peerOnline bool if _, err := r.server.FindPeer(nodePub); err == nil { peerOnline = true @@ -2606,7 +3162,10 @@ func (r *rpcServer) ListChannels(ctx context.Context, // Next, we'll determine whether we should add this channel to // our list depending on the type of channels requested to us. isActive := peerOnline && linkActive - channel := createRPCOpenChannel(r, graph, dbChannel, isActive) + channel, err := createRPCOpenChannel(r, graph, dbChannel, isActive) + if err != nil { + return nil, err + } // We'll only skip returning this channel if we were requested // for a specific kind and this channel doesn't satisfy it. 
@@ -2627,9 +3186,26 @@ func (r *rpcServer) ListChannels(ctx context.Context, return resp, nil } +// rpcCommitmentType takes the channel type and converts it to an rpc commitment +// type value. +func rpcCommitmentType(chanType channeldb.ChannelType) lnrpc.CommitmentType { + // Extract the commitment type from the channel type flags. We must + // first check whether it has anchors, since in that case it would also + // be tweakless. + if chanType.HasAnchors() { + return lnrpc.CommitmentType_ANCHORS + } + + if chanType.IsTweakless() { + return lnrpc.CommitmentType_STATIC_REMOTE_KEY + } + + return lnrpc.CommitmentType_LEGACY +} + // createRPCOpenChannel creates an *lnrpc.Channel from the *channeldb.Channel. func createRPCOpenChannel(r *rpcServer, graph *channeldb.ChannelGraph, - dbChannel *channeldb.OpenChannel, isActive bool) *lnrpc.Channel { + dbChannel *channeldb.OpenChannel, isActive bool) (*lnrpc.Channel, error) { nodePub := dbChannel.IdentityPub nodeID := hex.EncodeToString(nodePub.SerializeCompressed()) @@ -2664,6 +3240,9 @@ func createRPCOpenChannel(r *rpcServer, graph *channeldb.ChannelGraph, } externalCommitFee := dbChannel.Capacity - sumOutputs + // Extract the commitment type from the channel type flags. 
+ commitmentType := rpcCommitmentType(dbChannel.ChanType) + channel := &lnrpc.Channel{ Active: isActive, Private: !isPublic, @@ -2685,7 +3264,9 @@ func createRPCOpenChannel(r *rpcServer, graph *channeldb.ChannelGraph, ChanStatusFlags: dbChannel.ChanStatus().String(), LocalChanReserveSat: int64(dbChannel.LocalChanCfg.ChanReserve), RemoteChanReserveSat: int64(dbChannel.RemoteChanCfg.ChanReserve), - StaticRemoteKey: dbChannel.ChanType.IsTweakless(), + StaticRemoteKey: commitmentType == lnrpc.CommitmentType_STATIC_REMOTE_KEY, + CommitmentType: commitmentType, + ThawHeight: dbChannel.ThawHeight, } for i, htlc := range localCommit.Htlcs { @@ -2702,18 +3283,107 @@ func createRPCOpenChannel(r *rpcServer, graph *channeldb.ChannelGraph, channel.UnsettledBalance += channel.PendingHtlcs[i].Amount } - return channel + // Lookup our balances at height 0, because they will reflect any + // push amounts that may have been present when this channel was + // created. + localBalance, remoteBalance, err := dbChannel.BalancesAtHeight(0) + if err != nil { + return nil, err + } + + // If we initiated opening the channel, the zero height remote balance + // is the push amount. Otherwise, our starting balance is the push + // amount. If there is no push amount, these values will simply be zero. + if dbChannel.IsInitiator { + channel.PushAmountSat = uint64(remoteBalance.ToSatoshis()) + } else { + channel.PushAmountSat = uint64(localBalance.ToSatoshis()) + } + + outpoint := dbChannel.FundingOutpoint + + // Get the lifespan observed by the channel event store. If the channel is + // not known to the channel event store, return early because we cannot + // calculate any further uptime information. 
+ startTime, endTime, err := r.server.chanEventStore.GetLifespan(outpoint) + switch err { + case chanfitness.ErrChannelNotFound: + rpcsLog.Infof("channel: %v not found by channel event store", + outpoint) + + return channel, nil + case nil: + // If there is no error getting lifespan, continue to uptime + // calculation. + default: + return nil, err + } + + // If endTime is zero, the channel is still open, progress endTime to + // the present so we can calculate lifetime. + if endTime.IsZero() { + endTime = time.Now() + } + channel.Lifetime = int64(endTime.Sub(startTime).Seconds()) + + // Once we have successfully obtained channel lifespan, we know that the + // channel is known to the event store, so we can return any non-nil error + // that occurs. + uptime, err := r.server.chanEventStore.GetUptime( + outpoint, startTime, endTime, + ) + if err != nil { + return nil, err + } + channel.Uptime = int64(uptime.Seconds()) + + if len(dbChannel.LocalShutdownScript) > 0 { + _, addresses, _, err := txscript.ExtractPkScriptAddrs( + dbChannel.LocalShutdownScript, activeNetParams.Params, + ) + if err != nil { + return nil, err + } + + // We only expect one upfront shutdown address for a channel. If + // LocalShutdownScript is non-zero, there should be one payout address + // set. + if len(addresses) != 1 { + return nil, fmt.Errorf("expected one upfront shutdown address, "+ + "got: %v", len(addresses)) + } + + channel.CloseAddress = addresses[0].String() + } + + return channel, nil } // createRPCClosedChannel creates an *lnrpc.ClosedChannelSummary from a // *channeldb.ChannelCloseSummary. 
-func createRPCClosedChannel( - dbChannel *channeldb.ChannelCloseSummary) *lnrpc.ChannelCloseSummary { +func (r *rpcServer) createRPCClosedChannel( + dbChannel *channeldb.ChannelCloseSummary) (*lnrpc.ChannelCloseSummary, error) { nodePub := dbChannel.RemotePub nodeID := hex.EncodeToString(nodePub.SerializeCompressed()) - var closeType lnrpc.ChannelCloseSummary_ClosureType + var ( + closeType lnrpc.ChannelCloseSummary_ClosureType + openInit lnrpc.Initiator + closeInitiator lnrpc.Initiator + err error + ) + + // Lookup local and remote cooperative initiators. If these values + // are not known they will just return unknown. + openInit, closeInitiator, err = r.getInitiators( + &dbChannel.ChanPoint, + ) + if err != nil { + return nil, err + } + + // Convert the close type to rpc type. switch dbChannel.CloseType { case channeldb.CooperativeClose: closeType = lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE @@ -2740,7 +3410,75 @@ func createRPCClosedChannel( TimeLockedBalance: int64(dbChannel.TimeLockedBalance), ChainHash: dbChannel.ChainHash.String(), ClosingTxHash: dbChannel.ClosingTXID.String(), + OpenInitiator: openInit, + CloseInitiator: closeInitiator, + }, nil +} + +// getInitiators returns an initiator enum that provides information about the +// party that initiated channel's open and close. This information is obtained +// from the historical channel bucket, so unknown values are returned when the +// channel is not present (which indicates that it was closed before we started +// writing channels to the historical close bucket). +func (r *rpcServer) getInitiators(chanPoint *wire.OutPoint) ( + lnrpc.Initiator, + lnrpc.Initiator, error) { + + var ( + openInitiator = lnrpc.Initiator_INITIATOR_UNKNOWN + closeInitiator = lnrpc.Initiator_INITIATOR_UNKNOWN + ) + + // To get the close initiator for cooperative closes, we need + // to get the channel status from the historical channel bucket. 
+ histChan, err := r.server.chanDB.FetchHistoricalChannel(chanPoint) + switch { + // The node has upgraded from a version where we did not store + // historical channels, and has not closed a channel since. Do + // not return an error, initiator values are unknown. + case err == channeldb.ErrNoHistoricalBucket: + return openInitiator, closeInitiator, nil + + // The channel was closed before we started storing historical + // channels. Do not return an error, initiator values are unknown. + case err == channeldb.ErrChannelNotFound: + return openInitiator, closeInitiator, nil + + case err != nil: + return 0, 0, err + } + + // If we successfully looked up the channel, determine initiator based + // on channels status. + if histChan.IsInitiator { + openInitiator = lnrpc.Initiator_INITIATOR_LOCAL + } else { + openInitiator = lnrpc.Initiator_INITIATOR_REMOTE + } + + localInit := histChan.HasChanStatus( + channeldb.ChanStatusLocalCloseInitiator, + ) + + remoteInit := histChan.HasChanStatus( + channeldb.ChanStatusRemoteCloseInitiator, + ) + + switch { + // There is a possible case where closes were attempted by both parties. + // We return the initiator as both in this case to provide full + // information about the close. 
+ case localInit && remoteInit: + closeInitiator = lnrpc.Initiator_INITIATOR_BOTH + + case localInit: + closeInitiator = lnrpc.Initiator_INITIATOR_LOCAL + + case remoteInit: + closeInitiator = lnrpc.Initiator_INITIATOR_REMOTE } + + return openInitiator, closeInitiator, nil } // SubscribeChannelEvents returns a uni-directional stream (server -> client) @@ -2767,23 +3505,45 @@ func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription, case e := <-channelEventSub.Updates(): var update *lnrpc.ChannelEventUpdate switch event := e.(type) { + case channelnotifier.PendingOpenChannelEvent: + update = &lnrpc.ChannelEventUpdate{ + Type: lnrpc.ChannelEventUpdate_PENDING_OPEN_CHANNEL, + Channel: &lnrpc.ChannelEventUpdate_PendingOpenChannel{ + PendingOpenChannel: &lnrpc.PendingUpdate{ + Txid: event.ChannelPoint.Hash[:], + OutputIndex: event.ChannelPoint.Index, + }, + }, + } case channelnotifier.OpenChannelEvent: - channel := createRPCOpenChannel(r, graph, + channel, err := createRPCOpenChannel(r, graph, event.Channel, true) + if err != nil { + return err + } + update = &lnrpc.ChannelEventUpdate{ Type: lnrpc.ChannelEventUpdate_OPEN_CHANNEL, Channel: &lnrpc.ChannelEventUpdate_OpenChannel{ OpenChannel: channel, }, } + case channelnotifier.ClosedChannelEvent: - closedChannel := createRPCClosedChannel(event.CloseSummary) + closedChannel, err := r.createRPCClosedChannel( + event.CloseSummary, + ) + if err != nil { + return err + } + update = &lnrpc.ChannelEventUpdate{ Type: lnrpc.ChannelEventUpdate_CLOSED_CHANNEL, Channel: &lnrpc.ChannelEventUpdate_ClosedChannel{ ClosedChannel: closedChannel, }, } + case channelnotifier.ActiveChannelEvent: update = &lnrpc.ChannelEventUpdate{ Type: lnrpc.ChannelEventUpdate_ACTIVE_CHANNEL, @@ -2796,6 +3556,7 @@ func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription, }, }, } + case channelnotifier.InactiveChannelEvent: update = &lnrpc.ChannelEventUpdate{ Type: lnrpc.ChannelEventUpdate_INACTIVE_CHANNEL, @@ 
-2808,6 +3569,12 @@ func (r *rpcServer) SubscribeChannelEvents(req *lnrpc.ChannelEventSubscription, }, }, } + + // Completely ignore ActiveLinkEvent as this is explicitly not + // exposed to the RPC. + case channelnotifier.ActiveLinkEvent: + continue + default: return fmt.Errorf("unexpected channel event update: %v", event) } @@ -2837,27 +3604,6 @@ type rpcPaymentRequest struct { route *route.Route } -// calculateFeeLimit returns the fee limit in millisatoshis. If a percentage -// based fee limit has been requested, we'll factor in the ratio provided with -// the amount of the payment. -func calculateFeeLimit(feeLimit *lnrpc.FeeLimit, - amount lnwire.MilliSatoshi) lnwire.MilliSatoshi { - - switch feeLimit.GetLimit().(type) { - case *lnrpc.FeeLimit_Fixed: - return lnwire.NewMSatFromSatoshis( - btcutil.Amount(feeLimit.GetFixed()), - ) - case *lnrpc.FeeLimit_Percent: - return amount * lnwire.MilliSatoshi(feeLimit.GetPercent()) / 100 - default: - // If a fee limit was not specified, we'll use the payment's - // amount as an upper bound in order to avoid payment attempts - // from incurring fees higher than the payment amount itself. - return amount - } -} - // SendPayment dispatches a bi-directional streaming RPC for sending payments // through the Lightning Network. A single RPC invocation creates a persistent // bi-directional stream allowing clients to rapidly send payments through the @@ -2947,9 +3693,12 @@ type rpcPaymentIntent struct { cltvDelta uint16 routeHints [][]zpay32.HopHint outgoingChannelID *uint64 + lastHop *route.Vertex + destFeatures *lnwire.FeatureVector + paymentAddr *[32]byte payReq []byte - destTLV []tlv.Record + destCustomRecords record.CustomSet route *route.Route } @@ -2958,7 +3707,7 @@ type rpcPaymentIntent struct { // dispatch a client from the information presented by an RPC client. There are // three ways a client can specify their payment details: a payment request, // via manual details, or via a complete route. 
-func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error) { +func (r *rpcServer) extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error) { payIntent := rpcPaymentIntent{} // If a route was specified, then we can use that directly. @@ -2988,6 +3737,17 @@ func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error payIntent.outgoingChannelID = &rpcPayReq.OutgoingChanId } + // Pass along a last hop restriction if specified. + if len(rpcPayReq.LastHopPubkey) > 0 { + lastHop, err := route.NewVertexFromBytes( + rpcPayReq.LastHopPubkey, + ) + if err != nil { + return payIntent, err + } + payIntent.lastHop = &lastHop + } + // Take the CLTV limit from the request if set, otherwise use the max. cltvLimit, err := routerrpc.ValidateCLTVLimit( rpcPayReq.CltvLimit, cfg.MaxOutgoingCltvExpiry, @@ -2997,14 +3757,22 @@ func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error } payIntent.cltvLimit = cltvLimit - if len(rpcPayReq.DestTlv) != 0 { - var err error - payIntent.destTLV, err = tlv.MapToRecords( - rpcPayReq.DestTlv, - ) - if err != nil { - return payIntent, err + customRecords := record.CustomSet(rpcPayReq.DestCustomRecords) + if err := customRecords.Validate(); err != nil { + return payIntent, err + } + payIntent.destCustomRecords = customRecords + + validateDest := func(dest route.Vertex) error { + if rpcPayReq.AllowSelfPayment { + return nil + } + + if dest == r.selfNode { + return errors.New("self-payments not allowed") } + + return nil } // If the payment request field isn't blank, then the details of the @@ -3029,21 +3797,25 @@ func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error // We override the amount to pay with the amount provided from // the payment request. 
if payReq.MilliSat == nil { - if rpcPayReq.Amt == 0 { + amt, err := lnrpc.UnmarshallAmt( + rpcPayReq.Amt, rpcPayReq.AmtMsat, + ) + if err != nil { + return payIntent, err + } + if amt == 0 { return payIntent, errors.New("amount must be " + "specified when paying a zero amount " + "invoice") } - payIntent.msat = lnwire.NewMSatFromSatoshis( - btcutil.Amount(rpcPayReq.Amt), - ) + payIntent.msat = amt } else { payIntent.msat = *payReq.MilliSat } // Calculate the fee limit that should be used for this payment. - payIntent.feeLimit = calculateFeeLimit( + payIntent.feeLimit = lnrpc.CalculateFeeLimit( rpcPayReq.FeeLimit, payIntent.msat, ) @@ -3053,6 +3825,12 @@ func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error payIntent.cltvDelta = uint16(payReq.MinFinalCLTVExpiry()) payIntent.routeHints = payReq.RouteHints payIntent.payReq = []byte(rpcPayReq.PaymentRequest) + payIntent.destFeatures = payReq.Features + payIntent.paymentAddr = payReq.PaymentAddr + + if err := validateDest(payIntent.dest); err != nil { + return payIntent, err + } return payIntent, nil } @@ -3075,22 +3853,33 @@ func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error } copy(payIntent.dest[:], pubBytes) + if err := validateDest(payIntent.dest); err != nil { + return payIntent, err + } + // Otherwise, If the payment request field was not specified // (and a custom route wasn't specified), construct the payment // from the other fields. - payIntent.msat = lnwire.NewMSatFromSatoshis( - btcutil.Amount(rpcPayReq.Amt), + payIntent.msat, err = lnrpc.UnmarshallAmt( + rpcPayReq.Amt, rpcPayReq.AmtMsat, ) + if err != nil { + return payIntent, err + } // Calculate the fee limit that should be used for this payment. 
- payIntent.feeLimit = calculateFeeLimit( + payIntent.feeLimit = lnrpc.CalculateFeeLimit( rpcPayReq.FeeLimit, payIntent.msat, ) if rpcPayReq.FinalCltvDelta != 0 { payIntent.cltvDelta = uint16(rpcPayReq.FinalCltvDelta) } else { - payIntent.cltvDelta = zpay32.DefaultFinalCLTVDelta + // If no final cltv delta is given, assume the default that we + // use when creating an invoice. We do not assume the default of + // 9 blocks that is defined in BOLT-11, because this is never + // enough for other lnd nodes. + payIntent.cltvDelta = uint16(cfg.Bitcoin.TimeLockDelta) } // If the user is manually specifying payment details, then the payment @@ -3110,6 +3899,14 @@ func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error copy(payIntent.rHash[:], rpcPayReq.PaymentHash) } + // Unmarshal any custom destination features. + payIntent.destFeatures, err = routerrpc.UnmarshalFeatures( + rpcPayReq.DestFeatures, + ) + if err != nil { + return payIntent, err + } + // Currently, within the bootstrap phase of the network, we limit the // largest payment size allotted to (2^32) - 1 mSAT or 4.29 million // satoshis. @@ -3160,9 +3957,16 @@ func (r *rpcServer) dispatchPaymentIntent( PaymentHash: payIntent.rHash, RouteHints: payIntent.routeHints, OutgoingChannelID: payIntent.outgoingChannelID, + LastHop: payIntent.lastHop, PaymentRequest: payIntent.payReq, PayAttemptTimeout: routing.DefaultPayAttemptTimeout, - FinalDestRecords: payIntent.destTLV, + DestCustomRecords: payIntent.destCustomRecords, + DestFeatures: payIntent.destFeatures, + PaymentAddr: payIntent.paymentAddr, + + // Don't enable multi-part payments on the main rpc. + // Users need to use routerrpc for that. + MaxParts: 1, } preImage, route, routerErr = r.server.chanRouter.SendPayment( @@ -3205,8 +4009,7 @@ func (r *rpcServer) sendPayment(stream *paymentStream) error { // syncing as we may be trying to sent a payment over a "stale" // channel. 
if !r.server.Started() { - return fmt.Errorf("chain backend is still syncing, server " + - "not active yet") + return ErrServerNotActive } // TODO(roasbeef): check payment filter to see if already used? @@ -3220,25 +4023,28 @@ func (r *rpcServer) sendPayment(stream *paymentStream) error { htlcSema <- struct{}{} } + // We keep track of the running goroutines and set up a quit signal we + // can use to request them to exit if the method returns because of an + // encountered error. + var wg sync.WaitGroup + reqQuit := make(chan struct{}) + defer close(reqQuit) + // Launch a new goroutine to handle reading new payment requests from // the client. This way we can handle errors independently of blocking // and waiting for the next payment request to come through. - reqQuit := make(chan struct{}) - defer func() { - close(reqQuit) - }() - // TODO(joostjager): Callers expect result to come in in the same order // as the request were sent, but this is far from guarantueed in the // code below. + wg.Add(1) go func() { + defer wg.Done() + for { select { case <-reqQuit: return - case <-r.quit: - errChan <- nil - return + default: // Receive the next pending payment within the // stream sent by the client. If we read the @@ -3246,13 +4052,15 @@ func (r *rpcServer) sendPayment(stream *paymentStream) error { // stream, and we can exit normally. nextPayment, err := stream.recv() if err == io.EOF { - errChan <- nil + close(payChan) return } else if err != nil { + rpcsLog.Errorf("Failed receiving from "+ + "stream: %v", err) + select { case errChan <- err: - case <-reqQuit: - return + default: } return } @@ -3262,24 +4070,30 @@ func (r *rpcServer) sendPayment(stream *paymentStream) error { // fields. If the payment proto wasn't well // formed, then we'll send an error reply and // wait for the next payment. 
- payIntent, err := extractPaymentIntent(nextPayment) + payIntent, err := r.extractPaymentIntent( + nextPayment, + ) if err != nil { if err := stream.send(&lnrpc.SendResponse{ PaymentError: err.Error(), PaymentHash: payIntent.rHash[:], }); err != nil { + rpcsLog.Errorf("Failed "+ + "sending on "+ + "stream: %v", err) + select { case errChan <- err: - case <-reqQuit: - return + default: } + return } continue } // If the payment was well formed, then we'll // send to the dispatch goroutine, or exit, - // which ever comes first + // which ever comes first. select { case payChan <- &payIntent: case <-reqQuit: @@ -3289,20 +4103,41 @@ func (r *rpcServer) sendPayment(stream *paymentStream) error { } }() +sendLoop: for { select { + + // If we encounter and error either during sending or + // receiving, we return directly, closing the stream. case err := <-errChan: return err - case payIntent := <-payChan: + case <-r.quit: + return errors.New("rpc server shutting down") + + case payIntent, ok := <-payChan: + // If the receive loop is done, we break the send loop + // and wait for the ongoing payments to finish before + // exiting. + if !ok { + break sendLoop + } + // We launch a new goroutine to execute the current // payment so we can continue to serve requests while // this payment is being dispatched. + wg.Add(1) go func() { + defer wg.Done() + // Attempt to grab a free semaphore slot, using // a defer to eventually release the slot // regardless of payment success. - <-htlcSema + select { + case <-htlcSema: + case <-reqQuit: + return + } defer func() { htlcSema <- struct{}{} }() @@ -3316,7 +4151,13 @@ func (r *rpcServer) sendPayment(stream *paymentStream) error { // payment, then we'll return the error to the // user, and terminate. 
case saveErr != nil: - errChan <- saveErr + rpcsLog.Errorf("Failed dispatching "+ + "payment intent: %v", saveErr) + + select { + case errChan <- saveErr: + default: + } return // If we receive payment error than, instead of @@ -3328,7 +4169,14 @@ func (r *rpcServer) sendPayment(stream *paymentStream) error { PaymentHash: payIntent.rHash[:], }) if err != nil { - errChan <- err + rpcsLog.Errorf("Failed "+ + "sending error "+ + "response: %v", err) + + select { + case errChan <- err: + default: + } } return } @@ -3348,12 +4196,22 @@ func (r *rpcServer) sendPayment(stream *paymentStream) error { PaymentRoute: marshalledRouted, }) if err != nil { - errChan <- err + rpcsLog.Errorf("Failed sending "+ + "response: %v", err) + + select { + case errChan <- err: + default: + } return } }() } } + + // Wait for all goroutines to finish before closing the stream. + wg.Wait() + return nil } // SendPaymentSync is the synchronous non-streaming version of SendPayment. @@ -3396,13 +4254,12 @@ func (r *rpcServer) sendPaymentSync(ctx context.Context, // syncing as we may be trying to sent a payment over a "stale" // channel. if !r.server.Started() { - return nil, fmt.Errorf("chain backend is still syncing, server " + - "not active yet") + return nil, ErrServerNotActive } // First we'll attempt to map the proto describing the next payment to // an intent that we can pass to local sub-systems. 
- payIntent, err := extractPaymentIntent(nextPayment) + payIntent, err := r.extractPaymentIntent(nextPayment) if err != nil { return nil, err } @@ -3449,15 +4306,21 @@ func (r *rpcServer) AddInvoice(ctx context.Context, IsChannelActive: r.server.htlcSwitch.HasActiveLink, ChainParams: activeNetParams.Params, NodeSigner: r.server.nodeSigner, - MaxPaymentMSat: MaxPaymentMSat, DefaultCLTVExpiry: defaultDelta, ChanDB: r.server.chanDB, + GenInvoiceFeatures: func() *lnwire.FeatureVector { + return r.server.featureMgr.Get(feature.SetInvoice) + }, + } + + value, err := lnrpc.UnmarshallAmt(invoice.Value, invoice.ValueMsat) + if err != nil { + return nil, err } addInvoiceData := &invoicesrpc.AddInvoiceData{ Memo: invoice.Memo, - Receipt: invoice.Receipt, - Value: btcutil.Amount(invoice.Value), + Value: value, DescriptionHash: invoice.DescriptionHash, Expiry: invoice.Expiry, FallbackAddr: invoice.FallbackAddr, @@ -3744,7 +4607,7 @@ func (r *rpcServer) DescribeGraph(ctx context.Context, // First iterate through all the known nodes (connected or unconnected // within the graph), collating their current state into the RPC // response. 
- err := graph.ForEachNode(nil, func(_ *bbolt.Tx, node *channeldb.LightningNode) error { + err := graph.ForEachNode(nil, func(_ kvdb.ReadTx, node *channeldb.LightningNode) error { nodeAddrs := make([]*lnrpc.NodeAddress, 0) for _, addr := range node.Addresses { nodeAddr := &lnrpc.NodeAddress{ @@ -3754,13 +4617,16 @@ func (r *rpcServer) DescribeGraph(ctx context.Context, nodeAddrs = append(nodeAddrs, nodeAddr) } - resp.Nodes = append(resp.Nodes, &lnrpc.LightningNode{ + lnNode := &lnrpc.LightningNode{ LastUpdate: uint32(node.LastUpdate.Unix()), PubKey: hex.EncodeToString(node.PubKeyBytes[:]), Addresses: nodeAddrs, Alias: node.Alias, Color: routing.EncodeHexColor(node.Color), - }) + Features: invoicesrpc.CreateRPCFeatures(node.Features), + } + + resp.Nodes = append(resp.Nodes, lnNode) return nil }) @@ -3849,6 +4715,63 @@ func marshalDbEdge(edgeInfo *channeldb.ChannelEdgeInfo, return edge } +// GetNodeMetrics returns all available node metrics calculated from the +// current channel graph. +func (r *rpcServer) GetNodeMetrics(ctx context.Context, + req *lnrpc.NodeMetricsRequest) (*lnrpc.NodeMetricsResponse, error) { + + // Get requested metric types. + getCentrality := false + for _, t := range req.Types { + if t == lnrpc.NodeMetricType_BETWEENNESS_CENTRALITY { + getCentrality = true + } + } + + // Only centrality can be requested for now. + if !getCentrality { + return nil, nil + } + + resp := &lnrpc.NodeMetricsResponse{ + BetweennessCentrality: make(map[string]*lnrpc.FloatMetric), + } + + // Obtain the pointer to the global singleton channel graph, this will + // provide a consistent view of the graph due to bolt db's + // transactional model. + graph := r.server.chanDB.ChannelGraph() + + // Calculate betweenness centrality if requested. Note that depending on the + // graph size, this may take up to a few minutes. 
+ channelGraph := autopilot.ChannelGraphFromDatabase(graph) + centralityMetric, err := autopilot.NewBetweennessCentralityMetric( + runtime.NumCPU(), + ) + if err != nil { + return nil, err + } + if err := centralityMetric.Refresh(channelGraph); err != nil { + return nil, err + } + + // Fill normalized and non normalized centrality. + centrality := centralityMetric.GetMetric(true) + for nodeID, val := range centrality { + resp.BetweennessCentrality[hex.EncodeToString(nodeID[:])] = + &lnrpc.FloatMetric{ + NormalizedValue: val, + } + } + + centrality = centralityMetric.GetMetric(false) + for nodeID, val := range centrality { + resp.BetweennessCentrality[hex.EncodeToString(nodeID[:])].Value = val + } + + return resp, nil +} + // GetChanInfo returns the latest authenticated network announcement for the // given channel identified by its channel ID: an 8-byte integer which uniquely // identifies the location of transaction's funding output within the block @@ -3880,11 +4803,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context, // First, parse the hex-encoded public key into a full in-memory public // key object we can work with for querying. - pubKeyBytes, err := hex.DecodeString(in.PubKey) - if err != nil { - return nil, err - } - pubKey, err := btcec.ParsePubKey(pubKeyBytes, btcec.S256()) + pubKey, err := route.NewVertexFromStr(in.PubKey) if err != nil { return nil, err } @@ -3892,7 +4811,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context, // With the public key decoded, attempt to fetch the node corresponding // to this public key. If the node cannot be found, then an error will // be returned. 
- node, err := graph.FetchLightningNode(pubKey) + node, err := graph.FetchLightningNode(nil, pubKey) if err != nil { return nil, err } @@ -3905,7 +4824,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context, channels []*lnrpc.ChannelEdge ) - if err := node.ForEachChannel(nil, func(_ *bbolt.Tx, + if err := node.ForEachChannel(nil, func(_ kvdb.ReadTx, edge *channeldb.ChannelEdgeInfo, c1, c2 *channeldb.ChannelEdgePolicy) error { @@ -3941,6 +4860,8 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context, nodeAddrs = append(nodeAddrs, nodeAddr) } + features := invoicesrpc.CreateRPCFeatures(node.Features) + return &lnrpc.NodeInfo{ Node: &lnrpc.LightningNode{ LastUpdate: uint32(node.LastUpdate.Unix()), @@ -3948,6 +4869,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context, Addresses: nodeAddrs, Alias: node.Alias, Color: routing.EncodeHexColor(node.Color), + Features: features, }, NumChannels: numChannels, TotalCapacity: int64(totalCapacity), @@ -4000,7 +4922,7 @@ func (r *rpcServer) GetNetworkInfo(ctx context.Context, // network, tallying up the total number of nodes, and also gathering // each node so we can measure the graph diameter and degree stats // below. - if err := graph.ForEachNode(nil, func(tx *bbolt.Tx, node *channeldb.LightningNode) error { + if err := graph.ForEachNode(nil, func(tx kvdb.ReadTx, node *channeldb.LightningNode) error { // Increment the total number of nodes with each iteration. numNodes++ @@ -4010,7 +4932,7 @@ func (r *rpcServer) GetNetworkInfo(ctx context.Context, // through the db transaction from the outer view so we can // re-use it within this inner view. 
var outDegree uint32 - if err := node.ForEachChannel(tx, func(_ *bbolt.Tx, + if err := node.ForEachChannel(tx, func(_ kvdb.ReadTx, edge *channeldb.ChannelEdgeInfo, _, _ *channeldb.ChannelEdgePolicy) error { // Bump up the out degree for this node for each @@ -4235,95 +5157,52 @@ func marshallTopologyChange(topChange *routing.TopologyChange) *lnrpc.GraphTopol } } -// ListPayments returns a list of all outgoing payments. +// ListPayments returns a list of outgoing payments determined by a paginated +// database query. func (r *rpcServer) ListPayments(ctx context.Context, req *lnrpc.ListPaymentsRequest) (*lnrpc.ListPaymentsResponse, error) { rpcsLog.Debugf("[ListPayments]") - payments, err := r.server.chanDB.FetchPayments() - if err != nil { - return nil, err + query := channeldb.PaymentsQuery{ + IndexOffset: req.IndexOffset, + MaxPayments: req.MaxPayments, + Reversed: req.Reversed, + IncludeIncomplete: req.IncludeIncomplete, } - paymentsResp := &lnrpc.ListPaymentsResponse{} - for _, payment := range payments { - // To keep compatibility with the old API, we only return - // non-suceeded payments if requested. - if payment.Status != channeldb.StatusSucceeded && - !req.IncludeIncomplete { - continue - } + // If the maximum number of payments wasn't specified, then we'll + // default to return the maximal number of payments representable. + if req.MaxPayments == 0 { + query.MaxPayments = math.MaxUint64 + } - // If a payment attempt has been made we can fetch the route. - // Otherwise we'll just populate the RPC response with an empty - // one. - var route route.Route - if payment.Attempt != nil { - route = payment.Attempt.Route - } - path := make([]string, len(route.Hops)) - for i, hop := range route.Hops { - path[i] = hex.EncodeToString(hop.PubKeyBytes[:]) - } + paymentsQuerySlice, err := r.server.chanDB.QueryPayments(query) + if err != nil { + return nil, err + } - // If this payment is settled, the preimage will be available. 
- var preimage lntypes.Preimage - if payment.PaymentPreimage != nil { - preimage = *payment.PaymentPreimage - } + paymentsResp := &lnrpc.ListPaymentsResponse{ + LastIndexOffset: paymentsQuerySlice.LastIndexOffset, + FirstIndexOffset: paymentsQuerySlice.FirstIndexOffset, + } - msatValue := int64(payment.Info.Value) - satValue := int64(payment.Info.Value.ToSatoshis()) + for _, payment := range paymentsQuerySlice.Payments { + payment := payment - status, err := convertPaymentStatus(payment.Status) + rpcPayment, err := r.routerBackend.MarshallPayment(&payment) if err != nil { return nil, err } - paymentHash := payment.Info.PaymentHash - paymentsResp.Payments = append(paymentsResp.Payments, &lnrpc.Payment{ - PaymentHash: hex.EncodeToString(paymentHash[:]), - Value: satValue, - ValueMsat: msatValue, - ValueSat: satValue, - CreationDate: payment.Info.CreationDate.Unix(), - Path: path, - Fee: int64(route.TotalFees().ToSatoshis()), - FeeSat: int64(route.TotalFees().ToSatoshis()), - FeeMsat: int64(route.TotalFees()), - PaymentPreimage: hex.EncodeToString(preimage[:]), - PaymentRequest: string(payment.Info.PaymentRequest), - Status: status, - }) + paymentsResp.Payments = append( + paymentsResp.Payments, rpcPayment, + ) } return paymentsResp, nil } -// convertPaymentStatus converts a channeldb.PaymentStatus to the type expected -// by the RPC. -func convertPaymentStatus(dbStatus channeldb.PaymentStatus) ( - lnrpc.Payment_PaymentStatus, error) { - - switch dbStatus { - case channeldb.StatusUnknown: - return lnrpc.Payment_UNKNOWN, nil - - case channeldb.StatusInFlight: - return lnrpc.Payment_IN_FLIGHT, nil - - case channeldb.StatusSucceeded: - return lnrpc.Payment_SUCCEEDED, nil - - case channeldb.StatusFailed: - return lnrpc.Payment_FAILED, nil - - default: - return 0, fmt.Errorf("unhandled payment status %v", dbStatus) - } -} - // DeleteAllPayments deletes all outgoing payments from DB. 
func (r *rpcServer) DeleteAllPayments(ctx context.Context, _ *lnrpc.DeleteAllPaymentsRequest) (*lnrpc.DeleteAllPaymentsResponse, error) { @@ -4348,7 +5227,9 @@ func (r *rpcServer) DebugLevel(ctx context.Context, // sub-systems. if req.Show { return &lnrpc.DebugLevelResponse{ - SubSystems: strings.Join(supportedSubsystems(), " "), + SubSystems: strings.Join( + logWriter.SupportedSubsystems(), " ", + ), }, nil } @@ -4356,7 +5237,8 @@ func (r *rpcServer) DebugLevel(ctx context.Context, // Otherwise, we'll attempt to set the logging level using the // specified level spec. - if err := parseAndSetDebugLevels(req.LevelSpec); err != nil { + err := build.ParseAndSetDebugLevels(req.LevelSpec, logWriter) + if err != nil { return nil, err } @@ -4402,16 +5284,24 @@ func (r *rpcServer) DecodePayReq(ctx context.Context, // Convert between the `lnrpc` and `routing` types. routeHints := invoicesrpc.CreateRPCRouteHints(payReq.RouteHints) - amt := int64(0) + var amtSat, amtMsat int64 if payReq.MilliSat != nil { - amt = int64(payReq.MilliSat.ToSatoshis()) + amtSat = int64(payReq.MilliSat.ToSatoshis()) + amtMsat = int64(*payReq.MilliSat) + } + + // Extract the payment address from the payment request, if present. 
+ var paymentAddr []byte + if payReq.PaymentAddr != nil { + paymentAddr = payReq.PaymentAddr[:] } dest := payReq.Destination.SerializeCompressed() return &lnrpc.PayReq{ Destination: hex.EncodeToString(dest), PaymentHash: hex.EncodeToString(payReq.PaymentHash[:]), - NumSatoshis: amt, + NumSatoshis: amtSat, + NumMsat: amtMsat, Timestamp: payReq.Timestamp.Unix(), Description: desc, DescriptionHash: hex.EncodeToString(descHash[:]), @@ -4419,6 +5309,8 @@ func (r *rpcServer) DecodePayReq(ctx context.Context, Expiry: expiry, CltvExpiry: int64(payReq.MinFinalCLTVExpiry()), RouteHints: routeHints, + PaymentAddr: paymentAddr, + Features: invoicesrpc.CreateRPCFeatures(payReq.Features), }, nil } @@ -4444,7 +5336,7 @@ func (r *rpcServer) FeeReport(ctx context.Context, } var feeReports []*lnrpc.ChannelFeeReport - err = selfNode.ForEachChannel(nil, func(_ *bbolt.Tx, chanInfo *channeldb.ChannelEdgeInfo, + err = selfNode.ForEachChannel(nil, func(_ kvdb.ReadTx, chanInfo *channeldb.ChannelEdgeInfo, edgePolicy, _ *channeldb.ChannelEdgePolicy) error { // Self node should always have policies for its channels. 
@@ -4463,10 +5355,11 @@ func (r *rpcServer) FeeReport(ctx context.Context, // TODO(roasbeef): also add stats for revenue for each channel feeReports = append(feeReports, &lnrpc.ChannelFeeReport{ - ChanPoint: chanInfo.ChannelPoint.String(), - BaseFeeMsat: int64(edgePolicy.FeeBaseMSat), - FeePerMil: int64(feeRateFixedPoint), - FeeRate: feeRate, + ChanId: chanInfo.ChannelID, + ChannelPoint: chanInfo.ChannelPoint.String(), + BaseFeeMsat: int64(edgePolicy.FeeBaseMSat), + FeePerMil: int64(feeRateFixedPoint), + FeeRate: feeRate, }) return nil @@ -4626,15 +5519,25 @@ func (r *rpcServer) UpdateChannelPolicy(ctx context.Context, FeeRate: feeRateFixed, } + maxHtlc := lnwire.MilliSatoshi(req.MaxHtlcMsat) + var minHtlc *lnwire.MilliSatoshi + if req.MinHtlcMsatSpecified { + min := lnwire.MilliSatoshi(req.MinHtlcMsat) + minHtlc = &min + } + chanPolicy := routing.ChannelPolicy{ FeeSchema: feeSchema, TimeLockDelta: req.TimeLockDelta, - MaxHTLC: lnwire.MilliSatoshi(req.MaxHtlcMsat), + MaxHTLC: maxHtlc, + MinHTLC: minHtlc, } rpcsLog.Debugf("[updatechanpolicy] updating channel policy base_fee=%v, "+ - "rate_float=%v, rate_fixed=%v, time_lock_delta: %v, targets=%v", + "rate_float=%v, rate_fixed=%v, time_lock_delta: %v, "+ + "min_htlc=%v, max_htlc=%v, targets=%v", req.BaseFeeMsat, req.FeeRate, feeRateFixed, req.TimeLockDelta, + minHtlc, maxHtlc, spew.Sdump(targetChans)) // With the scope resolved, we'll now send this to the local channel @@ -4720,18 +5623,20 @@ func (r *rpcServer) ForwardingHistory(ctx context.Context, LastOffsetIndex: timeSlice.LastIndexOffset, } for i, event := range timeSlice.ForwardingEvents { - amtInSat := event.AmtIn.ToSatoshis() - amtOutSat := event.AmtOut.ToSatoshis() + amtInMsat := event.AmtIn + amtOutMsat := event.AmtOut feeMsat := event.AmtIn - event.AmtOut resp.ForwardingEvents[i] = &lnrpc.ForwardingEvent{ - Timestamp: uint64(event.Timestamp.Unix()), - ChanIdIn: event.IncomingChanID.ToUint64(), - ChanIdOut: event.OutgoingChanID.ToUint64(), - AmtIn: 
uint64(amtInSat), - AmtOut: uint64(amtOutSat), - Fee: uint64(feeMsat.ToSatoshis()), - FeeMsat: uint64(feeMsat), + Timestamp: uint64(event.Timestamp.Unix()), + ChanIdIn: event.IncomingChanID.ToUint64(), + ChanIdOut: event.OutgoingChanID.ToUint64(), + AmtIn: uint64(amtInMsat.ToSatoshis()), + AmtOut: uint64(amtOutMsat.ToSatoshis()), + Fee: uint64(feeMsat.ToSatoshis()), + FeeMsat: uint64(feeMsat), + AmtInMsat: uint64(amtInMsat), + AmtOutMsat: uint64(amtOutMsat), } } @@ -5020,7 +5925,7 @@ func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription updateStream lnrpc.Lightning_SubscribeChannelBackupsServer) error { // First, we'll subscribe to the primary channel notifier so we can - // obtain events for new opened/closed channels. + // obtain events for new pending/opened/closed channels. chanSubscription, err := r.server.channelNotifier.SubscribeChannelEvents() if err != nil { return err @@ -5038,10 +5943,15 @@ func (r *rpcServer) SubscribeChannelBackups(req *lnrpc.ChannelBackupSubscription // We only care about new/closed channels, so we'll // skip any events for active/inactive channels. + // To make the subscription behave the same way as the + // synchronous call and the file based backup, we also + // include pending channels in the update. case channelnotifier.ActiveChannelEvent: continue case channelnotifier.InactiveChannelEvent: continue + case channelnotifier.ActiveLinkEvent: + continue } // Now that we know the channel state has changed, @@ -5238,3 +6148,191 @@ func (r *rpcServer) ChannelAcceptor(stream lnrpc.Lightning_ChannelAcceptorServer } } } + +// BakeMacaroon allows the creation of a new macaroon with custom read and write +// permissions. No first-party caveats are added since this can be done offline. 
+func (r *rpcServer) BakeMacaroon(ctx context.Context, + req *lnrpc.BakeMacaroonRequest) (*lnrpc.BakeMacaroonResponse, error) { + + rpcsLog.Debugf("[bakemacaroon]") + + // If the --no-macaroons flag is used to start lnd, the macaroon service + // is not initialized. Therefore we can't bake new macaroons. + if r.macService == nil { + return nil, fmt.Errorf("macaroon authentication disabled, " + + "remove --no-macaroons flag to enable") + } + + helpMsg := fmt.Sprintf("supported actions are %v, supported entities "+ + "are %v", validActions, validEntities) + + // Don't allow empty permission list as it doesn't make sense to have + // a macaroon that is not allowed to access any RPC. + if len(req.Permissions) == 0 { + return nil, fmt.Errorf("permission list cannot be empty. "+ + "specify at least one action/entity pair. %s", helpMsg) + } + + // Validate and map permission struct used by gRPC to the one used by + // the bakery. + requestedPermissions := make([]bakery.Op, len(req.Permissions)) + for idx, op := range req.Permissions { + if !stringInSlice(op.Action, validActions) { + return nil, fmt.Errorf("invalid permission action. %s", + helpMsg) + } + if !stringInSlice(op.Entity, validEntities) { + return nil, fmt.Errorf("invalid permission entity. %s", + helpMsg) + } + + requestedPermissions[idx] = bakery.Op{ + Entity: op.Entity, + Action: op.Action, + } + } + + // Bake new macaroon with the given permissions and send it binary + // serialized and hex encoded to the client. 
+ newMac, err := r.macService.Oven.NewMacaroon( + ctx, bakery.LatestVersion, nil, requestedPermissions..., + ) + if err != nil { + return nil, err + } + newMacBytes, err := newMac.M().MarshalBinary() + if err != nil { + return nil, err + } + resp := &lnrpc.BakeMacaroonResponse{} + resp.Macaroon = hex.EncodeToString(newMacBytes) + + return resp, nil +} + +// FundingStateStep is an advanced funding related call that allows the caller +// to either execute some preparatory steps for a funding workflow, or manually +// progress a funding workflow. The primary way a funding flow is identified is +// via its pending channel ID. As an example, this method can be used to +// specify that we're expecting a funding flow for a particular pending channel +// ID, for which we need to use specific parameters. Alternatively, this can +// be used to interactively drive PSBT signing for funding for partially +// complete funding transactions. +func (r *rpcServer) FundingStateStep(ctx context.Context, + in *lnrpc.FundingTransitionMsg) (*lnrpc.FundingStateStepResp, error) { + + var pendingChanID [32]byte + switch { + + // If this is a message to register a new shim that is an external + // channel point, then we'll contact the wallet to register this new + // shim. A user will use this method to register a new channel funding + // workflow which has already been partially negotiated outside of the + // core protocol. + case in.GetShimRegister() != nil && + in.GetShimRegister().GetChanPointShim() != nil: + + rpcShimIntent := in.GetShimRegister().GetChanPointShim() + + // Using the rpc shim as a template, we'll construct a new + // chanfunding.Assembler that is able to express proper + // formulation of this expected channel. 
+ shimAssembler, err := newFundingShimAssembler( + rpcShimIntent, false, r.server.cc.keyRing, + ) + if err != nil { + return nil, err + } + req := &chanfunding.Request{ + RemoteAmt: btcutil.Amount(rpcShimIntent.Amt), + } + shimIntent, err := shimAssembler.ProvisionChannel(req) + if err != nil { + return nil, err + } + + // Once we have the intent, we'll register it with the wallet. + // Once we receive an incoming funding request that uses this + // pending channel ID, then this shim will be dispatched in + // place of our regular funding workflow. + copy(pendingChanID[:], rpcShimIntent.PendingChanId) + err = r.server.cc.wallet.RegisterFundingIntent( + pendingChanID, shimIntent, + ) + if err != nil { + return nil, err + } + + // There is no need to register a PSBT shim before opening the channel, + // even though our RPC message structure allows for it. Inform the user + // by returning a proper error instead of just doing nothing. + case in.GetShimRegister() != nil && + in.GetShimRegister().GetPsbtShim() != nil: + + return nil, fmt.Errorf("PSBT shim must only be sent when " + + "opening a channel") + + // If this is a transition to cancel an existing shim, then we'll pass + // this message along to the wallet, informing it that the intent no + // longer needs to be considered and should be cleaned up. + case in.GetShimCancel() != nil: + rpcsLog.Debugf("Canceling funding shim for pending_id=%x", + in.GetShimCancel().PendingChanId) + + copy(pendingChanID[:], in.GetShimCancel().PendingChanId) + err := r.server.cc.wallet.CancelFundingIntent(pendingChanID) + if err != nil { + return nil, err + } + + // If this is a transition to verify the PSBT for an existing shim, + // we'll do so and then store the verified PSBT for later so we can + // compare it to the final, signed one. 
+ case in.GetPsbtVerify() != nil: + rpcsLog.Debugf("Verifying PSBT for pending_id=%x", + in.GetPsbtVerify().PendingChanId) + + copy(pendingChanID[:], in.GetPsbtVerify().PendingChanId) + packet, err := psbt.NewFromRawBytes( + bytes.NewReader(in.GetPsbtVerify().FundedPsbt), false, + ) + if err != nil { + return nil, fmt.Errorf("error parsing psbt: %v", err) + } + + err = r.server.cc.wallet.PsbtFundingVerify( + pendingChanID, packet, + ) + if err != nil { + return nil, err + } + + // If this is a transition to finalize the PSBT funding flow, we compare + // the final PSBT to the previously verified one and if nothing + // unexpected was changed, continue the channel opening process. + case in.GetPsbtFinalize() != nil: + rpcsLog.Debugf("Finalizing PSBT for pending_id=%x", + in.GetPsbtFinalize().PendingChanId) + + copy(pendingChanID[:], in.GetPsbtFinalize().PendingChanId) + packet, err := psbt.NewFromRawBytes( + bytes.NewReader(in.GetPsbtFinalize().SignedPsbt), false, + ) + if err != nil { + return nil, fmt.Errorf("error parsing psbt: %v", err) + } + + err = r.server.cc.wallet.PsbtFundingFinalize( + pendingChanID, packet, + ) + if err != nil { + return nil, err + } + } + + // TODO(roasbeef): extend PendingChannels to also show shims + + // TODO(roasbeef): return resulting state? also add a method to query + // current state? + return &lnrpc.FundingStateStepResp{}, nil +} diff --git a/sample-lnd.conf b/sample-lnd.conf index e39f482ac0..973e72b9e3 100644 --- a/sample-lnd.conf +++ b/sample-lnd.conf @@ -1,4 +1,8 @@ ; vim: ft=dosini +; +; Example configuration for lnd. +; +; Boolean values can be specified as true/false or 1/0. [Application Options] @@ -26,11 +30,11 @@ ; Path to TLS private key for lnd's RPC and REST services. ; tlskeypath=~/.lnd-grs/tls.key -; Adds an extra ip to the generated certificate +; Adds an extra ip to the generated certificate. Setting multiple tlsextraip= entries is allowed. 
; (old tls files must be deleted if changed) ; tlsextraip= -; Adds an extra domain to the generate certificate +; Adds an extra domain to the generate certificate. Setting multiple tlsextradomain= entries is allowed. ; (old tls files must be deleted if changed) ; tlsextradomain= @@ -70,7 +74,7 @@ ; Disable listening for incoming p2p connections. This will override all ; listeners. -; nolisten=1 +; nolisten=true ; Specify the interfaces to listen on for gRPC connections. One listen ; address per line. @@ -133,7 +137,7 @@ ; If true, then automatic network bootstrapping will not be attempted. This ; means that your node won't attempt to automatically seek out peers on the ; network. -; nobootstrap=1 +; nobootstrap=true ; The smallest channel size (in satoshis) that we should accept. Incoming ; channels smaller than this will be rejected, default value 20000. @@ -152,13 +156,13 @@ ; If the Groestlcoin chain should be active. Atm, only a single chain can be ; active. -groestlcoin.active=1 +groestlcoin.active=true ; Use Groestlcoin's test network. -; groestlcoin.testnet=1 +; groestlcoin.testnet=true ; ; Use Groestlcoin's simulation test network -groestlcoin.simnet=1 +groestlcoin.simnet=true ; Use Groestlcoin's regression test network ; groestlcoin.regtest=false @@ -228,6 +232,9 @@ groestlcoin.node=grsd ; groestlcoind.zmqpubrawblock=tcp://127.0.0.1:28332 ; groestlcoind.zmqpubrawtx=tcp://127.0.0.1:28333 +; Fee estimate mode for groestlcoind. It must be either "ECONOMICAL" or "CONSERVATIVE". +; If unset, the default value is "CONSERVATIVE". +; groestlcoind.estimatemode=CONSERVATIVE [neutrino] @@ -248,7 +255,7 @@ groestlcoin.node=grsd ; If the autopilot agent should be active or not. The autopilot agent will ; attempt to automatically open up channels to put your node in an advantageous ; position within the network graph. -; autopilot.active=1 +; autopilot.active=true ; The maximum number of channels that should be created. 
; autopilot.maxchannels=5 @@ -274,11 +281,11 @@ groestlcoin.node=grsd ; connection. With this mode active, each connection will use a new circuit. ; This means that multiple applications (other than lnd) using Tor won't be mixed ; in with lnd's traffic. -; tor.streamisolation=1 +; tor.streamisolation=true [watchtower] ; Enable integrated watchtower listening on :9911 by default. -; watchtower.active=1 +; watchtower.active=true ; Specify the interfaces to listen on for watchtower client connections. One ; listen address per line. If no port is specified the default port of 9911 will @@ -311,12 +318,9 @@ groestlcoin.node=grsd ; watchtower.writetimeout=15s [wtclient] -; Configure the private tower to which lnd will connect to backup encrypted -; justice transactions. The format should be pubkey@host:port, where the port is -; optional and assumed to be 9911 otherwise. At most one private tower URI is -; supported at this time; if none are provided then the watchtower client will -; be inactive. -; wtclient.private-tower-uris= +; Activate Watchtower Client. To get more information or configure watchtowers +; run `lncli wtclient -h`. +; wtclient.active=true ; Specify the fee rate with which justice transactions will be signed. This fee ; rate should be chosen as a maximum fee rate one is willing to pay in order to diff --git a/scripts/install_bitcoind.sh b/scripts/install_bitcoind.sh index f70f8a3d78..ea3d4e5244 100755 --- a/scripts/install_bitcoind.sh +++ b/scripts/install_bitcoind.sh @@ -2,7 +2,7 @@ set -ev -export BITCOIND_VERSION=0.18.1 +export BITCOIND_VERSION=0.19.1 if sudo cp ~/bitcoin/bitcoin-$BITCOIND_VERSION/bin/bitcoind /usr/local/bin/bitcoind then diff --git a/scripts/install_travis_proto.sh b/scripts/install_travis_proto.sh new file mode 100755 index 0000000000..8208bd327c --- /dev/null +++ b/scripts/install_travis_proto.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +# Abort on error (-e) and print commands (-v). 
+set -ev + +# See README.md in lnrpc why we need these specific versions/commits. +PROTOC_VERSION=3.4.0 +PROTOBUF_VERSION="b5d812f8a3706043e23a9cd5babf2e5423744d30" +GENPROTO_VERSION="a8101f21cf983e773d0c1133ebc5424792003214" +GRPC_GATEWAY_VERSION="v1.8.6" + +# This script is specific to Travis CI so we only need to support linux x64. +PROTOC_URL="https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip" +PROTOC_DL_CACHE_DIR="${DOWNLOAD_CACHE:-/tmp/download_cache}/protoc" + +# install_protoc copies the cached protoc binary to the $PATH or downloads it +# if no cached version is found. +install_protoc() { + if [ -f "${PROTOC_DL_CACHE_DIR}/bin/protoc" ]; then + echo "Using cached version of protoc" + else + wget -O /tmp/protoc.zip $PROTOC_URL + mkdir -p "${PROTOC_DL_CACHE_DIR}" + unzip -o /tmp/protoc.zip -d "${PROTOC_DL_CACHE_DIR}" + chmod -R a+rx "${PROTOC_DL_CACHE_DIR}/" + fi + sudo cp "${PROTOC_DL_CACHE_DIR}/bin/protoc" /usr/local/bin + sudo cp -r "${PROTOC_DL_CACHE_DIR}/include" /usr/local +} + +# install_protobuf downloads and compiles the Golang protobuf library that +# encodes/decodes all protobuf messages from/to Go structs. +install_protobuf() { + local install_path="$GOPATH/src/github.com/golang/protobuf" + if [ ! -d "$install_path" ]; then + git clone https://github.com/golang/protobuf "$install_path" + fi + pushd "$install_path" + git reset --hard $PROTOBUF_VERSION + make + popd +} + +# install_genproto downloads the Golang protobuf generator that converts the +# .proto files into Go interface stubs. +install_genproto() { + local install_path="$GOPATH/src/google.golang.org/genproto" + if [ ! -d "$install_path" ]; then + git clone https://github.com/google/go-genproto "$install_path" + fi + pushd "$install_path" + git reset --hard $GENPROTO_VERSION + popd +} + +# install_grpc_gateway downloads and installs the gRPC gateway that converts +# .proto files into REST gateway code. 
+install_grpc_gateway() { + local install_path="$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway" + if [ ! -d "$install_path" ]; then + git clone https://github.com/grpc-ecosystem/grpc-gateway "$install_path" + fi + pushd "$install_path" + git reset --hard $GRPC_GATEWAY_VERSION + GO111MODULE=on go install ./protoc-gen-grpc-gateway ./protoc-gen-swagger + popd +} + +install_protoc +install_protobuf +install_genproto +install_grpc_gateway diff --git a/server.go b/server.go index 6297fc8552..3fdffb2c31 100644 --- a/server.go +++ b/server.go @@ -23,17 +23,20 @@ import ( "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/coreos/bbolt" "github.com/go-errors/errors" sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/autopilot" "github.com/lightningnetwork/lnd/brontide" "github.com/lightningnetwork/lnd/chanacceptor" "github.com/lightningnetwork/lnd/chanbackup" + "github.com/lightningnetwork/lnd/chanfitness" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/channelnotifier" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/discovery" + "github.com/lightningnetwork/lnd/feature" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/input" @@ -43,11 +46,14 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/lnwallet/chanfunding" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/nat" "github.com/lightningnetwork/lnd/netann" "github.com/lightningnetwork/lnd/peernotifier" "github.com/lightningnetwork/lnd/pool" + "github.com/lightningnetwork/lnd/queue" 
"github.com/lightningnetwork/lnd/routing" "github.com/lightningnetwork/lnd/routing/localchans" "github.com/lightningnetwork/lnd/routing/route" @@ -89,6 +95,11 @@ var ( // given peer. ErrPeerNotConnected = errors.New("peer is not connected") + // ErrServerNotActive indicates that the server has started but hasn't + // fully finished the startup process. + ErrServerNotActive = errors.New("server is still in the process of " + + "starting") + // ErrServerShuttingDown indicates that the server is in the process of // gracefully exiting. ErrServerShuttingDown = errors.New("server is shutting down") @@ -166,6 +177,12 @@ type server struct { persistentConnReqs map[string][]*connmgr.ConnReq persistentRetryCancels map[string]chan struct{} + // peerErrors keeps a set of peer error buffers for peers that have + // disconnected from us. This allows us to track historic peer errors + // over connections. The string of the peer's compressed pubkey is used + // as a key for this map. + peerErrors map[string]*queue.CircularBuffer + // ignorePeerTermination tracks peers for which the server has initiated // a disconnect. Adding a peer to this map causes the peer termination // watcher to short circuit in the event that peers are purposefully @@ -193,6 +210,8 @@ type server struct { peerNotifier *peernotifier.PeerNotifier + htlcNotifier *htlcswitch.HtlcNotifier + witnessBeacon contractcourt.WitnessBeacon breachArbiter *breachArbiter @@ -225,9 +244,9 @@ type server struct { readPool *pool.Read - // globalFeatures feature vector which affects HTLCs and thus are also - // advertised to other nodes. - globalFeatures *lnwire.FeatureVector + // featureMgr dispatches feature vectors for various contexts within the + // daemon. 
+ featureMgr *feature.Manager // currentNodeAnn is the node announcement that has been broadcast to // the network upon startup, if the attributes of the node (us) has @@ -243,6 +262,10 @@ type server struct { // channelNotifier to be notified of newly opened and closed channels. chanSubSwapper *chanbackup.SubSwapper + // chanEventStore tracks the behaviour of channels and their remote peers to + // provide insights into their health and performance. + chanEventStore *chanfitness.ChannelEventStore + quit chan struct{} wg sync.WaitGroup @@ -299,7 +322,8 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, towerClientDB *wtdb.ClientDB, cc *chainControl, privKey *btcec.PrivateKey, chansToRestore walletunlocker.ChannelsToRecover, - chanPredicate chanacceptor.ChannelAcceptor) (*server, error) { + chanPredicate chanacceptor.ChannelAcceptor, + torController *tor.Controller) (*server, error) { var err error @@ -320,16 +344,23 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, // Only if we're not being forced to use the legacy onion format, will // we signal our knowledge of the new TLV onion format. - if !cfg.LegacyProtocol.LegacyOnion() { + if !cfg.ProtocolOptions.LegacyOnion() { globalFeatures.Set(lnwire.TLVOnionPayloadOptional) } - // Similarly, we default to the new modern commitment format unless the - // legacy commitment config is set to true. - if !cfg.LegacyProtocol.LegacyCommitment() { + // Similarly, we default to supporting the new modern commitment format + // where the remote key is static unless the protocol config is set to + // keep using the older format. + if !cfg.ProtocolOptions.NoStaticRemoteKey() { globalFeatures.Set(lnwire.StaticRemoteKeyOptional) } + // We only signal that we support the experimental anchor commitments + // if explicitly enabled in the config. 
+ if cfg.ProtocolOptions.AnchorCommitments() { + globalFeatures.Set(lnwire.AnchorsOptional) + } + var serializedPubKey [33]byte copy(serializedPubKey[:], privKey.PubKey().SerializeCompressed()) @@ -358,6 +389,22 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, readBufferPool, cfg.Workers.Read, pool.DefaultWorkerTimeout, ) + featureMgr, err := feature.NewManager(feature.Config{ + NoTLVOnion: cfg.ProtocolOptions.LegacyOnion(), + NoStaticRemoteKey: cfg.ProtocolOptions.NoStaticRemoteKey(), + NoAnchors: !cfg.ProtocolOptions.AnchorCommitments(), + }) + if err != nil { + return nil, err + } + + registryConfig := invoices.RegistryConfig{ + FinalCltvRejectDelta: defaultFinalCltvRejectDelta, + HtlcHoldDuration: invoices.DefaultHtlcHoldDuration, + Clock: clock.NewDefaultClock(), + AcceptKeySend: cfg.AcceptKeySend, + } + s := &server{ chanDB: chanDB, cc: cc, @@ -367,7 +414,8 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, chansToRestore: chansToRestore, invoices: invoices.NewRegistry( - chanDB, defaultFinalCltvRejectDelta, + chanDB, invoices.NewInvoiceExpiryWatcher(clock.NewDefaultClock()), + ®istryConfig, ), channelNotifier: channelnotifier.New(chanDB), @@ -381,10 +429,13 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, // schedule sphinx: hop.NewOnionProcessor(sphinxRouter), + torController: torController, + persistentPeers: make(map[string]bool), persistentPeersBackoff: make(map[string]time.Duration), persistentConnReqs: make(map[string][]*connmgr.ConnReq), persistentRetryCancels: make(map[string]chan struct{}), + peerErrors: make(map[string]*queue.CircularBuffer), ignorePeerTermination: make(map[*peer]struct{}), scheduledPeerConnection: make(map[string]func()), @@ -394,10 +445,8 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, peerConnectedListeners: make(map[string][]chan<- lnpeer.Peer), peerDisconnectedListeners: make(map[string][]chan<- struct{}), - globalFeatures: lnwire.NewFeatureVector( - globalFeatures, 
lnwire.GlobalFeatures, - ), - quit: make(chan struct{}), + featureMgr: featureMgr, + quit: make(chan struct{}), } s.witnessBeacon = &preimageBeacon{ @@ -410,6 +459,8 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, return nil, err } + s.htlcNotifier = htlcswitch.NewHtlcNotifier(time.Now) + s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{ DB: chanDB, LocalChannelClose: func(pubKey []byte, @@ -439,10 +490,14 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, ExtractErrorEncrypter: s.sphinx.ExtractErrorEncrypter, FetchLastChannelUpdate: s.fetchLastChanUpdate(), Notifier: s.cc.chainNotifier, + HtlcNotifier: s.htlcNotifier, FwdEventTicker: ticker.New(htlcswitch.DefaultFwdEventInterval), LogEventTicker: ticker.New(htlcswitch.DefaultLogInterval), AckEventTicker: ticker.New(htlcswitch.DefaultAckInterval), + AllowCircularRoute: cfg.AllowCircularRoute, RejectHTLC: cfg.RejectHTLC, + Clock: clock.NewDefaultClock(), + HTLCExpiry: htlcswitch.DefaultHTLCExpiry, }, uint32(currentHeight)) if err != nil { return nil, err @@ -548,13 +603,6 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, selfAddrs = append(selfAddrs, ip) } - // If we were requested to route connections through Tor and to - // automatically create an onion service, we'll initiate our Tor - // controller and establish a connection to the Tor server. 
- if cfg.Tor.Active && (cfg.Tor.V2 || cfg.Tor.V3) { - s.torController = tor.NewController(cfg.Tor.Control) - } - chanGraph := chanDB.ChannelGraph() // We'll now reconstruct a node announcement based on our current @@ -583,7 +631,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, LastUpdate: time.Now(), Addresses: selfAddrs, Alias: nodeAlias.String(), - Features: s.globalFeatures, + Features: s.featureMgr.Get(feature.SetNodeAnn), Color: color, } copy(selfNode.PubKeyBytes[:], privKey.PubKey().SerializeCompressed()) @@ -598,7 +646,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, // With the announcement generated, we'll sign it to properly // authenticate the message on the network. - authSig, err := discovery.SignAnnouncement( + authSig, err := netann.SignAnnouncement( s.nodeSigner, s.identityPriv.PubKey(), nodeAnn, ) if err != nil { @@ -655,11 +703,14 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, routingConfig := routerrpc.GetRoutingConfig(cfg.SubRPCServers.RouterRPC) s.missionControl, err = routing.NewMissionControl( - chanDB.DB, + chanDB, &routing.MissionControlConfig{ - AprioriHopProbability: routingConfig.AprioriHopProbability, - PenaltyHalfLife: routingConfig.PenaltyHalfLife, - MaxMcHistory: routingConfig.MaxMcHistory, + AprioriHopProbability: routingConfig.AprioriHopProbability, + PenaltyHalfLife: routingConfig.PenaltyHalfLife, + MaxMcHistory: routingConfig.MaxMcHistory, + AprioriWeight: routingConfig.AprioriWeight, + SelfNode: selfNode.PubKeyBytes, + MinFailureRelaxInterval: routing.DefaultMinFailureRelaxInterval, }, ) if err != nil { @@ -682,7 +733,6 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, Graph: chanGraph, MissionControl: s.missionControl, QueryBandwidth: queryBandwidth, - SelfNode: selfNode, PathFindingConfig: pathFindingConfig, } @@ -704,6 +754,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, AssumeChannelValid: cfg.Routing.UseAssumeChannelValid(), NextPaymentID: 
sequencer.NextID, PathFindingConfig: pathFindingConfig, + Clock: clock.NewDefaultClock(), }) if err != nil { return nil, fmt.Errorf("can't create router: %v", err) @@ -772,15 +823,14 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, } s.sweeper = sweep.New(&sweep.UtxoSweeperConfig{ - FeeEstimator: cc.feeEstimator, - GenSweepScript: newSweepPkScriptGen(cc.wallet), - Signer: cc.wallet.Cfg.Signer, - PublishTransaction: cc.wallet.PublishTransaction, + FeeEstimator: cc.feeEstimator, + GenSweepScript: newSweepPkScriptGen(cc.wallet), + Signer: cc.wallet.Cfg.Signer, + Wallet: cc.wallet, NewBatchTimer: func() <-chan time.Time { return time.NewTimer(sweep.DefaultBatchWindowDuration).C }, Notifier: cc.chainNotifier, - ChainIO: cc.chainIO, Store: sweeperStore, MaxInputsPerTx: sweep.DefaultMaxInputsPerTx, MaxSweepAttempts: sweep.DefaultMaxSweepAttempts, @@ -805,7 +855,11 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, closureType htlcswitch.ChannelCloseType) { // TODO(conner): Properly respect the update and error channels // returned by CloseLink. - s.htlcSwitch.CloseLink(chanPoint, closureType, 0) + + // Instruct the switch to close the channel. Provide no close out + // delivery script or target fee per kw because user input is not + // available when the remote peer closes the channel. 
+ s.htlcSwitch.CloseLink(chanPoint, closureType, 0, nil) } // We will use the following channel to reliably hand off contract @@ -828,7 +882,6 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, return nil }, IncubateOutputs: func(chanPoint wire.OutPoint, - commitRes *lnwallet.CommitOutputResolution, outHtlcRes *lnwallet.OutgoingHtlcResolution, inHtlcRes *lnwallet.IncomingHtlcResolution, broadcastHeight uint32) error { @@ -845,7 +898,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, } return s.utxoNursery.IncubateOutputs( - chanPoint, commitRes, outRes, inRes, + chanPoint, outRes, inRes, broadcastHeight, ) }, @@ -883,10 +936,14 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, return ErrServerShuttingDown } }, - DisableChannel: s.chanStatusMgr.RequestDisable, - Sweeper: s.sweeper, - Registry: s.invoices, - NotifyClosedChannel: s.channelNotifier.NotifyClosedChannelEvent, + DisableChannel: s.chanStatusMgr.RequestDisable, + Sweeper: s.sweeper, + Registry: s.invoices, + NotifyClosedChannel: s.channelNotifier.NotifyClosedChannelEvent, + OnionProcessor: s.sphinx, + PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod, + IsForwardedHTLC: s.htlcSwitch.IsForwardedHTLC, + Clock: clock.NewDefaultClock(), }, chanDB) s.breachArbiter = newBreachArbiter(&BreachConfig{ @@ -925,7 +982,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, Notifier: cc.chainNotifier, FeeEstimator: cc.feeEstimator, SignMessage: func(pubKey *btcec.PublicKey, - msg []byte) (*btcec.Signature, error) { + msg []byte) (input.Signature, error) { if pubKey.IsEqual(privKey.PubKey()) { return s.nodeSigner.SignMessage(pubKey, msg) @@ -962,6 +1019,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, return nil, fmt.Errorf("unable to find channel") }, DefaultRoutingPolicy: cc.routingPolicy, + DefaultMinHtlcIn: cc.minHtlcIn, NumRequiredConfs: func(chanAmt btcutil.Amount, pushAmt lnwire.MilliSatoshi) uint16 { // For large channels we 
increase the number @@ -1079,13 +1137,14 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, // channel bandwidth. return uint16(input.MaxHTLCNumber / 2) }, - ZombieSweeperInterval: 1 * time.Minute, - ReservationTimeout: 10 * time.Minute, - MinChanSize: btcutil.Amount(cfg.MinChanSize), - MaxPendingChannels: cfg.MaxPendingChannels, - RejectPush: cfg.RejectPush, - NotifyOpenChannelEvent: s.channelNotifier.NotifyOpenChannelEvent, - OpenChannelPredicate: chanPredicate, + ZombieSweeperInterval: 1 * time.Minute, + ReservationTimeout: 10 * time.Minute, + MinChanSize: btcutil.Amount(cfg.MinChanSize), + MaxPendingChannels: cfg.MaxPendingChannels, + RejectPush: cfg.RejectPush, + NotifyOpenChannelEvent: s.channelNotifier.NotifyOpenChannelEvent, + OpenChannelPredicate: chanPredicate, + NotifyPendingOpenChannelEvent: s.channelNotifier.NotifyPendingOpenChannelEvent, }) if err != nil { return nil, err @@ -1113,13 +1172,20 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, // to peer online and offline events. s.peerNotifier = peernotifier.New() + // Create a channel event store which monitors all open channels. + s.chanEventStore = chanfitness.NewChannelEventStore(&chanfitness.Config{ + SubscribeChannelEvents: s.channelNotifier.SubscribeChannelEvents, + SubscribePeerEvents: s.peerNotifier.SubscribePeerEvents, + GetOpenChannels: s.chanDB.FetchAllOpenChannels, + }) + if cfg.WtClient.Active { policy := wtpolicy.DefaultPolicy() if cfg.WtClient.SweepFeeRate != 0 { // We expose the sweep fee rate in sat/byte, but the // tower protocol operations on sat/kw. 
- sweepRateSatPerByte := lnwallet.SatPerKVByte( + sweepRateSatPerByte := chainfee.SatPerKVByte( 1000 * cfg.WtClient.SweepFeeRate, ) policy.SweepFeeRate = sweepRateSatPerByte.FeePerKWeight() @@ -1179,7 +1245,7 @@ func (s *server) Start() error { var startErr error s.start.Do(func() { if s.torController != nil { - if err := s.initTorController(); err != nil { + if err := s.createNewHiddenService(); err != nil { startErr = err return } @@ -1219,6 +1285,10 @@ func (s *server) Start() error { startErr = err return } + if err := s.htlcNotifier.Start(); err != nil { + startErr = err + return + } if err := s.sphinx.Start(); err != nil { startErr = err return @@ -1270,6 +1340,11 @@ func (s *server) Start() error { return } + if err := s.chanEventStore.Start(); err != nil { + startErr = err + return + } + // Before we start the connMgr, we'll check to see if we have // any backups to recover. We do this now as we want to ensure // that have all the information we need to handle channel @@ -1361,10 +1436,6 @@ func (s *server) Stop() error { close(s.quit) - if s.torController != nil { - s.torController.Stop() - } - // Shutdown the wallet, funding manager, and the rpc server. s.chanStatusMgr.Stop() s.cc.chainNotifier.Stop() @@ -1378,6 +1449,7 @@ func (s *server) Stop() error { s.sweeper.Stop() s.channelNotifier.Stop() s.peerNotifier.Stop() + s.htlcNotifier.Stop() s.cc.wallet.Shutdown() s.cc.chainView.Stop() s.connMgr.Stop() @@ -1385,6 +1457,7 @@ func (s *server) Stop() error { s.invoices.Stop() s.fundingMgr.Stop() s.chanSubSwapper.Stop() + s.chanEventStore.Stop() // Disconnect from each active peers to ensure that // peerTerminationWatchers signal completion to each peer. @@ -1575,7 +1648,7 @@ out: // announcement with the updated addresses and broadcast // it to our peers. 
newNodeAnn, err := s.genNodeAnnouncement( - true, lnwire.UpdateNodeAnnAddrs(newAddrs), + true, netann.NodeAnnSetAddrs(newAddrs), ) if err != nil { srvrLog.Debugf("Unable to generate new node "+ @@ -1882,14 +1955,9 @@ func (s *server) initialPeerBootstrap(ignore map[autopilot.NodeID]struct{}, } } -// initTorController initiliazes the Tor controller backed by lnd and -// automatically sets up a v2 onion service in order to listen for inbound -// connections over Tor. -func (s *server) initTorController() error { - if err := s.torController.Start(); err != nil { - return err - } - +// createNewHiddenService automatically sets up a v2 or v3 onion service in +// order to listen for inbound connections over Tor. +func (s *server) createNewHiddenService() error { // Determine the different ports the server is listening on. The onion // service's virtual port will map to these ports and one will be picked // at random when the onion service is being accessed. @@ -1903,9 +1971,9 @@ func (s *server) initTorController() error { // create our onion service. The service's private key will be saved to // disk in order to regain access to this service when restarting `lnd`. onionCfg := tor.AddOnionConfig{ - VirtualPort: defaultPeerPort, - TargetPorts: listenPorts, - PrivateKeyPath: cfg.Tor.PrivateKeyPath, + VirtualPort: defaultPeerPort, + TargetPorts: listenPorts, + Store: tor.NewOnionFile(cfg.Tor.PrivateKeyPath, 0600), } switch { @@ -1940,7 +2008,7 @@ func (s *server) initTorController() error { Addresses: newNodeAnn.Addresses, Alias: newNodeAnn.Alias.String(), Features: lnwire.NewFeatureVector( - newNodeAnn.Features, lnwire.GlobalFeatures, + newNodeAnn.Features, lnwire.Features, ), Color: newNodeAnn.RGBColor, AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(), @@ -1957,7 +2025,7 @@ func (s *server) initTorController() error { // announcement. If refresh is true, then the time stamp of the announcement // will be updated in order to ensure it propagates through the network. 
func (s *server) genNodeAnnouncement(refresh bool, - updates ...func(*lnwire.NodeAnnouncement)) (lnwire.NodeAnnouncement, error) { + modifiers ...netann.NodeAnnModifier) (lnwire.NodeAnnouncement, error) { s.mu.Lock() defer s.mu.Unlock() @@ -1968,34 +2036,19 @@ func (s *server) genNodeAnnouncement(refresh bool, return *s.currentNodeAnn, nil } - // Now that we know we need to update our copy, we'll apply all the - // function updates that'll mutate the current version of our node - // announcement. - for _, update := range updates { - update(s.currentNodeAnn) - } - - // We'll now update the timestamp, ensuring that with each update, the - // timestamp monotonically increases. - newStamp := uint32(time.Now().Unix()) - if newStamp <= s.currentNodeAnn.Timestamp { - newStamp = s.currentNodeAnn.Timestamp + 1 - } - s.currentNodeAnn.Timestamp = newStamp + // Always update the timestamp when refreshing to ensure the update + // propagates. + modifiers = append(modifiers, netann.NodeAnnSetTimestamp) - // Now that the announcement is fully updated, we'll generate a new - // signature over the announcement to ensure nodes on the network - // accepted the new authenticated announcement. - sig, err := discovery.SignAnnouncement( + // Otherwise, we'll sign a new update after applying all of the passed + // modifiers. + err := netann.SignNodeAnnouncement( s.nodeSigner, s.identityPriv.PubKey(), s.currentNodeAnn, + modifiers..., ) if err != nil { return lnwire.NodeAnnouncement{}, err } - s.currentNodeAnn.Signature, err = lnwire.NewSigFromSignature(sig) - if err != nil { - return lnwire.NodeAnnouncement{}, err - } return *s.currentNodeAnn, nil } @@ -2044,7 +2097,7 @@ func (s *server) establishPersistentConnections() error { // each of the nodes. 
selfPub := s.identityPriv.PubKey().SerializeCompressed() err = sourceNode.ForEachChannel(nil, func( - tx *bbolt.Tx, + tx kvdb.ReadTx, chanInfo *channeldb.ChannelEdgeInfo, policy, _ *channeldb.ChannelEdgePolicy) error { @@ -2703,14 +2756,23 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, ChainNet: activeNetParams.Net, } - // With the brontide connection established, we'll now craft the local - // feature vector to advertise to the remote node. - localFeatures := lnwire.NewRawFeatureVector() + // With the brontide connection established, we'll now craft the feature + // vectors to advertise to the remote node. + initFeatures := s.featureMgr.Get(feature.SetInit) + legacyFeatures := s.featureMgr.Get(feature.SetLegacyGlobal) - // We'll signal that we understand the data loss protection feature, - // and also that we support the new gossip query features. - localFeatures.Set(lnwire.DataLossProtectRequired) - localFeatures.Set(lnwire.GossipQueriesOptional) + // Lookup past error caches for the peer in the server. If no buffer is + // found, create a fresh buffer. + pkStr := string(peerAddr.IdentityKey.SerializeCompressed()) + errBuffer, ok := s.peerErrors[pkStr] + if !ok { + var err error + errBuffer, err = queue.NewCircularBuffer(errorBufferSize) + if err != nil { + srvrLog.Errorf("unable to create peer %v", err) + return + } + } // Now that we've established a connection, create a peer, and it to the // set of currently active peers. Configure the peer with the incoming @@ -2719,9 +2781,9 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, // htlcs, an extra block is added to prevent the channel from being // closed when the htlc is outstanding and a new block comes in. 
p, err := newPeer( - conn, connReq, s, peerAddr, inbound, localFeatures, - cfg.ChanEnableTimeout, - defaultOutgoingCltvRejectDelta, + conn, connReq, s, peerAddr, inbound, initFeatures, + legacyFeatures, cfg.ChanEnableTimeout, + defaultOutgoingCltvRejectDelta, errBuffer, ) if err != nil { srvrLog.Errorf("unable to create peer %v", err) @@ -2733,6 +2795,11 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, s.addPeer(p) + // Once we have successfully added the peer to the server, we can + // delete the previous error buffer from the server's map of error + // buffers. + delete(s.peerErrors, pkStr) + // Dispatch a goroutine to asynchronously start the peer. This process // includes sending and receiving Init messages, which would be a DOS // vector if we held the server's mutex throughout the procedure. @@ -2891,7 +2958,7 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) { // If there were any notification requests for when this peer // disconnected, we can trigger them now. - srvrLog.Debugf("Notifying that peer %x is offline", p) + srvrLog.Debugf("Notifying that peer %v is offline", p) pubStr := string(pubKey.SerializeCompressed()) for _, offlineChan := range s.peerDisconnectedListeners[pubStr] { close(offlineChan) @@ -3027,6 +3094,12 @@ func (s *server) removePeer(p *peer) { delete(s.outboundPeers, pubStr) } + // Copy the peer's error buffer across to the server if it has any items + // in it so that we can restore peer errors across connections. + if p.errorBuffer.Total() > 0 { + s.peerErrors[pubStr] = p.errorBuffer + } + // Inform the peer notifier of a peer offline event so that it can be // reported to clients listening for peer events. var pubKey [33]byte @@ -3048,11 +3121,12 @@ type openChanReq struct { pushAmt lnwire.MilliSatoshi - fundingFeePerKw lnwallet.SatPerKWeight + fundingFeePerKw chainfee.SatPerKWeight private bool - minHtlc lnwire.MilliSatoshi + // minHtlcIn is the minimum incoming htlc that we accept. 
+ minHtlcIn lnwire.MilliSatoshi remoteCsvDelay uint16 @@ -3060,8 +3134,23 @@ type openChanReq struct { // output selected to fund the channel should satisfy. minConfs int32 + // shutdownScript is an optional upfront shutdown script for the channel. + // This value is optional, so may be nil. + shutdownScript lnwire.DeliveryAddress + // TODO(roasbeef): add ability to specify channel constraints as well + // chanFunder is an optional channel funder that allows the caller to + // control exactly how the channel funding is carried out. If not + // specified, then the default chanfunding.WalletAssembler will be + // used. + chanFunder chanfunding.Assembler + + // pendingChanID is not all zeroes (the default value), then this will + // be the pending channel ID used for the funding flow within the wire + // protocol. + pendingChanID [32]byte + updates chan *lnrpc.OpenStatusUpdate err chan error } @@ -3323,7 +3412,12 @@ func computeNextBackoff(currBackoff time.Duration) time.Duration { // fetchNodeAdvertisedAddr attempts to fetch an advertised address of a node. func (s *server) fetchNodeAdvertisedAddr(pub *btcec.PublicKey) (net.Addr, error) { - node, err := s.chanDB.ChannelGraph().FetchLightningNode(pub) + vertex, err := route.NewVertexFromBytes(pub.SerializeCompressed()) + if err != nil { + return nil, err + } + + node, err := s.chanDB.ChannelGraph().FetchLightningNode(nil, vertex) if err != nil { return nil, err } diff --git a/signal/signal.go b/signal/signal.go index 29cbb9f5cf..82e503d037 100644 --- a/signal/signal.go +++ b/signal/signal.go @@ -26,7 +26,8 @@ var ( shutdownChannel = make(chan struct{}) ) -func init() { +// Intercept starts the interception of interrupt signals. 
+func Intercept() { signalsToCatch := []os.Signal{ os.Interrupt, os.Kill, diff --git a/subrpcserver_config.go b/subrpcserver_config.go index d00e3a43d8..316320289f 100644 --- a/subrpcserver_config.go +++ b/subrpcserver_config.go @@ -18,6 +18,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc" "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc" + "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/macaroons" "github.com/lightningnetwork/lnd/netann" "github.com/lightningnetwork/lnd/routing" @@ -92,7 +93,8 @@ func (s *subRPCServerConfigs) PopulateDependencies(cc *chainControl, sweeper *sweep.UtxoSweeper, tower *watchtower.Standalone, towerClient wtclient.Client, - tcpResolver lncfg.TCPResolver) error { + tcpResolver lncfg.TCPResolver, + genInvoiceFeatures func() *lnwire.FeatureVector) error { // First, we'll use reflect to obtain a version of the config struct // that allows us to programmatically inspect its fields. 
@@ -130,6 +132,9 @@ func (s *subRPCServerConfigs) PopulateDependencies(cc *chainControl, subCfgValue.FieldByName("Signer").Set( reflect.ValueOf(cc.signer), ) + subCfgValue.FieldByName("KeyRing").Set( + reflect.ValueOf(cc.keyRing), + ) case *walletrpc.Config: subCfgValue := extractReflectValue(subCfg) @@ -197,9 +202,6 @@ func (s *subRPCServerConfigs) PopulateDependencies(cc *chainControl, subCfgValue.FieldByName("NodeSigner").Set( reflect.ValueOf(nodeSigner), ) - subCfgValue.FieldByName("MaxPaymentMSat").Set( - reflect.ValueOf(MaxPaymentMSat), - ) defaultDelta := cfg.Bitcoin.TimeLockDelta if registeredChains.PrimaryChain() == litecoinChain { defaultDelta = cfg.Litecoin.TimeLockDelta @@ -210,22 +212,13 @@ func (s *subRPCServerConfigs) PopulateDependencies(cc *chainControl, subCfgValue.FieldByName("ChanDB").Set( reflect.ValueOf(chanDB), ) + subCfgValue.FieldByName("GenInvoiceFeatures").Set( + reflect.ValueOf(genInvoiceFeatures), + ) + // RouterRPC isn't conditionally compiled and doesn't need to be + // populated using reflection. case *routerrpc.Config: - subCfgValue := extractReflectValue(subCfg) - - subCfgValue.FieldByName("NetworkDir").Set( - reflect.ValueOf(networkDir), - ) - subCfgValue.FieldByName("MacService").Set( - reflect.ValueOf(macService), - ) - subCfgValue.FieldByName("Router").Set( - reflect.ValueOf(chanRouter), - ) - subCfgValue.FieldByName("RouterBackend").Set( - reflect.ValueOf(routerBackend), - ) case *watchtowerrpc.Config: subCfgValue := extractReflectValue(subCfg) @@ -258,6 +251,12 @@ func (s *subRPCServerConfigs) PopulateDependencies(cc *chainControl, } } + // Populate routerrpc dependencies. 
+ s.RouterRPC.NetworkDir = networkDir + s.RouterRPC.MacService = macService + s.RouterRPC.Router = chanRouter + s.RouterRPC.RouterBackend = routerBackend + return nil } diff --git a/sweep/backend_mock_test.go b/sweep/backend_mock_test.go index 43699be309..644ba59cc0 100644 --- a/sweep/backend_mock_test.go +++ b/sweep/backend_mock_test.go @@ -2,6 +2,8 @@ package sweep import ( "sync" + "testing" + "time" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" @@ -11,6 +13,8 @@ import ( // mockBackend simulates a chain backend for realistic behaviour in unit tests // around double spends. type mockBackend struct { + t *testing.T + lock sync.Mutex notifier *MockNotifier @@ -19,14 +23,20 @@ type mockBackend struct { unconfirmedTxes map[chainhash.Hash]*wire.MsgTx unconfirmedSpendInputs map[wire.OutPoint]struct{} + + publishChan chan wire.MsgTx + + walletUtxos []*lnwallet.Utxo } -func newMockBackend(notifier *MockNotifier) *mockBackend { +func newMockBackend(t *testing.T, notifier *MockNotifier) *mockBackend { return &mockBackend{ + t: t, notifier: notifier, unconfirmedTxes: make(map[chainhash.Hash]*wire.MsgTx), confirmedSpendInputs: make(map[wire.OutPoint]struct{}), unconfirmedSpendInputs: make(map[wire.OutPoint]struct{}), + publishChan: make(chan wire.MsgTx, 2), } } @@ -65,6 +75,27 @@ func (b *mockBackend) publishTransaction(tx *wire.MsgTx) error { return nil } +func (b *mockBackend) PublishTransaction(tx *wire.MsgTx) error { + log.Tracef("Publishing tx %v", tx.TxHash()) + err := b.publishTransaction(tx) + select { + case b.publishChan <- *tx: + case <-time.After(defaultTestTimeout): + b.t.Fatalf("unexpected tx published") + } + return err +} + +func (b *mockBackend) ListUnspentWitness(minconfirms, maxconfirms int32) ( + []*lnwallet.Utxo, error) { + + return b.walletUtxos, nil +} + +func (b *mockBackend) WithCoinSelectLock(f func() error) error { + return f() +} + func (b *mockBackend) deleteUnconfirmed(txHash chainhash.Hash) { b.lock.Lock() defer 
b.lock.Unlock() diff --git a/sweep/bucket_list.go b/sweep/bucket_list.go new file mode 100644 index 0000000000..4b3c67cd4d --- /dev/null +++ b/sweep/bucket_list.go @@ -0,0 +1,45 @@ +package sweep + +// bucket contains a set of inputs that are not mutually exclusive. +type bucket pendingInputs + +// tryAdd tries to add a new input to this bucket. +func (b bucket) tryAdd(input *pendingInput) bool { + exclusiveGroup := input.params.ExclusiveGroup + if exclusiveGroup != nil { + for _, input := range b { + existingGroup := input.params.ExclusiveGroup + if existingGroup != nil && + *existingGroup == *exclusiveGroup { + + return false + } + } + } + + b[*input.OutPoint()] = input + + return true +} + +// bucketList is a list of buckets that contain non-mutually exclusive inputs. +type bucketList struct { + buckets []bucket +} + +// add adds a new input. If the input is not accepted by any of the existing +// buckets, a new bucket will be created. +func (b *bucketList) add(input *pendingInput) { + for _, existingBucket := range b.buckets { + if existingBucket.tryAdd(input) { + return + } + } + + // Create a new bucket and add the input. It is not necessary to check + // the return value of tryAdd because it will always succeed on an empty + // bucket. + newBucket := make(bucket) + newBucket.tryAdd(input) + b.buckets = append(b.buckets, newBucket) +} diff --git a/sweep/fee_estimator_mock_test.go b/sweep/fee_estimator_mock_test.go index b0510f368e..4ca89f0c5b 100644 --- a/sweep/fee_estimator_mock_test.go +++ b/sweep/fee_estimator_mock_test.go @@ -3,38 +3,38 @@ package sweep import ( "sync" - "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) // mockFeeEstimator implements a mock fee estimator. It closely resembles // lnwallet.StaticFeeEstimator with the addition that fees can be changed for // testing purposes in a thread safe manner. 
type mockFeeEstimator struct { - feePerKW lnwallet.SatPerKWeight + feePerKW chainfee.SatPerKWeight - relayFee lnwallet.SatPerKWeight + relayFee chainfee.SatPerKWeight - blocksToFee map[uint32]lnwallet.SatPerKWeight + blocksToFee map[uint32]chainfee.SatPerKWeight // A closure that when set is used instead of the // mockFeeEstimator.EstimateFeePerKW method. - estimateFeePerKW func(numBlocks uint32) (lnwallet.SatPerKWeight, error) + estimateFeePerKW func(numBlocks uint32) (chainfee.SatPerKWeight, error) lock sync.Mutex } func newMockFeeEstimator(feePerKW, - relayFee lnwallet.SatPerKWeight) *mockFeeEstimator { + relayFee chainfee.SatPerKWeight) *mockFeeEstimator { return &mockFeeEstimator{ feePerKW: feePerKW, relayFee: relayFee, - blocksToFee: make(map[uint32]lnwallet.SatPerKWeight), + blocksToFee: make(map[uint32]chainfee.SatPerKWeight), } } func (e *mockFeeEstimator) updateFees(feePerKW, - relayFee lnwallet.SatPerKWeight) { + relayFee chainfee.SatPerKWeight) { e.lock.Lock() defer e.lock.Unlock() @@ -44,7 +44,7 @@ func (e *mockFeeEstimator) updateFees(feePerKW, } func (e *mockFeeEstimator) EstimateFeePerKW(numBlocks uint32) ( - lnwallet.SatPerKWeight, error) { + chainfee.SatPerKWeight, error) { e.lock.Lock() defer e.lock.Unlock() @@ -60,7 +60,7 @@ func (e *mockFeeEstimator) EstimateFeePerKW(numBlocks uint32) ( return e.feePerKW, nil } -func (e *mockFeeEstimator) RelayFeePerKW() lnwallet.SatPerKWeight { +func (e *mockFeeEstimator) RelayFeePerKW() chainfee.SatPerKWeight { e.lock.Lock() defer e.lock.Unlock() @@ -75,4 +75,4 @@ func (e *mockFeeEstimator) Stop() error { return nil } -var _ lnwallet.FeeEstimator = (*mockFeeEstimator)(nil) +var _ chainfee.Estimator = (*mockFeeEstimator)(nil) diff --git a/sweep/interface.go b/sweep/interface.go new file mode 100644 index 0000000000..a9ff82cc61 --- /dev/null +++ b/sweep/interface.go @@ -0,0 +1,27 @@ +package sweep + +import ( + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/lnwallet" +) + +// Wallet contains 
all wallet related functionality required by sweeper. +type Wallet interface { + // PublishTransaction performs cursory validation (dust checks, etc) and + // broadcasts the passed transaction to the Bitcoin network. + PublishTransaction(tx *wire.MsgTx) error + + // ListUnspentWitness returns all unspent outputs which are version 0 + // witness programs. The 'minconfirms' and 'maxconfirms' parameters + // indicate the minimum and maximum number of confirmations an output + // needs in order to be returned by this method. + ListUnspentWitness(minconfirms, maxconfirms int32) ([]*lnwallet.Utxo, + error) + + // WithCoinSelectLock will execute the passed function closure in a + // synchronized manner preventing any coin selection operations from + // proceeding while the closure is executing. This can be seen as the + // ability to execute a function closure under an exclusive coin + // selection lock. + WithCoinSelectLock(f func() error) error +} diff --git a/sweep/store.go b/sweep/store.go index ef6ba99c94..287646a7b0 100644 --- a/sweep/store.go +++ b/sweep/store.go @@ -8,8 +8,7 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/coreos/bbolt" - "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) var ( @@ -57,26 +56,28 @@ type SweeperStore interface { } type sweeperStore struct { - db *channeldb.DB + db kvdb.Backend } // NewSweeperStore returns a new store instance. 
-func NewSweeperStore(db *channeldb.DB, chainHash *chainhash.Hash) ( +func NewSweeperStore(db kvdb.Backend, chainHash *chainhash.Hash) ( SweeperStore, error) { - err := db.Update(func(tx *bbolt.Tx) error { - _, err := tx.CreateBucketIfNotExists( + err := kvdb.Update(db, func(tx kvdb.RwTx) error { + _, err := tx.CreateTopLevelBucket( lastTxBucketKey, ) if err != nil { return err } - if tx.Bucket(txHashesBucketKey) != nil { + if tx.ReadWriteBucket(txHashesBucketKey) != nil { return nil } - txHashesBucket, err := tx.CreateBucket(txHashesBucketKey) + txHashesBucket, err := tx.CreateTopLevelBucket( + txHashesBucketKey, + ) if err != nil { return err } @@ -98,7 +99,7 @@ func NewSweeperStore(db *channeldb.DB, chainHash *chainhash.Hash) ( // migrateTxHashes migrates nursery finalized txes to the tx hashes bucket. This // is not implemented as a database migration, to keep the downgrade path open. -func migrateTxHashes(tx *bbolt.Tx, txHashesBucket *bbolt.Bucket, +func migrateTxHashes(tx kvdb.RwTx, txHashesBucket kvdb.RwBucket, chainHash *chainhash.Hash) error { log.Infof("Migrating UTXO nursery finalized TXIDs") @@ -114,20 +115,20 @@ func migrateTxHashes(tx *bbolt.Tx, txHashesBucket *bbolt.Bucket, } // Get chain bucket if exists. - chainBucket := tx.Bucket(b.Bytes()) + chainBucket := tx.ReadWriteBucket(b.Bytes()) if chainBucket == nil { return nil } // Retrieve the existing height index. - hghtIndex := chainBucket.Bucket(utxnHeightIndexKey) + hghtIndex := chainBucket.NestedReadWriteBucket(utxnHeightIndexKey) if hghtIndex == nil { return nil } // Retrieve all heights. err := hghtIndex.ForEach(func(k, v []byte) error { - heightBucket := hghtIndex.Bucket(k) + heightBucket := hghtIndex.NestedReadWriteBucket(k) if heightBucket == nil { return nil } @@ -164,13 +165,13 @@ func migrateTxHashes(tx *bbolt.Tx, txHashesBucket *bbolt.Bucket, // NotifyPublishTx signals that we are about to publish a tx. 
func (s *sweeperStore) NotifyPublishTx(sweepTx *wire.MsgTx) error { - return s.db.Update(func(tx *bbolt.Tx) error { - lastTxBucket := tx.Bucket(lastTxBucketKey) + return kvdb.Update(s.db, func(tx kvdb.RwTx) error { + lastTxBucket := tx.ReadWriteBucket(lastTxBucketKey) if lastTxBucket == nil { return errors.New("last tx bucket does not exist") } - txHashesBucket := tx.Bucket(txHashesBucketKey) + txHashesBucket := tx.ReadWriteBucket(txHashesBucketKey) if txHashesBucket == nil { return errors.New("tx hashes bucket does not exist") } @@ -195,8 +196,8 @@ func (s *sweeperStore) NotifyPublishTx(sweepTx *wire.MsgTx) error { func (s *sweeperStore) GetLastPublishedTx() (*wire.MsgTx, error) { var sweepTx *wire.MsgTx - err := s.db.View(func(tx *bbolt.Tx) error { - lastTxBucket := tx.Bucket(lastTxBucketKey) + err := kvdb.View(s.db, func(tx kvdb.ReadTx) error { + lastTxBucket := tx.ReadBucket(lastTxBucketKey) if lastTxBucket == nil { return errors.New("last tx bucket does not exist") } @@ -226,8 +227,8 @@ func (s *sweeperStore) GetLastPublishedTx() (*wire.MsgTx, error) { func (s *sweeperStore) IsOurTx(hash chainhash.Hash) (bool, error) { var ours bool - err := s.db.View(func(tx *bbolt.Tx) error { - txHashesBucket := tx.Bucket(txHashesBucketKey) + err := kvdb.View(s.db, func(tx kvdb.ReadTx) error { + txHashesBucket := tx.ReadBucket(txHashesBucketKey) if txHashesBucket == nil { return errors.New("tx hashes bucket does not exist") } diff --git a/sweep/store_test.go b/sweep/store_test.go index 23714c781b..8d83e1faec 100644 --- a/sweep/store_test.go +++ b/sweep/store_test.go @@ -141,7 +141,7 @@ func testStore(t *testing.T, createStore func() (SweeperStore, error)) { t.Fatal("expected tx to be ours") } - // An different hash should be reported on as not being ours. + // An different hash should be reported as not being ours. 
var unknownHash chainhash.Hash ours, err = store.IsOurTx(unknownHash) if err != nil { diff --git a/sweep/sweeper.go b/sweep/sweeper.go index 03fcf6ca73..cd93a121c5 100644 --- a/sweep/sweeper.go +++ b/sweep/sweeper.go @@ -3,7 +3,6 @@ package sweep import ( "errors" "fmt" - "math" "math/rand" "sort" "sync" @@ -16,13 +15,14 @@ import ( "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) const ( // DefaultMaxFeeRate is the default maximum fee rate allowed within the // UtxoSweeper. The current value is equivalent to a fee rate of 10,000 // sat/vbyte. - DefaultMaxFeeRate = lnwallet.FeePerKwFloor * 1e4 + DefaultMaxFeeRate = chainfee.FeePerKwFloor * 1e4 // DefaultFeeRateBucketSize is the default size of fee rate buckets // we'll use when clustering inputs into buckets with similar fee rates @@ -50,9 +50,14 @@ var ( // request from a client whom did not specify a fee preference. ErrNoFeePreference = errors.New("no fee preference specified") + // ErrExclusiveGroupSpend is returned in case a different input of the + // same exclusive group was spent. + ErrExclusiveGroupSpend = errors.New("other member of exclusive group " + + "was spent") + // ErrSweeperShuttingDown is an error returned when a client attempts to // make a request to the UtxoSweeper, but it is unable to handle it as - // it is/has already been stoppepd. + // it is/has already been stopped. ErrSweeperShuttingDown = errors.New("utxo sweeper shutting down") // DefaultMaxSweepAttempts specifies the default maximum number of times @@ -61,17 +66,50 @@ var ( DefaultMaxSweepAttempts = 10 ) +// Params contains the parameters that control the sweeping process. +type Params struct { + // Fee is the fee preference of the client who requested the input to be + // swept. 
If a confirmation target is specified, then we'll map it into + // a fee rate whenever we attempt to cluster inputs for a sweep. + Fee FeePreference + + // Force indicates whether the input should be swept regardless of + // whether it is economical to do so. + Force bool + + // ExclusiveGroup is an identifier that, if set, prevents other inputs + // with the same identifier from being batched together. + ExclusiveGroup *uint64 +} + +// ParamsUpdate contains a new set of parameters to update a pending sweep with. +type ParamsUpdate struct { + // Fee is the fee preference of the client who requested the input to be + // swept. If a confirmation target is specified, then we'll map it into + // a fee rate whenever we attempt to cluster inputs for a sweep. + Fee FeePreference + + // Force indicates whether the input should be swept regardless of + // whether it is economical to do so. + Force bool +} + +// String returns a human readable interpretation of the sweep parameters. +func (p Params) String() string { + return fmt.Sprintf("fee=%v, force=%v, exclusive_group=%v", + p.Fee, p.Force, p.ExclusiveGroup) +} + // pendingInput is created when an input reaches the main loop for the first -// time. It tracks all relevant state that is needed for sweeping. +// time. It wraps the input and tracks all relevant state that is needed for +// sweeping. type pendingInput struct { + input.Input + // listeners is a list of channels over which the final outcome of the // sweep needs to be broadcasted. listeners []chan Result - // input is the original struct that contains the input and sign - // descriptor. - input input.Input - // ntfnRegCancel is populated with a function that cancels the chain // notifier spend registration. ntfnRegCancel func() @@ -84,15 +122,19 @@ type pendingInput struct { // made to sweep this tx. publishAttempts int - // feePreference is the fee preference of the client who requested the - // input to be swept. 
If a confirmation target is specified, then we'll - // map it into a fee rate whenever we attempt to cluster inputs for a - // sweep. - feePreference FeePreference + // params contains the parameters that control the sweeping process. + params Params // lastFeeRate is the most recent fee rate used for this input within a // transaction broadcast to the network. - lastFeeRate lnwallet.SatPerKWeight + lastFeeRate chainfee.SatPerKWeight +} + +// parameters returns the sweep parameters for this input. +// +// NOTE: Part of the txInput interface. +func (p *pendingInput) parameters() Params { + return p.params } // pendingInputs is a type alias for a set of pending inputs. @@ -101,7 +143,7 @@ type pendingInputs = map[wire.OutPoint]*pendingInput // inputCluster is a helper struct to gather a set of pending inputs that should // be swept with the specified fee rate. type inputCluster struct { - sweepFeeRate lnwallet.SatPerKWeight + sweepFeeRate chainfee.SatPerKWeight inputs pendingInputs } @@ -126,7 +168,7 @@ type PendingInput struct { // LastFeeRate is the most recent fee rate used for the input being // swept within a transaction broadcast to the network. - LastFeeRate lnwallet.SatPerKWeight + LastFeeRate chainfee.SatPerKWeight // BroadcastAttempts is the number of attempts we've made to sweept the // input. @@ -135,19 +177,22 @@ type PendingInput struct { // NextBroadcastHeight is the next height of the chain at which we'll // attempt to broadcast a transaction sweeping the input. NextBroadcastHeight uint32 + + // Params contains the sweep parameters for this pending request. + Params Params } -// bumpFeeReq is an internal message we'll use to represent an external caller's -// intent to bump the fee rate of a given input. -type bumpFeeReq struct { - input wire.OutPoint - feePreference FeePreference - responseChan chan *bumpFeeResp +// updateReq is an internal message we'll use to represent an external caller's +// intent to update the sweep parameters of a given input. 
+type updateReq struct { + input wire.OutPoint + params ParamsUpdate + responseChan chan *updateResp } -// bumpFeeResp is an internal message we'll use to hand off the response of a -// bumpFeeReq from the UtxoSweeper's main event loop back to the caller. -type bumpFeeResp struct { +// updateResp is an internal message we'll use to hand off the response of a +// updateReq from the UtxoSweeper's main event loop back to the caller. +type updateResp struct { resultChan chan Result err error } @@ -167,9 +212,9 @@ type UtxoSweeper struct { // UtxoSweeper is attempting to sweep. pendingSweepsReqs chan *pendingSweepsReq - // bumpFeeReqs is a channel that will be sent requests by external + // updateReqs is a channel that will be sent requests by external // callers who wish to bump the fee rate of a given input. - bumpFeeReqs chan *bumpFeeReq + updateReqs chan *updateReq // pendingInputs is the total set of inputs the UtxoSweeper has been // requested to sweep. @@ -182,7 +227,7 @@ type UtxoSweeper struct { currentOutputScript []byte - relayFeeRate lnwallet.SatPerKWeight + relayFeeRate chainfee.SatPerKWeight quit chan struct{} wg sync.WaitGroup @@ -197,11 +242,10 @@ type UtxoSweeperConfig struct { // FeeEstimator is used when crafting sweep transactions to estimate // the necessary fee relative to the expected size of the sweep // transaction. - FeeEstimator lnwallet.FeeEstimator + FeeEstimator chainfee.Estimator - // PublishTransaction facilitates the process of broadcasting a signed - // transaction to the appropriate network. - PublishTransaction func(*wire.MsgTx) error + // Wallet contains the wallet functions that sweeper requires. + Wallet Wallet // NewBatchTimer creates a channel that will be sent on when a certain // time window has passed. During this time window, new inputs can still @@ -212,9 +256,6 @@ type UtxoSweeperConfig struct { // certain on-chain events. Notifier chainntnfs.ChainNotifier - // ChainIO is used to determine the current block height. 
- ChainIO lnwallet.BlockChainIO - // Store stores the published sweeper txes. Store SweeperStore @@ -238,7 +279,7 @@ type UtxoSweeperConfig struct { // MaxFeeRate is the the maximum fee rate allowed within the // UtxoSweeper. - MaxFeeRate lnwallet.SatPerKWeight + MaxFeeRate chainfee.SatPerKWeight // FeeRateBucketSize is the default size of fee rate buckets we'll use // when clustering inputs into buckets with similar fee rates within the @@ -248,8 +289,8 @@ type UtxoSweeperConfig struct { // of 10 would result in the following fee rate buckets up to the // maximum fee rate: // - // #1: min = 1 sat/vbyte, max = 10 sat/vbyte - // #2: min = 11 sat/vbyte, max = 20 sat/vbyte... + // #1: min = 1 sat/vbyte, max (exclusive) = 11 sat/vbyte + // #2: min = 11 sat/vbyte, max (exclusive) = 21 sat/vbyte... FeeRateBucketSize int } @@ -269,9 +310,9 @@ type Result struct { // sweepInputMessage structs are used in the internal channel between the // SweepInput call and the sweeper main loop. type sweepInputMessage struct { - input input.Input - feePreference FeePreference - resultChan chan Result + input input.Input + params Params + resultChan chan Result } // New returns a new Sweeper instance. @@ -280,7 +321,7 @@ func New(cfg *UtxoSweeperConfig) *UtxoSweeper { cfg: cfg, newInputs: make(chan *sweepInputMessage), spendChan: make(chan *chainntnfs.SpendDetail), - bumpFeeReqs: make(chan *bumpFeeReq), + updateReqs: make(chan *updateReq), pendingSweepsReqs: make(chan *pendingSweepsReq), quit: make(chan struct{}), pendingInputs: make(pendingInputs), @@ -313,7 +354,7 @@ func (s *UtxoSweeper) Start() error { // Error can be ignored. Because we are starting up, there are // no pending inputs to update based on the publish result. 
- err := s.cfg.PublishTransaction(lastTx) + err := s.cfg.Wallet.PublishTransaction(lastTx) if err != nil && err != lnwallet.ErrDoubleSpend { log.Errorf("last tx publish: %v", err) } @@ -323,20 +364,10 @@ func (s *UtxoSweeper) Start() error { // not change from here on. s.relayFeeRate = s.cfg.FeeEstimator.RelayFeePerKW() - // Register for block epochs to retry sweeping every block. - bestHash, bestHeight, err := s.cfg.ChainIO.GetBestBlock() - if err != nil { - return fmt.Errorf("get best block: %v", err) - } - - log.Debugf("Best height: %v", bestHeight) - - blockEpochs, err := s.cfg.Notifier.RegisterBlockEpochNtfn( - &chainntnfs.BlockEpoch{ - Height: bestHeight, - Hash: bestHash, - }, - ) + // We need to register for block epochs and retry sweeping every block. + // We should get a notification with the current best block immediately + // if we don't provide any epoch. We'll wait for that in the collector. + blockEpochs, err := s.cfg.Notifier.RegisterBlockEpochNtfn(nil) if err != nil { return fmt.Errorf("register block epoch ntfn: %v", err) } @@ -347,15 +378,18 @@ func (s *UtxoSweeper) Start() error { defer blockEpochs.Cancel() defer s.wg.Done() - err := s.collector(blockEpochs.Epochs, bestHeight) - if err != nil { - log.Errorf("sweeper stopped: %v", err) - } + s.collector(blockEpochs.Epochs) }() return nil } +// RelayFeePerKW returns the minimum fee rate required for transactions to be +// relayed. +func (s *UtxoSweeper) RelayFeePerKW() chainfee.SatPerKWeight { + return s.relayFeeRate +} + // Stop stops sweeper from listening to block epochs and constructing sweep // txes. func (s *UtxoSweeper) Stop() error { @@ -384,29 +418,29 @@ func (s *UtxoSweeper) Stop() error { // Because it is an interface and we don't know what is exactly behind it, we // cannot make a local copy in sweeper. 
func (s *UtxoSweeper) SweepInput(input input.Input, - feePreference FeePreference) (chan Result, error) { + params Params) (chan Result, error) { if input == nil || input.OutPoint() == nil || input.SignDesc() == nil { return nil, errors.New("nil input received") } // Ensure the client provided a sane fee preference. - if _, err := s.feeRateForPreference(feePreference); err != nil { + if _, err := s.feeRateForPreference(params.Fee); err != nil { return nil, err } log.Infof("Sweep request received: out_point=%v, witness_type=%v, "+ - "time_lock=%v, amount=%v, fee_preference=%v", input.OutPoint(), - input.WitnessType(), input.BlocksToMaturity(), - btcutil.Amount(input.SignDesc().Output.Value), feePreference) + "time_lock=%v, amount=%v, params=(%v)", + input.OutPoint(), input.WitnessType(), input.BlocksToMaturity(), + btcutil.Amount(input.SignDesc().Output.Value), params) sweeperInput := &sweepInputMessage{ - input: input, - feePreference: feePreference, - resultChan: make(chan Result, 1), + input: input, + params: params, + resultChan: make(chan Result, 1), } - // Deliver input to main event loop. + // Deliver input to the main event loop. select { case s.newInputs <- sweeperInput: case <-s.quit: @@ -419,7 +453,7 @@ func (s *UtxoSweeper) SweepInput(input input.Input, // feeRateForPreference returns a fee rate for the given fee preference. It // ensures that the fee rate respects the bounds of the UtxoSweeper. func (s *UtxoSweeper) feeRateForPreference( - feePreference FeePreference) (lnwallet.SatPerKWeight, error) { + feePreference FeePreference) (chainfee.SatPerKWeight, error) { // Ensure a type of fee preference is specified to prevent using a // default below. 
@@ -433,7 +467,7 @@ func (s *UtxoSweeper) feeRateForPreference( } if feeRate < s.relayFeeRate { return 0, fmt.Errorf("fee preference resulted in invalid fee "+ - "rate %v, mininum is %v", feeRate, s.relayFeeRate) + "rate %v, minimum is %v", feeRate, s.relayFeeRate) } if feeRate > s.cfg.MaxFeeRate { return 0, fmt.Errorf("fee preference resulted in invalid fee "+ @@ -445,14 +479,24 @@ func (s *UtxoSweeper) feeRateForPreference( // collector is the sweeper main loop. It processes new inputs, spend // notifications and counts down to publication of the sweep tx. -func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch, - bestHeight int32) error { +func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch) { + // We registered for the block epochs with a nil request. The notifier + // should send us the current best block immediately. So we need to wait + // for it here because we need to know the current best height. + var bestHeight int32 + select { + case bestBlock := <-blockEpochs: + bestHeight = bestBlock.Height + + case <-s.quit: + return + } for { select { // A new inputs is offered to the sweeper. We check to see if we // are already trying to sweep this input and if not, set up a - // listener for spend and schedule a sweep. + // listener to spend and schedule a sweep. case input := <-s.newInputs: outpoint := *input.input.OutPoint() pendInput, pending := s.pendingInputs[outpoint] @@ -474,9 +518,9 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch, // channel will be appended to this slice. 
pendInput = &pendingInput{ listeners: []chan Result{input.resultChan}, - input: input.input, + Input: input.input, minPublishHeight: bestHeight, - feePreference: input.feePreference, + params: input.params, } s.pendingInputs[outpoint] = pendInput @@ -508,7 +552,7 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch, s.testSpendChan <- *spend.SpentOutPoint } - // Query store to find out if we every published this + // Query store to find out if we ever published this // tx. spendHash := *spend.SpenderTxHash isOurTx, err := s.cfg.Store.IsOurTx(spendHash) @@ -536,7 +580,7 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch, // registration, deleted from pendingInputs but // the ntfn was in-flight already. Or this could // be not one of our inputs. - _, ok := s.pendingInputs[outpoint] + input, ok := s.pendingInputs[outpoint] if !ok { continue } @@ -552,6 +596,14 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch, Tx: spend.SpendingTx, Err: err, }) + + // Remove all other inputs in this exclusive + // group. + if input.params.ExclusiveGroup != nil { + s.removeExclusiveGroup( + *input.params.ExclusiveGroup, + ) + } } // Now that an input of ours is spent, we can try to @@ -567,9 +619,9 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch, // A new external request has been received to bump the fee rate // of a given input. - case req := <-s.bumpFeeReqs: - resultChan, err := s.handleBumpFeeReq(req, bestHeight) - req.responseChan <- &bumpFeeResp{ + case req := <-s.updateReqs: + resultChan, err := s.handleUpdateReq(req, bestHeight) + req.responseChan <- &updateResp{ resultChan: resultChan, err: err, } @@ -594,27 +646,10 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch, inputClusters[j].sweepFeeRate }) for _, cluster := range inputClusters { - // Examine pending inputs and try to construct - // lists of inputs. 
- inputLists, err := s.getInputLists( - cluster, bestHeight, - ) + err := s.sweepCluster(cluster, bestHeight) if err != nil { - log.Errorf("Unable to examine pending "+ - "inputs: %v", err) - continue - } - - // Sweep selected inputs. - for _, inputs := range inputLists { - err := s.sweep( - inputs, cluster.sweepFeeRate, - bestHeight, - ) - if err != nil { - log.Errorf("Unable to sweep "+ - "inputs: %v", err) - } + log.Errorf("input cluster sweep: %v", + err) } } @@ -622,7 +657,7 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch, // sweep. case epoch, ok := <-blockEpochs: if !ok { - return nil + return } bestHeight = epoch.Height @@ -635,20 +670,77 @@ func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch, } case <-s.quit: - return nil + return + } + } +} + +// removeExclusiveGroup removes all inputs in the given exclusive group. This +// function is called when one of the exclusive group inputs has been spent. The +// other inputs won't ever be spendable and can be removed. This also prevents +// them from being part of future sweep transactions that would fail. +func (s *UtxoSweeper) removeExclusiveGroup(group uint64) { + for outpoint, input := range s.pendingInputs { + outpoint := outpoint + + // Skip inputs that aren't exclusive. + if input.params.ExclusiveGroup == nil { + continue + } + + // Skip inputs from other exclusive groups. + if *input.params.ExclusiveGroup != group { + continue } + + // Signal result channels. + s.signalAndRemove(&outpoint, Result{ + Err: ErrExclusiveGroupSpend, + }) } } +// sweepCluster tries to sweep the given input cluster. +func (s *UtxoSweeper) sweepCluster(cluster inputCluster, + currentHeight int32) error { + + // Execute the sweep within a coin select lock. Otherwise the coins that + // we are going to spend may be selected for other transactions like + // funding of a channel. 
+ return s.cfg.Wallet.WithCoinSelectLock(func() error { + // Examine pending inputs and try to construct + // lists of inputs. + inputLists, err := s.getInputLists(cluster, currentHeight) + if err != nil { + return fmt.Errorf("unable to examine pending inputs: %v", err) + } + + // Sweep selected inputs. + for _, inputs := range inputLists { + err := s.sweep(inputs, cluster.sweepFeeRate, currentHeight) + if err != nil { + return fmt.Errorf("unable to sweep inputs: %v", err) + } + } + + return nil + }) +} + // bucketForFeeReate determines the proper bucket for a fee rate. This is done // in order to batch inputs with similar fee rates together. func (s *UtxoSweeper) bucketForFeeRate( - feeRate lnwallet.SatPerKWeight) lnwallet.SatPerKWeight { + feeRate chainfee.SatPerKWeight) int { + + // Create an isolated bucket for sweeps at the minimum fee rate. This is + // to prevent very small outputs (anchors) from becoming uneconomical if + // their fee rate would be averaged with higher fee rate inputs in a + // regular bucket. + if feeRate == s.relayFeeRate { + return 0 + } - minBucket := s.relayFeeRate + lnwallet.SatPerKWeight(s.cfg.FeeRateBucketSize) - return lnwallet.SatPerKWeight( - math.Ceil(float64(feeRate) / float64(minBucket)), - ) + return 1 + int(feeRate-s.relayFeeRate)/s.cfg.FeeRateBucketSize } // clusterBySweepFeeRate takes the set of pending inputs within the UtxoSweeper @@ -656,43 +748,50 @@ func (s *UtxoSweeper) bucketForFeeRate( // sweep fee rate, which is determined by calculating the average fee rate of // all inputs within that cluster. func (s *UtxoSweeper) clusterBySweepFeeRate() []inputCluster { - bucketInputs := make(map[lnwallet.SatPerKWeight]pendingInputs) - inputFeeRates := make(map[wire.OutPoint]lnwallet.SatPerKWeight) + bucketInputs := make(map[int]*bucketList) + inputFeeRates := make(map[wire.OutPoint]chainfee.SatPerKWeight) // First, we'll group together all inputs with similar fee rates. 
This // is done by determining the fee rate bucket they should belong in. for op, input := range s.pendingInputs { - feeRate, err := s.feeRateForPreference(input.feePreference) + feeRate, err := s.feeRateForPreference(input.params.Fee) if err != nil { log.Warnf("Skipping input %v: %v", op, err) continue } - bucket := s.bucketForFeeRate(feeRate) + feeGroup := s.bucketForFeeRate(feeRate) - inputs, ok := bucketInputs[bucket] + // Create a bucket list for this fee rate if there isn't one + // yet. + buckets, ok := bucketInputs[feeGroup] if !ok { - inputs = make(pendingInputs) - bucketInputs[bucket] = inputs + buckets = &bucketList{} + bucketInputs[feeGroup] = buckets } + // Request the bucket list to add this input. The bucket list + // will take into account exclusive group constraints. + buckets.add(input) + input.lastFeeRate = feeRate - inputs[op] = input inputFeeRates[op] = feeRate } // We'll then determine the sweep fee rate for each set of inputs by // calculating the average fee rate of the inputs within each set. inputClusters := make([]inputCluster, 0, len(bucketInputs)) - for _, inputs := range bucketInputs { - var sweepFeeRate lnwallet.SatPerKWeight - for op := range inputs { - sweepFeeRate += inputFeeRates[op] + for _, buckets := range bucketInputs { + for _, inputs := range buckets.buckets { + var sweepFeeRate chainfee.SatPerKWeight + for op := range inputs { + sweepFeeRate += inputFeeRates[op] + } + sweepFeeRate /= chainfee.SatPerKWeight(len(inputs)) + inputClusters = append(inputClusters, inputCluster{ + sweepFeeRate: sweepFeeRate, + inputs: inputs, + }) } - sweepFeeRate /= lnwallet.SatPerKWeight(len(inputs)) - inputClusters = append(inputClusters, inputCluster{ - sweepFeeRate: sweepFeeRate, - inputs: inputs, - }) } return inputClusters @@ -712,6 +811,10 @@ func (s *UtxoSweeper) scheduleSweep(currentHeight int32) error { startTimer := false for _, cluster := range s.clusterBySweepFeeRate() { // Examine pending inputs and try to construct lists of inputs. 
+ // We don't need to obtain the coin selection lock, because we + // just need an indication as to whether we can sweep. More + // inputs may be added until we publish the transaction and + // coins that we select now may be used in other transactions. inputLists, err := s.getInputLists(cluster, currentHeight) if err != nil { return fmt.Errorf("get input lists: %v", err) @@ -793,7 +896,7 @@ func (s *UtxoSweeper) getInputLists(cluster inputCluster, // contain inputs that failed before. Therefore we also add sets // consisting of only new inputs to the list, to make sure that new // inputs are given a good, isolated chance of being published. - var newInputs, retryInputs []input.Input + var newInputs, retryInputs []txInput for _, input := range cluster.inputs { // Skip inputs that have a minimum publish height that is not // yet reached. @@ -803,9 +906,9 @@ func (s *UtxoSweeper) getInputLists(cluster inputCluster, // Add input to the either one of the lists. if input.publishAttempts == 0 { - newInputs = append(newInputs, input.input) + newInputs = append(newInputs, input) } else { - retryInputs = append(retryInputs, input.input) + retryInputs = append(retryInputs, input) } } @@ -817,6 +920,7 @@ func (s *UtxoSweeper) getInputLists(cluster inputCluster, allSets, err = generateInputPartitionings( append(retryInputs, newInputs...), s.relayFeeRate, cluster.sweepFeeRate, s.cfg.MaxInputsPerTx, + s.cfg.Wallet, ) if err != nil { return nil, fmt.Errorf("input partitionings: %v", err) @@ -826,7 +930,7 @@ func (s *UtxoSweeper) getInputLists(cluster inputCluster, // Create sets for just the new inputs. 
newSets, err := generateInputPartitionings( newInputs, s.relayFeeRate, cluster.sweepFeeRate, - s.cfg.MaxInputsPerTx, + s.cfg.MaxInputsPerTx, s.cfg.Wallet, ) if err != nil { return nil, fmt.Errorf("input partitionings: %v", err) @@ -842,7 +946,7 @@ func (s *UtxoSweeper) getInputLists(cluster inputCluster, // sweep takes a set of preselected inputs, creates a sweep tx and publishes the // tx. The output address is only marked as used if the publish succeeds. -func (s *UtxoSweeper) sweep(inputs inputSet, feeRate lnwallet.SatPerKWeight, +func (s *UtxoSweeper) sweep(inputs inputSet, feeRate chainfee.SatPerKWeight, currentHeight int32) error { // Generate an output script if there isn't an unused script available. @@ -883,7 +987,7 @@ func (s *UtxoSweeper) sweep(inputs inputSet, feeRate lnwallet.SatPerKWeight, }), ) - err = s.cfg.PublishTransaction(tx) + err = s.cfg.Wallet.PublishTransaction(tx) // In case of an unexpected error, don't try to recover. if err != nil && err != lnwallet.ErrDoubleSpend { @@ -902,7 +1006,9 @@ func (s *UtxoSweeper) sweep(inputs inputSet, feeRate lnwallet.SatPerKWeight, if !ok { // It can be that the input has been removed because it // exceed the maximum number of attempts in a previous - // input set. + // input set. It could also be that this input is an + // additional wallet input that was attached. In that + // case there also isn't a pending input to update. continue } @@ -1005,44 +1111,45 @@ func (s *UtxoSweeper) handlePendingSweepsReq( for _, pendingInput := range s.pendingInputs { // Only the exported fields are set, as we expect the response // to only be consumed externally. 
- op := *pendingInput.input.OutPoint() + op := *pendingInput.OutPoint() pendingInputs[op] = &PendingInput{ OutPoint: op, - WitnessType: pendingInput.input.WitnessType(), + WitnessType: pendingInput.WitnessType(), Amount: btcutil.Amount( - pendingInput.input.SignDesc().Output.Value, + pendingInput.SignDesc().Output.Value, ), LastFeeRate: pendingInput.lastFeeRate, BroadcastAttempts: pendingInput.publishAttempts, NextBroadcastHeight: uint32(pendingInput.minPublishHeight), + Params: pendingInput.params, } } return pendingInputs } -// BumpFee allows bumping the fee of an input being swept by the UtxoSweeper -// according to the provided fee preference. The new fee preference will be used -// for a new sweep transaction of the input that will act as a replacement -// transaction (RBF) of the original sweeping transaction, if any. +// UpdateParams allows updating the sweep parameters of a pending input in the +// UtxoSweeper. This function can be used to provide an updated fee preference +// that will be used for a new sweep transaction of the input that will act as a +// replacement transaction (RBF) of the original sweeping transaction, if any. // // NOTE: This currently doesn't do any fee rate validation to ensure that a bump // is actually successful. The responsibility of doing so should be handled by // the caller. -func (s *UtxoSweeper) BumpFee(input wire.OutPoint, - feePreference FeePreference) (chan Result, error) { +func (s *UtxoSweeper) UpdateParams(input wire.OutPoint, + params ParamsUpdate) (chan Result, error) { // Ensure the client provided a sane fee preference. 
- if _, err := s.feeRateForPreference(feePreference); err != nil { + if _, err := s.feeRateForPreference(params.Fee); err != nil { return nil, err } - responseChan := make(chan *bumpFeeResp, 1) + responseChan := make(chan *updateResp, 1) select { - case s.bumpFeeReqs <- &bumpFeeReq{ - input: input, - feePreference: feePreference, - responseChan: responseChan, + case s.updateReqs <- &updateReq{ + input: input, + params: params, + responseChan: responseChan, }: case <-s.quit: return nil, ErrSweeperShuttingDown @@ -1056,9 +1163,9 @@ func (s *UtxoSweeper) BumpFee(input wire.OutPoint, } } -// handleBumpFeeReq handles a bump fee request by simply updating the inputs fee -// preference. Currently, no validation is done on the new fee preference to -// ensure it will properly create a replacement transaction. +// handleUpdateReq handles an update request by simply updating the sweep +// parameters of the pending input. Currently, no validation is done on the new +// fee preference to ensure it will properly create a replacement transaction. // // TODO(wilmer): // * Validate fee preference to ensure we'll create a valid replacement @@ -1067,8 +1174,8 @@ func (s *UtxoSweeper) BumpFee(input wire.OutPoint, // * Ensure we don't combine this input with any other unconfirmed inputs that // did not exist in the original sweep transaction, resulting in an invalid // replacement transaction. -func (s *UtxoSweeper) handleBumpFeeReq(req *bumpFeeReq, - bestHeight int32) (chan Result, error) { +func (s *UtxoSweeper) handleUpdateReq(req *updateReq, bestHeight int32) ( + chan Result, error) { // If the UtxoSweeper is already trying to sweep this input, then we can // simply just increase its fee rate. 
This will allow the input to be @@ -1080,10 +1187,16 @@ func (s *UtxoSweeper) handleBumpFeeReq(req *bumpFeeReq, return nil, lnwallet.ErrNotMine } - log.Debugf("Updating fee preference for %v from %v to %v", req.input, - pendingInput.feePreference, req.feePreference) + // Create the updated parameters struct. Leave the exclusive group + // unchanged. + newParams := pendingInput.params + newParams.Fee = req.params.Fee + newParams.Force = req.params.Force + + log.Debugf("Updating sweep parameters for %v from %v to %v", req.input, + pendingInput.params, newParams) - pendingInput.feePreference = req.feePreference + pendingInput.params = newParams // We'll reset the input's publish height to the current so that a new // transaction can be created that replaces the transaction currently diff --git a/sweep/sweeper_test.go b/sweep/sweeper_test.go index b4139fbac8..eaffec4046 100644 --- a/sweep/sweeper_test.go +++ b/sweep/sweeper_test.go @@ -15,6 +15,7 @@ import ( "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) var ( @@ -24,7 +25,7 @@ var ( testMaxInputsPerTx = 3 - defaultFeePref = FeePreference{ConfTarget: 1} + defaultFeePref = Params{Fee: FeePreference{ConfTarget: 1}} ) type sweeperTestContext struct { @@ -97,14 +98,19 @@ func createSweeperTestContext(t *testing.T) *sweeperTestContext { store := NewMockSweeperStore() - backend := newMockBackend(notifier) + backend := newMockBackend(t, notifier) + backend.walletUtxos = []*lnwallet.Utxo{ + { + Value: btcutil.Amount(10000), + AddressType: lnwallet.WitnessPubKey, + }, + } - estimator := newMockFeeEstimator(10000, lnwallet.FeePerKwFloor) + estimator := newMockFeeEstimator(10000, chainfee.FeePerKwFloor) - publishChan := make(chan wire.MsgTx, 2) ctx := &sweeperTestContext{ notifier: notifier, - publishChan: publishChan, + publishChan: backend.publishChan, t: t, estimator: estimator, backend: 
backend, @@ -115,24 +121,14 @@ func createSweeperTestContext(t *testing.T) *sweeperTestContext { var outputScriptCount byte ctx.sweeper = New(&UtxoSweeperConfig{ Notifier: notifier, - PublishTransaction: func(tx *wire.MsgTx) error { - log.Tracef("Publishing tx %v", tx.TxHash()) - err := backend.publishTransaction(tx) - select { - case publishChan <- *tx: - case <-time.After(defaultTestTimeout): - t.Fatalf("unexpected tx published") - } - return err - }, + Wallet: backend, NewBatchTimer: func() <-chan time.Time { c := make(chan time.Time, 1) ctx.timeoutChan <- c return c }, - Store: store, - Signer: &mockSigner{}, - ChainIO: &mockChainIO{}, + Store: store, + Signer: &mockSigner{}, GenSweepScript: func() ([]byte, error) { script := []byte{outputScriptCount} outputScriptCount++ @@ -315,7 +311,7 @@ func assertTxSweepsInputs(t *testing.T, sweepTx *wire.MsgTx, // NOTE: This assumes that transactions only have one output, as this is the // only type of transaction the UtxoSweeper can create at the moment. func assertTxFeeRate(t *testing.T, tx *wire.MsgTx, - expectedFeeRate lnwallet.SatPerKWeight, inputs ...input.Input) { + expectedFeeRate chainfee.SatPerKWeight, inputs ...input.Input) { t.Helper() @@ -340,7 +336,7 @@ func assertTxFeeRate(t *testing.T, tx *wire.MsgTx, outputAmt := tx.TxOut[0].Value fee := btcutil.Amount(inputAmt - outputAmt) - _, txWeight, _, _ := getWeightEstimate(inputs) + _, txWeight := getWeightEstimate(inputs) expectedFee := expectedFeeRate.FeeForWeight(txWeight) if fee != expectedFee { @@ -354,7 +350,7 @@ func TestSuccess(t *testing.T) { ctx := createSweeperTestContext(t) // Sweeping an input without a fee preference should result in an error. 
- _, err := ctx.sweeper.SweepInput(spendableInputs[0], FeePreference{}) + _, err := ctx.sweeper.SweepInput(spendableInputs[0], Params{}) if err != ErrNoFeePreference { t.Fatalf("expected ErrNoFeePreference, got %v", err) } @@ -417,7 +413,10 @@ func TestDust(t *testing.T) { } // No sweep transaction is expected now. The sweeper should recognize - // that the sweep output will not be relayed and not generate the tx. + // that the sweep output will not be relayed and not generate the tx. It + // isn't possible to attach a wallet utxo either, because the added + // weight would create a negatively yielding transaction at this fee + // rate. // Sweep another input that brings the tx output above the dust limit. largeInput := createTestInput(100000, input.CommitmentTimeLock) @@ -443,6 +442,50 @@ func TestDust(t *testing.T) { ctx.finish(1) } +// TestWalletUtxo asserts that inputs that are not big enough to raise above the +// dust limit are accompanied by a wallet utxo to make them sweepable. +func TestWalletUtxo(t *testing.T) { + ctx := createSweeperTestContext(t) + + // Sweeping a single output produces a tx of 439 weight units. At the + // fee floor, the sweep tx will pay 439*253/1000 = 111 sat in fees. + // + // Create an input so that the output after paying fees is still + // positive (183 sat), but less than the dust limit (537 sat) for the + // sweep tx output script (P2WPKH). + // + // What we now expect is that the sweeper will attach a utxo from the + // wallet. This increases the tx weight to 712 units with a fee of 180 + // sats. The tx yield becomes then 294-180 = 114 sats. 
+ dustInput := createTestInput(294, input.WitnessKeyHash) + + _, err := ctx.sweeper.SweepInput( + &dustInput, + Params{Fee: FeePreference{FeeRate: chainfee.FeePerKwFloor}}, + ) + if err != nil { + t.Fatal(err) + } + + ctx.tick() + + sweepTx := ctx.receiveTx() + if len(sweepTx.TxIn) != 2 { + t.Fatalf("Expected tx to sweep 2 inputs, but contains %v "+ + "inputs instead", len(sweepTx.TxIn)) + } + + // Calculate expected output value based on wallet utxo of 10000 sats. + expectedOutputValue := int64(294 + 10000 - 180) + if sweepTx.TxOut[0].Value != expectedOutputValue { + t.Fatalf("Expected output value of %v, but got %v", + expectedOutputValue, sweepTx.TxOut[0].Value) + } + + ctx.backend.mine() + ctx.finish(1) +} + // TestNegativeInput asserts that no inputs with a negative yield are swept. // Negative yield means that the value minus the added fee is negative. func TestNegativeInput(t *testing.T) { @@ -717,7 +760,7 @@ func TestRestart(t *testing.T) { // Expect last tx to be republished. ctx.receiveTx() - // Simulate other subsystem (eg contract resolver) re-offering inputs. + // Simulate other subsystem (e.g. contract resolver) re-offering inputs. spendChan1, err := ctx.sweeper.SweepInput(input1, defaultFeePref) if err != nil { t.Fatal(err) @@ -815,7 +858,7 @@ func TestRestartRemoteSpend(t *testing.T) { // Mine remote spending tx. ctx.backend.mine() - // Simulate other subsystem (eg contract resolver) re-offering input 0. + // Simulate other subsystem (e.g. contract resolver) re-offering input 0. spendChan, err := ctx.sweeper.SweepInput(input1, defaultFeePref) if err != nil { t.Fatal(err) @@ -858,7 +901,7 @@ func TestRestartConfirmed(t *testing.T) { // Mine the sweep tx. ctx.backend.mine() - // Simulate other subsystem (eg contract resolver) re-offering input 0. + // Simulate other subsystem (e.g. contract resolver) re-offering input 0. 
spendChan, err := ctx.sweeper.SweepInput(input, defaultFeePref) if err != nil { t.Fatal(err) @@ -995,25 +1038,31 @@ func TestDifferentFeePreferences(t *testing.T) { // this to ensure the sweeper can broadcast distinct transactions for // each sweep with a different fee preference. lowFeePref := FeePreference{ConfTarget: 12} - lowFeeRate := lnwallet.SatPerKWeight(5000) + lowFeeRate := chainfee.SatPerKWeight(5000) ctx.estimator.blocksToFee[lowFeePref.ConfTarget] = lowFeeRate highFeePref := FeePreference{ConfTarget: 6} - highFeeRate := lnwallet.SatPerKWeight(10000) + highFeeRate := chainfee.SatPerKWeight(10000) ctx.estimator.blocksToFee[highFeePref.ConfTarget] = highFeeRate input1 := spendableInputs[0] - resultChan1, err := ctx.sweeper.SweepInput(input1, highFeePref) + resultChan1, err := ctx.sweeper.SweepInput( + input1, Params{Fee: highFeePref}, + ) if err != nil { t.Fatal(err) } input2 := spendableInputs[1] - resultChan2, err := ctx.sweeper.SweepInput(input2, highFeePref) + resultChan2, err := ctx.sweeper.SweepInput( + input2, Params{Fee: highFeePref}, + ) if err != nil { t.Fatal(err) } input3 := spendableInputs[2] - resultChan3, err := ctx.sweeper.SweepInput(input3, lowFeePref) + resultChan3, err := ctx.sweeper.SweepInput( + input3, Params{Fee: lowFeePref}, + ) if err != nil { t.Fatal(err) } @@ -1067,16 +1116,23 @@ func TestPendingInputs(t *testing.T) { ctx.estimator.blocksToFee[highFeePref.ConfTarget] = highFeeRate input1 := spendableInputs[0] - resultChan1, err := ctx.sweeper.SweepInput(input1, highFeePref) + resultChan1, err := ctx.sweeper.SweepInput( + input1, Params{Fee: highFeePref}, + ) if err != nil { t.Fatal(err) } input2 := spendableInputs[1] - if _, err := ctx.sweeper.SweepInput(input2, highFeePref); err != nil { + _, err = ctx.sweeper.SweepInput( + input2, Params{Fee: highFeePref}, + ) + if err != nil { t.Fatal(err) } input3 := spendableInputs[2] - resultChan3, err := ctx.sweeper.SweepInput(input3, lowFeePref) + resultChan3, err := 
ctx.sweeper.SweepInput( + input3, Params{Fee: lowFeePref}, + ) if err != nil { t.Fatal(err) } @@ -1117,12 +1173,14 @@ func TestBumpFeeRBF(t *testing.T) { ctx := createSweeperTestContext(t) lowFeePref := FeePreference{ConfTarget: 144} - lowFeeRate := lnwallet.FeePerKwFloor + lowFeeRate := chainfee.FeePerKwFloor ctx.estimator.blocksToFee[lowFeePref.ConfTarget] = lowFeeRate // We'll first try to bump the fee of an output currently unknown to the // UtxoSweeper. Doing so should result in a lnwallet.ErrNotMine error. - bumpResult, err := ctx.sweeper.BumpFee(wire.OutPoint{}, lowFeePref) + _, err := ctx.sweeper.UpdateParams( + wire.OutPoint{}, ParamsUpdate{Fee: lowFeePref}, + ) if err != lnwallet.ErrNotMine { t.Fatalf("expected error lnwallet.ErrNotMine, got \"%v\"", err) } @@ -1132,7 +1190,9 @@ func TestBumpFeeRBF(t *testing.T) { input := createTestInput( btcutil.SatoshiPerBitcoin, input.CommitmentTimeLock, ) - sweepResult, err := ctx.sweeper.SweepInput(&input, lowFeePref) + sweepResult, err := ctx.sweeper.SweepInput( + &input, Params{Fee: lowFeePref}, + ) if err != nil { t.Fatal(err) } @@ -1148,12 +1208,14 @@ func TestBumpFeeRBF(t *testing.T) { ctx.estimator.blocksToFee[highFeePref.ConfTarget] = highFeeRate // We should expect to see an error if a fee preference isn't provided. - _, err = ctx.sweeper.BumpFee(*input.OutPoint(), FeePreference{}) + _, err = ctx.sweeper.UpdateParams(*input.OutPoint(), ParamsUpdate{}) if err != ErrNoFeePreference { t.Fatalf("expected ErrNoFeePreference, got %v", err) } - bumpResult, err = ctx.sweeper.BumpFee(*input.OutPoint(), highFeePref) + bumpResult, err := ctx.sweeper.UpdateParams( + *input.OutPoint(), ParamsUpdate{Fee: highFeePref}, + ) if err != nil { t.Fatalf("unable to bump input's fee: %v", err) } @@ -1170,3 +1232,63 @@ func TestBumpFeeRBF(t *testing.T) { ctx.finish(1) } + +// TestExclusiveGroup tests the sweeper exclusive group functionality. 
+func TestExclusiveGroup(t *testing.T) { + ctx := createSweeperTestContext(t) + + // Sweep three inputs in the same exclusive group. + var results []chan Result + for i := 0; i < 3; i++ { + exclusiveGroup := uint64(1) + result, err := ctx.sweeper.SweepInput( + spendableInputs[i], Params{ + Fee: FeePreference{ConfTarget: 6}, + ExclusiveGroup: &exclusiveGroup, + }, + ) + if err != nil { + t.Fatal(err) + } + results = append(results, result) + } + + // We expect all inputs to be published in separate transactions, even + // though they share the same fee preference. + ctx.tick() + for i := 0; i < 3; i++ { + sweepTx := ctx.receiveTx() + if len(sweepTx.TxOut) != 1 { + t.Fatal("expected a single tx out in the sweep tx") + } + + // Remove all txes except for the one that sweeps the first + // input. This simulates the sweeps being conflicting. + if sweepTx.TxIn[0].PreviousOutPoint != + *spendableInputs[0].OutPoint() { + + ctx.backend.deleteUnconfirmed(sweepTx.TxHash()) + } + } + + // Mine the first sweep tx. + ctx.backend.mine() + + // Expect the first input to be swept by the confirmed sweep tx. + result0 := <-results[0] + if result0.Err != nil { + t.Fatal("expected first input to be swept") + } + + // Expect the other two inputs to return an error. They have no chance + // of confirming. 
+ result1 := <-results[1] + if result1.Err != ErrExclusiveGroupSpend { + t.Fatal("expected second input to be canceled") + } + + result2 := <-results[2] + if result2.Err != ErrExclusiveGroupSpend { + t.Fatal("expected third input to be canceled") + } +} diff --git a/sweep/test_utils.go b/sweep/test_utils.go index 46ee6dc3d0..7c28710be5 100644 --- a/sweep/test_utils.go +++ b/sweep/test_utils.go @@ -6,25 +6,36 @@ import ( "testing" "time" + "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/input" - "github.com/lightningnetwork/lnd/lnwallet" ) var ( defaultTestTimeout = 5 * time.Second - mockChainIOHeight = int32(100) + mockChainHash, _ = chainhash.NewHashFromStr("00aabbccddeeff") + mockChainHeight = int32(100) ) +type dummySignature struct{} + +func (s *dummySignature) Serialize() []byte { + return []byte{} +} + +func (s *dummySignature) Verify(_ []byte, _ *btcec.PublicKey) bool { + return true +} + type mockSigner struct { } func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, - signDesc *input.SignDescriptor) ([]byte, error) { + signDesc *input.SignDescriptor) (input.Signature, error) { - return []byte{}, nil + return &dummySignature{}, nil } func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx, @@ -155,12 +166,22 @@ func (m *MockNotifier) RegisterBlockEpochNtfn( log.Tracef("Mock block ntfn registered") m.mutex.Lock() - epochChan := make(chan *chainntnfs.BlockEpoch, 0) - bestHeight := int32(0) - if bestBlock != nil { - bestHeight = bestBlock.Height + epochChan := make(chan *chainntnfs.BlockEpoch, 1) + + // The real notifier returns a notification with the current block hash + // and height immediately if no best block hash or height is specified + // in the request. We want to emulate this behaviour as well for the + // mock. 
+ switch { + case bestBlock == nil: + epochChan <- &chainntnfs.BlockEpoch{ + Hash: mockChainHash, + Height: mockChainHeight, + } + m.epochChan[epochChan] = mockChainHeight + default: + m.epochChan[epochChan] = bestBlock.Height } - m.epochChan[epochChan] = bestHeight m.mutex.Unlock() return &chainntnfs.BlockEpochEvent{ @@ -206,7 +227,7 @@ func (m *MockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, m.mutex.Unlock() // If output has been spent already, signal now. Do this outside the - // lock to prevent a dead lock. + // lock to prevent a deadlock. if spent { m.sendSpend(channel, outpoint, spendingTx) } @@ -235,25 +256,3 @@ func (m *MockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, }, }, nil } - -type mockChainIO struct{} - -var _ lnwallet.BlockChainIO = (*mockChainIO)(nil) - -func (m *mockChainIO) GetBestBlock() (*chainhash.Hash, int32, error) { - return nil, mockChainIOHeight, nil -} - -func (m *mockChainIO) GetUtxo(op *wire.OutPoint, pkScript []byte, - heightHint uint32, _ <-chan struct{}) (*wire.TxOut, error) { - - return nil, nil -} - -func (m *mockChainIO) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) { - return nil, nil -} - -func (m *mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) { - return nil, nil -} diff --git a/sweep/tx_input_set.go b/sweep/tx_input_set.go new file mode 100644 index 0000000000..1f21f9602c --- /dev/null +++ b/sweep/tx_input_set.go @@ -0,0 +1,295 @@ +package sweep + +import ( + "fmt" + "math" + + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcwallet/wallet/txrules" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" +) + +// addConstraints defines the constraints to apply when adding an input. +type addConstraints uint8 + +const ( + // constraintsRegular is for regular input sweeps that should have a positive + // yield. 
+ constraintsRegular addConstraints = iota + + // constraintsWallet is for wallet inputs that are only added to bring up the tx + // output value. + constraintsWallet + + // constraintsForce is for inputs that should be swept even with a negative + // yield at the set fee rate. + constraintsForce +) + +// txInputSet is an object that accumulates tx inputs and keeps running counters +// on various properties of the tx. +type txInputSet struct { + // weightEstimate is the (worst case) tx weight with the current set of + // inputs. + weightEstimate input.TxWeightEstimator + + // inputTotal is the total value of all inputs. + inputTotal btcutil.Amount + + // outputValue is the value of the tx output. + outputValue btcutil.Amount + + // feePerKW is the fee rate used to calculate the tx fee. + feePerKW chainfee.SatPerKWeight + + // inputs is the set of tx inputs. + inputs []input.Input + + // dustLimit is the minimum output value of the tx. + dustLimit btcutil.Amount + + // maxInputs is the maximum number of inputs that will be accepted in + // the set. + maxInputs int + + // walletInputTotal is the total value of inputs coming from the wallet. + walletInputTotal btcutil.Amount + + // wallet contains wallet functionality required by the input set to + // retrieve utxos. + wallet Wallet + + // force indicates that this set must be swept even if the total yield + // is negative. + force bool +} + +// newTxInputSet constructs a new, empty input set. +func newTxInputSet(wallet Wallet, feePerKW, + relayFee chainfee.SatPerKWeight, maxInputs int) *txInputSet { + + dustLimit := txrules.GetDustThreshold( + input.P2WPKHSize, + btcutil.Amount(relayFee.FeePerKVByte()), + ) + + b := txInputSet{ + feePerKW: feePerKW, + dustLimit: dustLimit, + maxInputs: maxInputs, + wallet: wallet, + } + + // Add the sweep tx output to the weight estimate. 
+ b.weightEstimate.AddP2WKHOutput() + + return &b +} + +// dustLimitReached returns true if we've accumulated enough inputs to meet the +// dust limit. +func (t *txInputSet) dustLimitReached() bool { + return t.outputValue >= t.dustLimit +} + +// add adds a new input to the set. It returns a bool indicating whether the +// input was added to the set. An input is rejected if it decreases the tx +// output value after paying fees. +func (t *txInputSet) add(input input.Input, constraints addConstraints) bool { + // Stop if max inputs is reached. Do not count additional wallet inputs, + // because we don't know in advance how many we may need. + if constraints != constraintsWallet && + len(t.inputs) >= t.maxInputs { + + return false + } + + // Can ignore error, because it has already been checked when + // calculating the yields. + size, isNestedP2SH, _ := input.WitnessType().SizeUpperBound() + + // Add weight of this new candidate input to a copy of the weight + // estimator. + newWeightEstimate := t.weightEstimate + if isNestedP2SH { + newWeightEstimate.AddNestedP2WSHInput(size) + } else { + newWeightEstimate.AddWitnessInput(size) + } + + value := btcutil.Amount(input.SignDesc().Output.Value) + newInputTotal := t.inputTotal + value + + weight := newWeightEstimate.Weight() + fee := t.feePerKW.FeeForWeight(int64(weight)) + + // Calculate the output value if the current input would be + // added to the set. + newOutputValue := newInputTotal - fee + + // Initialize new wallet total with the current wallet total. This is + // updated below if this input is a wallet input. + newWalletTotal := t.walletInputTotal + + // Calculate the yield of this input from the change in tx output value. + inputYield := newOutputValue - t.outputValue + + switch constraints { + + // Don't sweep inputs that cost us more to sweep than they give us. + case constraintsRegular: + if inputYield <= 0 { + return false + } + + // For force adds, no further constraints apply. 
+ case constraintsForce: + t.force = true + + // We are attaching a wallet input to raise the tx output value above + // the dust limit. + case constraintsWallet: + // Skip this wallet input if adding it would lower the output + // value. + if inputYield <= 0 { + return false + } + + // Calculate the total value that we spend in this tx from the + // wallet if we'd add this wallet input. + newWalletTotal += value + + // In any case, we don't want to lose money by sweeping. If we + // don't get more out of the tx then we put in ourselves, do not + // add this wallet input. If there is at least one force sweep + // in the set, this does no longer apply. + // + // We should only add wallet inputs to get the tx output value + // above the dust limit, otherwise we'd only burn into fees. + // This is guarded by tryAddWalletInputsIfNeeded. + // + // TODO(joostjager): Possibly require a max ratio between the + // value of the wallet input and what we get out of this + // transaction. To prevent attaching and locking a big utxo for + // very little benefit. + if !t.force && newWalletTotal >= newOutputValue { + log.Debugf("Rejecting wallet input of %v, because it "+ + "would make a negative yielding transaction "+ + "(%v)", + value, newOutputValue-newWalletTotal) + + return false + } + } + + // Update running values. + // + // TODO: Return new instance? + t.inputTotal = newInputTotal + t.outputValue = newOutputValue + t.inputs = append(t.inputs, input) + t.weightEstimate = newWeightEstimate + t.walletInputTotal = newWalletTotal + + return true +} + +// addPositiveYieldInputs adds sweepableInputs that have a positive yield to the +// input set. This function assumes that the list of inputs is sorted descending +// by yield. +// +// TODO(roasbeef): Consider including some negative yield inputs too to clean +// up the utxo set even if it costs us some fees up front. In the spirit of +// minimizing any negative externalities we cause for the Bitcoin system as a +// whole. 
+func (t *txInputSet) addPositiveYieldInputs(sweepableInputs []txInput) { + for _, input := range sweepableInputs { + // Apply relaxed constraints for force sweeps. + constraints := constraintsRegular + if input.parameters().Force { + constraints = constraintsForce + } + + // Try to add the input to the transaction. If that doesn't + // succeed because it wouldn't increase the output value, + // return. Assuming inputs are sorted by yield, any further + // inputs wouldn't increase the output value either. + if !t.add(input, constraints) { + return + } + } + + // We managed to add all inputs to the set. +} + +// tryAddWalletInputsIfNeeded retrieves utxos from the wallet and tries adding as +// many as required to bring the tx output value above the given minimum. +func (t *txInputSet) tryAddWalletInputsIfNeeded() error { + // If we've already reached the dust limit, no action is needed. + if t.dustLimitReached() { + return nil + } + + // Retrieve wallet utxos. Only consider confirmed utxos to prevent + // problems around RBF rules for unconfirmed inputs. + utxos, err := t.wallet.ListUnspentWitness(1, math.MaxInt32) + if err != nil { + return err + } + + for _, utxo := range utxos { + input, err := createWalletTxInput(utxo) + if err != nil { + return err + } + + // If the wallet input isn't positively-yielding at this fee + // rate, skip it. + if !t.add(input, constraintsWallet) { + continue + } + + // Return if we've reached the minimum output amount. + if t.dustLimitReached() { + return nil + } + } + + // We were not able to reach the minimum output amount. + return nil +} + +// createWalletTxInput converts a wallet utxo into an object that can be added +// to the other inputs to sweep. 
+func createWalletTxInput(utxo *lnwallet.Utxo) (input.Input, error) { + var witnessType input.WitnessType + switch utxo.AddressType { + case lnwallet.WitnessPubKey: + witnessType = input.WitnessKeyHash + case lnwallet.NestedWitnessPubKey: + witnessType = input.NestedWitnessKeyHash + default: + return nil, fmt.Errorf("unknown address type %v", + utxo.AddressType) + } + + signDesc := &input.SignDescriptor{ + Output: &wire.TxOut{ + PkScript: utxo.PkScript, + Value: int64(utxo.Value), + }, + HashType: txscript.SigHashAll, + } + + // A height hint doesn't need to be set, because we don't monitor these + // inputs for spend. + heightHint := uint32(0) + + return input.NewBaseInput( + &utxo.OutPoint, witnessType, signDesc, heightHint, + ), nil +} diff --git a/sweep/tx_input_set_test.go b/sweep/tx_input_set_test.go new file mode 100644 index 0000000000..d9e98f7330 --- /dev/null +++ b/sweep/tx_input_set_test.go @@ -0,0 +1,119 @@ +package sweep + +import ( + "testing" + + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lnwallet" +) + +// TestTxInputSet tests adding various sized inputs to the set. +func TestTxInputSet(t *testing.T) { + const ( + feeRate = 1000 + relayFee = 300 + maxInputs = 10 + ) + set := newTxInputSet(nil, feeRate, relayFee, maxInputs) + + if set.dustLimit != 537 { + t.Fatalf("incorrect dust limit") + } + + // Create a 300 sat input. The fee to sweep this input to a P2WKH output + // is 439 sats. That means that this input yields -139 sats and we + // expect it not to be added. + if set.add(createP2WKHInput(300), constraintsRegular) { + t.Fatal("expected add of negatively yielding input to fail") + } + + // A 700 sat input should be accepted into the set, because it yields + // positively. + if !set.add(createP2WKHInput(700), constraintsRegular) { + t.Fatal("expected add of positively yielding input to succeed") + } + + // The tx output should now be 700-439 = 261 sats. 
The dust limit isn't + // reached yet. + if set.outputValue != 261 { + t.Fatal("unexpected output value") + } + if set.dustLimitReached() { + t.Fatal("expected dust limit not yet to be reached") + } + + // Add a 1000 sat input. This increases the tx fee to 712 sats. The tx + // output should now be 1000+700 - 712 = 988 sats. + if !set.add(createP2WKHInput(1000), constraintsRegular) { + t.Fatal("expected add of positively yielding input to succeed") + } + if set.outputValue != 988 { + t.Fatal("unexpected output value") + } + if !set.dustLimitReached() { + t.Fatal("expected dust limit to be reached") + } +} + +// TestTxInputSetFromWallet tests adding a wallet input to a TxInputSet to reach +// the dust limit. +func TestTxInputSetFromWallet(t *testing.T) { + const ( + feeRate = 500 + relayFee = 300 + maxInputs = 10 + ) + + wallet := &mockWallet{} + set := newTxInputSet(wallet, feeRate, relayFee, maxInputs) + + // Add a 700 sat input to the set. It yields positively, but doesn't + // reach the output dust limit. + if !set.add(createP2WKHInput(700), constraintsRegular) { + t.Fatal("expected add of positively yielding input to succeed") + } + if set.dustLimitReached() { + t.Fatal("expected dust limit not yet to be reached") + } + + // Expect that adding a negative yield input fails. + if set.add(createP2WKHInput(50), constraintsRegular) { + t.Fatal("expected negative yield input add to fail") + } + + // Force add the negative yield input. It should succeed. + if !set.add(createP2WKHInput(50), constraintsForce) { + t.Fatal("expected forced add to succeed") + } + + err := set.tryAddWalletInputsIfNeeded() + if err != nil { + t.Fatal(err) + } + + if !set.dustLimitReached() { + t.Fatal("expected dust limit to be reached") + } +} + +// createP2WKHInput returns a P2WKH test input with the specified amount. 
+func createP2WKHInput(amt btcutil.Amount) input.Input { + input := createTestInput(int64(amt), input.WitnessKeyHash) + return &input +} + +type mockWallet struct { + Wallet +} + +func (m *mockWallet) ListUnspentWitness(minconfirms, maxconfirms int32) ( + []*lnwallet.Utxo, error) { + + return []*lnwallet.Utxo{ + { + AddressType: lnwallet.WitnessPubKey, + Value: 10000, + }, + }, nil +} diff --git a/sweep/txgenerator.go b/sweep/txgenerator.go index a06dad9630..1e47dad217 100644 --- a/sweep/txgenerator.go +++ b/sweep/txgenerator.go @@ -3,14 +3,14 @@ package sweep import ( "fmt" "sort" + "strings" "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" - "github.com/btcsuite/btcwallet/wallet/txrules" "github.com/lightningnetwork/lnd/input" - "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) var ( @@ -20,6 +20,13 @@ var ( DefaultMaxInputsPerTx = 100 ) +// txInput is an interface that provides the input data required for tx +// generation. +type txInput interface { + input.Input + parameters() Params +} + // inputSet is a set of inputs that can be used as the basis to generate a tx // on. type inputSet []input.Input @@ -29,16 +36,9 @@ type inputSet []input.Input // contains up to the configured maximum number of inputs. Negative yield // inputs are skipped. No input sets with a total value after fees below the // dust limit are returned. -func generateInputPartitionings(sweepableInputs []input.Input, - relayFeePerKW, feePerKW lnwallet.SatPerKWeight, - maxInputsPerTx int) ([]inputSet, error) { - - // Calculate dust limit based on the P2WPKH output script of the sweep - // txes. 
- dustLimit := txrules.GetDustThreshold( - input.P2WPKHSize, - btcutil.Amount(relayFeePerKW.FeePerKVByte()), - ) +func generateInputPartitionings(sweepableInputs []txInput, + relayFeePerKW, feePerKW chainfee.SatPerKWeight, + maxInputsPerTx int, wallet Wallet) ([]inputSet, error) { // Sort input by yield. We will start constructing input sets starting // with the highest yield inputs. This is to prevent the construction @@ -56,7 +56,7 @@ func generateInputPartitionings(sweepableInputs []input.Input, // on the signature length, which is not known yet at this point. yields := make(map[wire.OutPoint]int64) for _, input := range sweepableInputs { - size, _, err := getInputWitnessSizeUpperBound(input) + size, _, err := input.WitnessType().SizeUpperBound() if err != nil { return nil, fmt.Errorf( "failed adding input weight: %v", err) @@ -67,6 +67,14 @@ func generateInputPartitionings(sweepableInputs []input.Input, } sort.Slice(sweepableInputs, func(i, j int) bool { + // Because of the specific ordering and termination condition + // that is described above, we place force sweeps at the start + // of the list. Otherwise we can't be sure that they will be + // included in an input set. + if sweepableInputs[i].parameters().Force { + return true + } + return yields[*sweepableInputs[i].OutPoint()] > yields[*sweepableInputs[j].OutPoint()] }) @@ -74,108 +82,59 @@ func generateInputPartitionings(sweepableInputs []input.Input, // Select blocks of inputs up to the configured maximum number. var sets []inputSet for len(sweepableInputs) > 0 { - // Get the maximum number of inputs from sweepableInputs that - // we can use to create a positive yielding set from. - count, outputValue := getPositiveYieldInputs( - sweepableInputs, maxInputsPerTx, feePerKW, + // Start building a set of positive-yield tx inputs under the + // condition that the tx will be published with the specified + // fee rate. 
+ txInputs := newTxInputSet( + wallet, feePerKW, relayFeePerKW, maxInputsPerTx, ) - // If there are no positive yield inputs left, we can stop - // here. - if count == 0 { + // From the set of sweepable inputs, keep adding inputs to the + // input set until the tx output value no longer goes up or the + // maximum number of inputs is reached. + txInputs.addPositiveYieldInputs(sweepableInputs) + + // If there are no positive yield inputs, we can stop here. + inputCount := len(txInputs.inputs) + if inputCount == 0 { return sets, nil } + // Check the current output value and add wallet utxos if + // needed to push the output value to the lower limit. + if err := txInputs.tryAddWalletInputsIfNeeded(); err != nil { + return nil, err + } + // If the output value of this block of inputs does not reach // the dust limit, stop sweeping. Because of the sorting, // continuing with the remaining inputs will only lead to sets - // with a even lower output value. - if outputValue < dustLimit { + // with an even lower output value. + if !txInputs.dustLimitReached() { log.Debugf("Set value %v below dust limit of %v", - outputValue, dustLimit) + txInputs.outputValue, txInputs.dustLimit) return sets, nil } - log.Infof("Candidate sweep set of size=%v, has yield=%v", - count, outputValue) + log.Infof("Candidate sweep set of size=%v (+%v wallet inputs), "+ + "has yield=%v, weight=%v", + inputCount, len(txInputs.inputs)-inputCount, + txInputs.outputValue-txInputs.walletInputTotal, + txInputs.weightEstimate.Weight()) - sets = append(sets, sweepableInputs[:count]) - sweepableInputs = sweepableInputs[count:] + sets = append(sets, txInputs.inputs) + sweepableInputs = sweepableInputs[inputCount:] } return sets, nil } -// getPositiveYieldInputs returns the maximum of a number n for which holds -// that the inputs [0,n) of sweepableInputs have a positive yield. -// Additionally, the total values of these inputs minus the fee is returned. 
-// -// TODO(roasbeef): Consider including some negative yield inputs too to clean -// up the utxo set even if it costs us some fees up front. In the spirit of -// minimizing any negative externalities we cause for the Bitcoin system as a -// whole. -func getPositiveYieldInputs(sweepableInputs []input.Input, maxInputs int, - feePerKW lnwallet.SatPerKWeight) (int, btcutil.Amount) { - - var weightEstimate input.TxWeightEstimator - - // Add the sweep tx output to the weight estimate. - weightEstimate.AddP2WKHOutput() - - var total, outputValue btcutil.Amount - for idx, input := range sweepableInputs { - // Can ignore error, because it has already been checked when - // calculating the yields. - size, isNestedP2SH, _ := getInputWitnessSizeUpperBound(input) - - // Keep a running weight estimate of the input set. - if isNestedP2SH { - weightEstimate.AddNestedP2WSHInput(size) - } else { - weightEstimate.AddWitnessInput(size) - } - - newTotal := total + btcutil.Amount(input.SignDesc().Output.Value) - - weight := weightEstimate.Weight() - fee := feePerKW.FeeForWeight(int64(weight)) - - // Calculate the output value if the current input would be - // added to the set. - newOutputValue := newTotal - fee - - // If adding this input makes the total output value of the set - // decrease, this is a negative yield input. It shouldn't be - // added to the set. We return the current index as the number - // of inputs, so the current input is being excluded. - if newOutputValue <= outputValue { - return idx, outputValue - } - - // Update running values. - total = newTotal - outputValue = newOutputValue - - // Stop if max inputs is reached. - if idx == maxInputs-1 { - return maxInputs, outputValue - } - } - - // We could add all inputs to the set, so return them all. - return len(sweepableInputs), outputValue -} - // createSweepTx builds a signed tx spending the inputs to a the output script. 
func createSweepTx(inputs []input.Input, outputPkScript []byte, - currentBlockHeight uint32, feePerKw lnwallet.SatPerKWeight, + currentBlockHeight uint32, feePerKw chainfee.SatPerKWeight, signer input.Signer) (*wire.MsgTx, error) { - inputs, txWeight, csvCount, cltvCount := getWeightEstimate(inputs) - - log.Infof("Creating sweep transaction for %v inputs (%v CSV, %v CLTV) "+ - "using %v sat/kw", len(inputs), csvCount, cltvCount, - int64(feePerKw)) + inputs, txWeight := getWeightEstimate(inputs) txFee := feePerKw.FeeForWeight(txWeight) @@ -248,65 +207,16 @@ func createSweepTx(inputs []input.Input, outputPkScript []byte, } } - return sweepTx, nil -} - -// getInputWitnessSizeUpperBound returns the maximum length of the witness for -// the given input if it would be included in a tx. We also return if the -// output itself is a nested p2sh output, if so then we need to take into -// account the extra sigScript data size. -func getInputWitnessSizeUpperBound(inp input.Input) (int, bool, error) { - switch inp.WitnessType() { - - // Outputs on a remote commitment transaction that pay directly to us. - case input.CommitSpendNoDelayTweakless: - fallthrough - case input.WitnessKeyHash: - fallthrough - case input.CommitmentNoDelay: - return input.P2WKHWitnessSize, false, nil - - // Outputs on a past commitment transaction that pay directly - // to us. - case input.CommitmentTimeLock: - return input.ToLocalTimeoutWitnessSize, false, nil - - // Outgoing second layer HTLC's that have confirmed within the - // chain, and the output they produced is now mature enough to - // sweep. - case input.HtlcOfferedTimeoutSecondLevel: - return input.ToLocalTimeoutWitnessSize, false, nil - - // Incoming second layer HTLC's that have confirmed within the - // chain, and the output they produced is now mature enough to - // sweep. 
- case input.HtlcAcceptedSuccessSecondLevel: - return input.ToLocalTimeoutWitnessSize, false, nil - - // An HTLC on the commitment transaction of the remote party, - // that has had its absolute timelock expire. - case input.HtlcOfferedRemoteTimeout: - return input.AcceptedHtlcTimeoutWitnessSize, false, nil - - // An HTLC on the commitment transaction of the remote party, - // that can be swept with the preimage. - case input.HtlcAcceptedRemoteSuccess: - return input.OfferedHtlcSuccessWitnessSize, false, nil - - // A nested P2SH input that has a p2wkh witness script. We'll mark this - // as nested P2SH so the caller can estimate the weight properly - // including the sigScript. - case input.NestedWitnessKeyHash: - return input.P2WKHWitnessSize, true, nil - } + log.Infof("Creating sweep transaction %v for %v inputs (%s) "+ + "using %v sat/kw, tx_fee=%v", sweepTx.TxHash(), len(inputs), + inputTypeSummary(inputs), int64(feePerKw), txFee) - return 0, false, fmt.Errorf("unexpected witness type: %v", - inp.WitnessType()) + return sweepTx, nil } // getWeightEstimate returns a weight estimate for the given inputs. // Additionally, it returns counts for the number of csv and cltv inputs. -func getWeightEstimate(inputs []input.Input) ([]input.Input, int64, int, int) { +func getWeightEstimate(inputs []input.Input) ([]input.Input, int64) { // We initialize a weight estimator so we can accurately asses the // amount of fees we need to pay for this sweep transaction. // @@ -321,17 +231,12 @@ func getWeightEstimate(inputs []input.Input) ([]input.Input, int64, int, int) { // For each output, use its witness type to determine the estimate // weight of its witness, and add it to the proper set of spendable // outputs. 
- var ( - sweepInputs []input.Input - csvCount, cltvCount int - ) + var sweepInputs []input.Input for i := range inputs { inp := inputs[i] - // For fee estimation purposes, we'll now attempt to obtain an - // upper bound on the weight this input will add when fully - // populated. - size, isNestedP2SH, err := getInputWitnessSizeUpperBound(inp) + wt := inp.WitnessType() + err := wt.AddWeightEstimation(&weightEstimate) if err != nil { log.Warn(err) @@ -340,26 +245,29 @@ func getWeightEstimate(inputs []input.Input) ([]input.Input, int64, int, int) { continue } - // If this is a nested P2SH input, then we'll need to factor in - // the additional data push within the sigScript. - if isNestedP2SH { - weightEstimate.AddNestedP2WSHInput(size) - } else { - weightEstimate.AddWitnessInput(size) - } - - switch inp.WitnessType() { - case input.CommitmentTimeLock, - input.HtlcOfferedTimeoutSecondLevel, - input.HtlcAcceptedSuccessSecondLevel: - csvCount++ - case input.HtlcOfferedRemoteTimeout: - cltvCount++ - } sweepInputs = append(sweepInputs, inp) } - txWeight := int64(weightEstimate.Weight()) + return sweepInputs, int64(weightEstimate.Weight()) +} + +// inputSummary returns a string containing a human readable summary about the +// witness types of a list of inputs. +func inputTypeSummary(inputs []input.Input) string { + // Sort inputs by witness type. 
+ sortedInputs := make([]input.Input, len(inputs)) + copy(sortedInputs, inputs) + sort.Slice(sortedInputs, func(i, j int) bool { + return sortedInputs[i].WitnessType().String() < + sortedInputs[j].WitnessType().String() + }) - return sweepInputs, txWeight, csvCount, cltvCount + var parts []string + for _, i := range sortedInputs { + part := fmt.Sprintf("%v (%v)", + *i.OutPoint(), i.WitnessType()) + + parts = append(parts, part) + } + return strings.Join(parts, ", ") } diff --git a/sweep/txgenerator_test.go b/sweep/txgenerator_test.go new file mode 100644 index 0000000000..575ae65aa3 --- /dev/null +++ b/sweep/txgenerator_test.go @@ -0,0 +1,52 @@ +package sweep + +import ( + "testing" + + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/input" +) + +var ( + witnessTypes = []input.WitnessType{ + input.CommitmentTimeLock, + input.HtlcAcceptedSuccessSecondLevel, + input.HtlcOfferedRemoteTimeout, + input.WitnessKeyHash, + } + expectedWeight = int64(1463) + expectedSummary = "0000000000000000000000000000000000000000000000000000000000000000:10 (CommitmentTimeLock), " + + "0000000000000000000000000000000000000000000000000000000000000001:11 (HtlcAcceptedSuccessSecondLevel), " + + "0000000000000000000000000000000000000000000000000000000000000002:12 (HtlcOfferedRemoteTimeout), " + + "0000000000000000000000000000000000000000000000000000000000000003:13 (WitnessKeyHash)" +) + +// TestWeightEstimate tests that the estimated weight and number of CSVs/CLTVs +// used is correct for a transaction that uses inputs with the witness types +// defined in witnessTypes. 
+func TestWeightEstimate(t *testing.T) { + t.Parallel() + + var inputs []input.Input + for i, witnessType := range witnessTypes { + inputs = append(inputs, input.NewBaseInput( + &wire.OutPoint{ + Hash: chainhash.Hash{byte(i)}, + Index: uint32(i) + 10, + }, witnessType, + &input.SignDescriptor{}, 0, + )) + } + + _, weight := getWeightEstimate(inputs) + if weight != expectedWeight { + t.Fatalf("unexpected weight. expected %d but got %d.", + expectedWeight, weight) + } + summary := inputTypeSummary(inputs) + if summary != expectedSummary { + t.Fatalf("unexpected summary. expected %s but got %s.", + expectedSummary, summary) + } +} diff --git a/sweep/walletsweep.go b/sweep/walletsweep.go index e21457788a..fb25ccf3a9 100644 --- a/sweep/walletsweep.go +++ b/sweep/walletsweep.go @@ -9,6 +9,7 @@ import ( "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) const ( @@ -27,7 +28,7 @@ type FeePreference struct { // FeeRate if non-zero, signals a fee pre fence expressed in the fee // rate expressed in sat/kw for a particular transaction. - FeeRate lnwallet.SatPerKWeight + FeeRate chainfee.SatPerKWeight } // String returns a human-readable string of the fee preference. @@ -42,8 +43,8 @@ func (p FeePreference) String() string { // an estimator, a confirmation target, and a manual value for sat/byte. A // value is chosen based on the two free parameters as one, or both of them can // be zero. -func DetermineFeePerKw(feeEstimator lnwallet.FeeEstimator, - feePref FeePreference) (lnwallet.SatPerKWeight, error) { +func DetermineFeePerKw(feeEstimator chainfee.Estimator, + feePref FeePreference) (chainfee.SatPerKWeight, error) { switch { // If both values are set, then we'll return an error as we require a @@ -70,12 +71,12 @@ func DetermineFeePerKw(feeEstimator lnwallet.FeeEstimator, // internally. 
case feePref.FeeRate != 0: feePerKW := feePref.FeeRate - if feePerKW < lnwallet.FeePerKwFloor { + if feePerKW < chainfee.FeePerKwFloor { log.Infof("Manual fee rate input of %d sat/kw is "+ "too low, using %d sat/kw instead", feePerKW, - lnwallet.FeePerKwFloor) + chainfee.FeePerKwFloor) - feePerKW = lnwallet.FeePerKwFloor + feePerKW = chainfee.FeePerKwFloor } return feePerKW, nil @@ -111,7 +112,7 @@ type UtxoSource interface { type CoinSelectionLocker interface { // WithCoinSelectLock will execute the passed function closure in a // synchronized manner preventing any coin selection operations from - // proceeding while the closure if executing. This can be seen as the + // proceeding while the closure is executing. This can be seen as the // ability to execute a function closure under an exclusive coin // selection lock. WithCoinSelectLock(func() error) error @@ -119,7 +120,7 @@ type CoinSelectionLocker interface { // OutpointLocker allows a caller to lock/unlock an outpoint. When locked, the // outpoints shouldn't be used for any sort of channel funding of coin -// selection. Locked outpoints are not expect to be persisted between restarts. +// selection. Locked outpoints are not expected to be persisted between restarts. type OutpointLocker interface { // LockOutpoint locks a target outpoint, rendering it unusable for coin // selection. @@ -152,10 +153,10 @@ type WalletSweepPackage struct { // by the delivery address. The sweep transaction will be crafted with the // target fee rate, and will use the utxoSource and outpointLocker as sources // for wallet funds. 
-func CraftSweepAllTx(feeRate lnwallet.SatPerKWeight, blockHeight uint32, +func CraftSweepAllTx(feeRate chainfee.SatPerKWeight, blockHeight uint32, deliveryAddr btcutil.Address, coinSelectLocker CoinSelectionLocker, utxoSource UtxoSource, outpointLocker OutpointLocker, - feeEstimator lnwallet.FeeEstimator, + feeEstimator chainfee.Estimator, signer input.Signer) (*WalletSweepPackage, error) { // TODO(roasbeef): turn off ATPL as well when available? diff --git a/sweep/walletsweep_test.go b/sweep/walletsweep_test.go index 0b96e00b9e..acd99c6a56 100644 --- a/sweep/walletsweep_test.go +++ b/sweep/walletsweep_test.go @@ -10,6 +10,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) // TestDetermineFeePerKw tests that given a fee preference, the @@ -17,8 +18,8 @@ import ( func TestDetermineFeePerKw(t *testing.T) { t.Parallel() - defaultFee := lnwallet.SatPerKWeight(999) - relayFee := lnwallet.SatPerKWeight(300) + defaultFee := chainfee.SatPerKWeight(999) + relayFee := chainfee.SatPerKWeight(300) feeEstimator := newMockFeeEstimator(defaultFee, relayFee) @@ -35,7 +36,7 @@ func TestDetermineFeePerKw(t *testing.T) { // fee is the value the DetermineFeePerKw should return given // the FeePreference above - fee lnwallet.SatPerKWeight + fee chainfee.SatPerKWeight // fail determines if this test case should fail or not. fail bool @@ -43,9 +44,9 @@ func TestDetermineFeePerKw(t *testing.T) { // A fee rate below the fee rate floor should output the floor. 
{ feePref: FeePreference{ - FeeRate: lnwallet.SatPerKWeight(99), + FeeRate: chainfee.SatPerKWeight(99), }, - fee: lnwallet.FeePerKwFloor, + fee: chainfee.FeePerKwFloor, }, // A fee rate above the floor, should pass through and return diff --git a/test_utils.go b/test_utils.go index 671bc0f46b..77b5700bdc 100644 --- a/test_utils.go +++ b/test_utils.go @@ -17,11 +17,13 @@ import ( "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/netann" "github.com/lightningnetwork/lnd/shachain" @@ -89,10 +91,16 @@ var ( } ) +// noUpdate is a function which can be used as a parameter in createTestPeer to +// call the setup code with no custom values on the channels set up. +var noUpdate = func(a, b *channeldb.OpenChannel) {} + // createTestPeer creates a channel between two nodes, and returns a peer for -// one of the nodes, together with the channel seen from both nodes. -func createTestPeer(notifier chainntnfs.ChainNotifier, - publTx chan *wire.MsgTx) (*peer, *lnwallet.LightningChannel, +// one of the nodes, together with the channel seen from both nodes. It takes +// an updateChan function which can be used to modify the default values on +// the channel states for each peer. 
+func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx, + updateChan func(a, b *channeldb.OpenChannel)) (*peer, *lnwallet.LightningChannel, *lnwallet.LightningChannel, func(), error) { aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), @@ -188,7 +196,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns( channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint, - bobCommitPoint, *fundingTxIn, true, + bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit, ) if err != nil { return nil, nil, nil, nil, err @@ -214,7 +222,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, return nil, nil, nil, nil, err } - estimator := lnwallet.NewStaticFeeEstimator(12500, 0) + estimator := chainfee.NewStaticEstimator(12500, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { return nil, nil, nil, nil, err @@ -255,7 +263,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, IdentityPub: aliceKeyPub, FundingOutpoint: *prevOut, ShortChannelID: shortChanID, - ChanType: channeldb.SingleFunderTweakless, + ChanType: channeldb.SingleFunderTweaklessBit, IsInitiator: true, Capacity: channelCapacity, RemoteCurrentRevocation: bobCommitPoint, @@ -272,7 +280,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, RemoteChanCfg: aliceCfg, IdentityPub: bobKeyPub, FundingOutpoint: *prevOut, - ChanType: channeldb.SingleFunderTweakless, + ChanType: channeldb.SingleFunderTweaklessBit, IsInitiator: false, Capacity: channelCapacity, RemoteCurrentRevocation: aliceCommitPoint, @@ -284,6 +292,9 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, Packager: channeldb.NewChannelPackager(shortChanID), } + // Set custom values on the channel states. 
+ updateChan(aliceChannelState, bobChannelState) + aliceAddr := &net.TCPAddr{ IP: net.ParseIP("127.0.0.1"), Port: 18555, @@ -350,6 +361,12 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, contractcourt.ChainArbitratorConfig{ Notifier: notifier, ChainIO: chainIO, + IsForwardedHTLC: func(chanID lnwire.ShortChannelID, + htlcIndex uint64) bool { + + return true + }, + Clock: clock.NewDefaultClock(), }, dbAlice, ) chainArb.WatchNewChannel(aliceChannelState) diff --git a/tlv/bench_test.go b/tlv/bench_test.go index f71a7eb61e..132ef784f3 100644 --- a/tlv/bench_test.go +++ b/tlv/bench_test.go @@ -6,7 +6,7 @@ import ( "io/ioutil" "testing" - "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/tlv" "github.com/lightningnetwork/lnd/watchtower/blob" "github.com/lightningnetwork/lnd/watchtower/wtwire" @@ -19,7 +19,7 @@ type CreateSessionTLV struct { MaxUpdates uint16 RewardBase uint32 RewardRate uint32 - SweepFeeRate lnwallet.SatPerKWeight + SweepFeeRate chainfee.SatPerKWeight tlvStream *tlv.Stream } @@ -48,24 +48,24 @@ func DBlobType(r io.Reader, val interface{}, buf *[8]byte, l uint64) error { // ESatPerKW is an encoder for lnwallet.SatPerKWeight. func ESatPerKW(w io.Writer, val interface{}, buf *[8]byte) error { - if v, ok := val.(*lnwallet.SatPerKWeight); ok { + if v, ok := val.(*chainfee.SatPerKWeight); ok { return tlv.EUint64(w, uint64(*v), buf) } - return tlv.NewTypeForEncodingErr(val, "lnwallet.SatPerKWeight") + return tlv.NewTypeForEncodingErr(val, "chainfee.SatPerKWeight") } // DSatPerKW is an decoder for lnwallet.SatPerKWeight. 
func DSatPerKW(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
-	if v, ok := val.(*lnwallet.SatPerKWeight); ok {
+	if v, ok := val.(*chainfee.SatPerKWeight); ok {
 		var sat uint64
 		err := tlv.DUint64(r, &sat, buf, l)
 		if err != nil {
 			return err
 		}
-		*v = lnwallet.SatPerKWeight(sat)
+		*v = chainfee.SatPerKWeight(sat)
 		return nil
 	}
-	return tlv.NewTypeForDecodingErr(val, "lnwallet.SatPerKWeight", l, 8)
+	return tlv.NewTypeForDecodingErr(val, "chainfee.SatPerKWeight", l, 8)
 }
 
 // NewCreateSessionTLV initializes a new CreateSessionTLV message.
diff --git a/tlv/record.go b/tlv/record.go
index 75647e03c7..3807095687 100644
--- a/tlv/record.go
+++ b/tlv/record.go
@@ -12,8 +12,10 @@ import (
 // Type is an 64-bit identifier for a TLV Record.
 type Type uint64
 
-// TypeSet is an unordered set of Types.
-type TypeSet map[Type]struct{}
+// TypeMap is a map of parsed Types. The map values are byte slices. If the byte
+// slice is nil, the type was successfully parsed. Otherwise the value is a byte
+// slice containing the encoded data.
+type TypeMap map[Type][]byte
 
 // Encoder is a signature for methods that can encode TLV values. An error
 // should be returned if the Encoder cannot support the underlying type of val.
@@ -43,6 +45,14 @@ func SizeVarBytes(e *[]byte) SizeFunc {
 	}
 }
 
+// RecordProducer is an interface for objects that can produce a Record object
+// capable of encoding and/or decoding the RecordProducer as a Record.
+type RecordProducer interface {
+	// Record returns a Record that can be used to encode or decode the
+	// backing object.
+	Record() Record
+}
+
 // Record holds the required information to encode or decode a TLV record.
 type Record struct {
 	value interface{}
@@ -77,6 +87,14 @@ func (f *Record) Encode(w io.Writer) error {
 	return f.encoder(w, f.value, &b)
 }
 
+// Decode reads in the TLV record from the passed reader. This is useful when a
+// caller wants to decode a *single* TLV record, outside the context of the
+// Stream struct.
+func (f *Record) Decode(r io.Reader, l uint64) error { + var b [8]byte + return f.decoder(r, f.value, &b, l) +} + // MakePrimitiveRecord creates a record for common types. func MakePrimitiveRecord(typ Type, val interface{}) Record { var ( @@ -202,7 +220,7 @@ func StubEncoder(v []byte) Encoder { // MapToRecords encodes the passed TLV map as a series of regular tlv.Record // instances. The resulting set of records will be returned in sorted order by // their type. -func MapToRecords(tlvMap map[uint64][]byte) ([]Record, error) { +func MapToRecords(tlvMap map[uint64][]byte) []Record { records := make([]Record, 0, len(tlvMap)) for k, v := range tlvMap { // We don't pass in a decoder here since we don't actually know @@ -217,7 +235,7 @@ func MapToRecords(tlvMap map[uint64][]byte) ([]Record, error) { SortRecords(records) - return records, nil + return records } // SortRecords is a helper function that will sort a slice of records in place diff --git a/tlv/record_test.go b/tlv/record_test.go index 02d2e89311..d1f1450150 100644 --- a/tlv/record_test.go +++ b/tlv/record_test.go @@ -114,10 +114,7 @@ func TestRecordMapTransformation(t *testing.T) { spew.Sdump(mappedRecords)) } - unmappedRecords, err := MapToRecords(mappedRecords) - if err != nil { - t.Fatalf("#%v: unable to unmap records: %v", i, err) - } + unmappedRecords := MapToRecords(mappedRecords) for i := 0; i < len(testCase.records); i++ { if unmappedRecords[i].Type() != testCase.records[i].Type() { diff --git a/tlv/stream.go b/tlv/stream.go index 60b409199b..4a8eb722fe 100644 --- a/tlv/stream.go +++ b/tlv/stream.go @@ -1,8 +1,8 @@ package tlv import ( + "bytes" "errors" - "fmt" "io" "io/ioutil" "math" @@ -22,15 +22,6 @@ var ErrStreamNotCanonical = errors.New("tlv stream is not canonical") // long to parse. var ErrRecordTooLarge = errors.New("record is too large") -// ErrUnknownRequiredType is an error returned when decoding an unknown and even -// type from a Stream. 
-type ErrUnknownRequiredType Type - -// Error returns a human-readable description of unknown required type. -func (t ErrUnknownRequiredType) Error() string { - return fmt.Sprintf("unknown required type: %d", t) -} - // Stream defines a TLV stream that can be used for encoding or decoding a set // of TLV Records. type Stream struct { @@ -149,16 +140,16 @@ func (s *Stream) Decode(r io.Reader) error { } // DecodeWithParsedTypes is identical to Decode, but if successful, returns a -// TypeSet containing the types of all records that were decoded or ignored from +// TypeMap containing the types of all records that were decoded or ignored from // the stream. -func (s *Stream) DecodeWithParsedTypes(r io.Reader) (TypeSet, error) { - return s.decode(r, make(TypeSet)) +func (s *Stream) DecodeWithParsedTypes(r io.Reader) (TypeMap, error) { + return s.decode(r, make(TypeMap)) } // decode is a helper function that performs the basis of stream decoding. If // the caller needs the set of parsed types, it must provide an initialized -// parsedTypes, otherwise the returned TypeSet will be nil. -func (s *Stream) decode(r io.Reader, parsedTypes TypeSet) (TypeSet, error) { +// parsedTypes, otherwise the returned TypeMap will be nil. +func (s *Stream) decode(r io.Reader, parsedTypes TypeMap) (TypeMap, error) { var ( typ Type min Type @@ -240,15 +231,25 @@ func (s *Stream) decode(r io.Reader, parsedTypes TypeSet) (TypeSet, error) { return nil, err } - // This record type is unknown to the stream, fail if the type - // is even meaning that we are required to understand it. - case typ%2 == 0: - return nil, ErrUnknownRequiredType(typ) + // Record the successfully decoded type if the caller + // provided an initialized TypeMap. + if parsedTypes != nil { + parsedTypes[typ] = nil + } // Otherwise, the record type is unknown and is odd, discard the // number of bytes specified by length. 
default: - _, err := io.CopyN(ioutil.Discard, r, int64(length)) + // If the caller provided an initialized TypeMap, record + // the encoded bytes. + var b *bytes.Buffer + writer := ioutil.Discard + if parsedTypes != nil { + b = bytes.NewBuffer(make([]byte, 0, length)) + writer = b + } + + _, err := io.CopyN(writer, r, int64(length)) switch { // We'll convert any EOFs to ErrUnexpectedEOF, since this @@ -260,12 +261,10 @@ func (s *Stream) decode(r io.Reader, parsedTypes TypeSet) (TypeSet, error) { case err != nil: return nil, err } - } - // Record the successfully decoded or ignored type if the - // caller provided an initialized TypeSet. - if parsedTypes != nil { - parsedTypes[typ] = struct{}{} + if parsedTypes != nil { + parsedTypes[typ] = b.Bytes() + } } // Update our record index so that we can begin our next search diff --git a/tlv/stream_test.go b/tlv/stream_test.go index e9970e7446..8f67a316f2 100644 --- a/tlv/stream_test.go +++ b/tlv/stream_test.go @@ -2,50 +2,89 @@ package tlv_test import ( "bytes" + "reflect" "testing" "github.com/lightningnetwork/lnd/tlv" ) +type parsedTypeTest struct { + name string + encode []tlv.Type + decode []tlv.Type + expParsedTypes tlv.TypeMap +} + // TestParsedTypes asserts that a Stream will properly return the set of types // that it encounters when the type is known-and-decoded or unknown-and-ignored. func TestParsedTypes(t *testing.T) { const ( - knownType = 1 - unknownType = 3 + knownType = 1 + unknownType = 3 + secondKnownType = 4 ) - // Construct a stream that will encode two types, one that will be known - // to the decoder and another that will be unknown. 
- encStream := tlv.MustNewStream( - tlv.MakePrimitiveRecord(knownType, new(uint64)), - tlv.MakePrimitiveRecord(unknownType, new(uint64)), - ) + tests := []parsedTypeTest{ + { + name: "known and unknown", + encode: []tlv.Type{knownType, unknownType}, + decode: []tlv.Type{knownType}, + expParsedTypes: tlv.TypeMap{ + unknownType: []byte{0, 0, 0, 0, 0, 0, 0, 0}, + knownType: nil, + }, + }, + { + name: "known and missing known", + encode: []tlv.Type{knownType}, + decode: []tlv.Type{knownType, secondKnownType}, + expParsedTypes: tlv.TypeMap{ + knownType: nil, + }, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + testParsedTypes(t, test) + }) + } +} + +func testParsedTypes(t *testing.T, test parsedTypeTest) { + encRecords := make([]tlv.Record, 0, len(test.encode)) + for _, typ := range test.encode { + encRecords = append( + encRecords, tlv.MakePrimitiveRecord(typ, new(uint64)), + ) + } + + decRecords := make([]tlv.Record, 0, len(test.decode)) + for _, typ := range test.decode { + decRecords = append( + decRecords, tlv.MakePrimitiveRecord(typ, new(uint64)), + ) + } + + // Construct a stream that will encode the test's set of types. + encStream := tlv.MustNewStream(encRecords...) var b bytes.Buffer if err := encStream.Encode(&b); err != nil { t.Fatalf("unable to encode stream: %v", err) } - // Create a stream that will parse only the known type. - decStream := tlv.MustNewStream( - tlv.MakePrimitiveRecord(knownType, new(uint64)), - ) + // Create a stream that will parse a subset of the test's types. + decStream := tlv.MustNewStream(decRecords...) parsedTypes, err := decStream.DecodeWithParsedTypes( bytes.NewReader(b.Bytes()), ) if err != nil { - t.Fatalf("unable to decode stream: %v", err) - } - - // Assert that both the known and unknown types are included in the set - // of parsed types. 
- if _, ok := parsedTypes[knownType]; !ok { - t.Fatalf("known type %d should be in parsed types", knownType) + t.Fatalf("error decoding: %v", err) } - if _, ok := parsedTypes[unknownType]; !ok { - t.Fatalf("unknown type %d should be in parsed types", - unknownType) + if !reflect.DeepEqual(parsedTypes, test.expParsedTypes) { + t.Fatalf("error mismatch on parsed types") } } diff --git a/tlv/tlv_test.go b/tlv/tlv_test.go index 13ef24685c..e5d4734139 100644 --- a/tlv/tlv_test.go +++ b/tlv/tlv_test.go @@ -203,26 +203,6 @@ var tlvDecodingFailureTests = []struct { }, expErr: io.ErrUnexpectedEOF, }, - { - name: "unknown even type", - bytes: []byte{0x12, 0x00}, - expErr: tlv.ErrUnknownRequiredType(0x12), - }, - { - name: "unknown even type", - bytes: []byte{0xfd, 0x01, 0x02, 0x00}, - expErr: tlv.ErrUnknownRequiredType(0x102), - }, - { - name: "unknown even type", - bytes: []byte{0xfe, 0x01, 0x00, 0x00, 0x02, 0x00}, - expErr: tlv.ErrUnknownRequiredType(0x01000002), - }, - { - name: "unknown even type", - bytes: []byte{0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00}, - expErr: tlv.ErrUnknownRequiredType(0x0100000000000002), - }, { name: "greater than encoding length for n1's amt", bytes: []byte{0x01, 0x09, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, @@ -340,12 +320,6 @@ var tlvDecodingFailureTests = []struct { expErr: tlv.NewTypeForDecodingErr(new(nodeAmts), "nodeAmts", 50, 49), skipN2: true, }, - { - name: "unknown required type or n1", - bytes: []byte{0x00, 0x00}, - expErr: tlv.ErrUnknownRequiredType(0x00), - skipN2: true, - }, { name: "less than encoding length for n1's cltvDelta", bytes: []byte{0xfd, 0x00, 0x0fe, 0x00}, @@ -364,12 +338,6 @@ var tlvDecodingFailureTests = []struct { expErr: tlv.NewTypeForDecodingErr(new(uint16), "uint16", 3, 2), skipN2: true, }, - { - name: "unknown even field for n1's namespace", - bytes: []byte{0x0a, 0x00}, - expErr: tlv.ErrUnknownRequiredType(0x0a), - skipN2: true, - }, { name: "valid records but invalid ordering", 
bytes: []byte{0x02, 0x08,
diff --git a/tlv/truncated.go b/tlv/truncated.go
index 8ed9cb0d45..930a2ccebf 100644
--- a/tlv/truncated.go
+++ b/tlv/truncated.go
@@ -40,6 +40,15 @@ func ETUint16(w io.Writer, val interface{}, buf *[8]byte) error {
 	return NewTypeForEncodingErr(val, "uint16")
 }
 
+// ETUint16T is an Encoder for truncated uint16 values, where leading zeros will
+// be omitted. Unlike ETUint16, val is passed by value rather than behind an
+// interface, so an error is returned only if the write to w fails.
+func ETUint16T(w io.Writer, val uint16, buf *[8]byte) error {
+	binary.BigEndian.PutUint16(buf[:2], val)
+	numZeros := numLeadingZeroBytes16(val)
+	_, err := w.Write(buf[numZeros:2])
+	return err
+}
+
 // DTUint16 is an Decoder for truncated uint16 values, where leading zeros will
 // be resurrected. An error is returned if val is not a *uint16.
 func DTUint16(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
@@ -92,6 +101,15 @@ func ETUint32(w io.Writer, val interface{}, buf *[8]byte) error {
 	return NewTypeForEncodingErr(val, "uint32")
 }
 
+// ETUint32T is an Encoder for truncated uint32 values, where leading zeros will
+// be omitted. Unlike ETUint32, val is passed by value rather than behind an
+// interface, so an error is returned only if the write to w fails.
+func ETUint32T(w io.Writer, val uint32, buf *[8]byte) error {
+	binary.BigEndian.PutUint32(buf[:4], val)
+	numZeros := numLeadingZeroBytes32(val)
+	_, err := w.Write(buf[numZeros:4])
+	return err
+}
+
 // DTUint32 is an Decoder for truncated uint32 values, where leading zeros will
 // be resurrected. An error is returned if val is not a *uint32.
 func DTUint32(r io.Reader, val interface{}, buf *[8]byte, l uint64) error {
@@ -154,6 +172,15 @@ func ETUint64(w io.Writer, val interface{}, buf *[8]byte) error {
 	return NewTypeForEncodingErr(val, "uint64")
 }
 
+// ETUint64T is an Encoder for truncated uint64 values, where leading zeros will
+// be omitted. Unlike ETUint64, val is passed by value rather than behind an
+// interface, so an error is returned only if the write to w fails.
+func ETUint64T(w io.Writer, val uint64, buf *[8]byte) error { + binary.BigEndian.PutUint64(buf[:], val) + numZeros := numLeadingZeroBytes64(val) + _, err := w.Write(buf[numZeros:]) + return err +} + // DTUint64 is an Decoder for truncated uint64 values, where leading zeros will // be resurrected. An error is returned if val is not a *uint64. func DTUint64(r io.Reader, val interface{}, buf *[8]byte, l uint64) error { diff --git a/tlv/truncated_test.go b/tlv/truncated_test.go index d2a34562e7..eb0f83a75f 100644 --- a/tlv/truncated_test.go +++ b/tlv/truncated_test.go @@ -60,6 +60,8 @@ func TestSizeTUint16(t *testing.T) { func TestTUint16(t *testing.T) { var buf [8]byte for _, test := range tuint16Tests { + test := test + if len(test.bytes) != int(test.size) { t.Fatalf("invalid test case, "+ "len(bytes)[%d] != size[%d]", @@ -68,6 +70,7 @@ func TestTUint16(t *testing.T) { name := fmt.Sprintf("0x%x", test.value) t.Run(name, func(t *testing.T) { + // Test generic encoder. var b bytes.Buffer err := tlv.ETUint16(&b, &test.value, &buf) if err != nil { @@ -80,6 +83,19 @@ func TestTUint16(t *testing.T) { test.bytes, b.Bytes()) } + // Test non-generic encoder. + var b2 bytes.Buffer + err = tlv.ETUint16T(&b2, test.value, &buf) + if err != nil { + t.Fatalf("unable to encode tuint16: %v", err) + } + + if !bytes.Equal(b2.Bytes(), test.bytes) { + t.Fatalf("encoding mismatch, "+ + "expected: %x, got: %x", + test.bytes, b2.Bytes()) + } + var value uint16 r := bytes.NewReader(b.Bytes()) err = tlv.DTUint16(r, &value, &buf, test.size) @@ -168,6 +184,8 @@ func TestSizeTUint32(t *testing.T) { func TestTUint32(t *testing.T) { var buf [8]byte for _, test := range tuint32Tests { + test := test + if len(test.bytes) != int(test.size) { t.Fatalf("invalid test case, "+ "len(bytes)[%d] != size[%d]", @@ -176,6 +194,7 @@ func TestTUint32(t *testing.T) { name := fmt.Sprintf("0x%x", test.value) t.Run(name, func(t *testing.T) { + // Test generic encoder. 
var b bytes.Buffer err := tlv.ETUint32(&b, &test.value, &buf) if err != nil { @@ -188,6 +207,19 @@ func TestTUint32(t *testing.T) { test.bytes, b.Bytes()) } + // Test non-generic encoder. + var b2 bytes.Buffer + err = tlv.ETUint32T(&b2, test.value, &buf) + if err != nil { + t.Fatalf("unable to encode tuint32: %v", err) + } + + if !bytes.Equal(b2.Bytes(), test.bytes) { + t.Fatalf("encoding mismatch, "+ + "expected: %x, got: %x", + test.bytes, b2.Bytes()) + } + var value uint32 r := bytes.NewReader(b.Bytes()) err = tlv.DTUint32(r, &value, &buf, test.size) @@ -322,6 +354,8 @@ func TestSizeTUint64(t *testing.T) { func TestTUint64(t *testing.T) { var buf [8]byte for _, test := range tuint64Tests { + test := test + if len(test.bytes) != int(test.size) { t.Fatalf("invalid test case, "+ "len(bytes)[%d] != size[%d]", @@ -330,6 +364,7 @@ func TestTUint64(t *testing.T) { name := fmt.Sprintf("0x%x", test.value) t.Run(name, func(t *testing.T) { + // Test generic encoder. var b bytes.Buffer err := tlv.ETUint64(&b, &test.value, &buf) if err != nil { @@ -342,6 +377,19 @@ func TestTUint64(t *testing.T) { test.bytes, b.Bytes()) } + // Test non-generic encoder. + var b2 bytes.Buffer + err = tlv.ETUint64T(&b2, test.value, &buf) + if err != nil { + t.Fatalf("unable to encode tuint64: %v", err) + } + + if !bytes.Equal(b2.Bytes(), test.bytes) { + t.Fatalf("encoding mismatch, "+ + "expected: %x, got: %x", + test.bytes, b2.Bytes()) + } + var value uint64 r := bytes.NewReader(b.Bytes()) err = tlv.DTUint64(r, &value, &buf, test.size) diff --git a/tlv/varint.go b/tlv/varint.go index 3888bfcb47..38c7a7cd69 100644 --- a/tlv/varint.go +++ b/tlv/varint.go @@ -4,6 +4,8 @@ import ( "encoding/binary" "errors" "io" + + "github.com/btcsuite/btcd/wire" ) // ErrVarIntNotCanonical signals that the decoded varint was not minimally encoded. 
@@ -107,3 +109,8 @@ func WriteVarInt(w io.Writer, val uint64, buf *[8]byte) error { _, err := w.Write(buf[:length]) return err } + +// VarIntSize returns the required number of bytes to encode a var int. +func VarIntSize(val uint64) uint64 { + return uint64(wire.VarIntSerializeSize(val)) +} diff --git a/tor/README.md b/tor/README.md index 337f0159a4..49590c9a7d 100644 --- a/tor/README.md +++ b/tor/README.md @@ -8,8 +8,8 @@ Tor daemon. So far, supported functions include: * Routing DNS queries over Tor (A, AAAA, SRV). * Limited Tor Control functionality (synchronous messages only). So far, this includes: - * Support for SAFECOOKIE authentication only as a sane default. - * Creating v2 onion services. + * Support for SAFECOOKIE, HASHEDPASSWORD, and NULL authentication methods. + * Creating v2 and v3 onion services. In the future, the Tor Control functionality will be extended to support v3 onion services, asynchronous messages, etc. diff --git a/tor/add_onion.go b/tor/add_onion.go new file mode 100644 index 0000000000..aca433ad40 --- /dev/null +++ b/tor/add_onion.go @@ -0,0 +1,217 @@ +package tor + +import ( + "errors" + "fmt" + "io/ioutil" + "os" +) + +var ( + // ErrNoPrivateKey is an error returned by the OnionStore.PrivateKey + // method when a private key hasn't yet been stored. + ErrNoPrivateKey = errors.New("private key not found") +) + +// OnionType denotes the type of the onion service. +type OnionType int + +const ( + // V2 denotes that the onion service is V2. + V2 OnionType = iota + + // V3 denotes that the onion service is V3. + V3 +) + +// OnionStore is a store containing information about a particular onion +// service. +type OnionStore interface { + // StorePrivateKey stores the private key according to the + // implementation of the OnionStore interface. + StorePrivateKey(OnionType, []byte) error + + // PrivateKey retrieves a stored private key. If it is not found, then + // ErrNoPrivateKey should be returned. 
+ PrivateKey(OnionType) ([]byte, error) + + // DeletePrivateKey securely removes the private key from the store. + DeletePrivateKey(OnionType) error +} + +// OnionFile is a file-based implementation of the OnionStore interface that +// stores an onion service's private key. +type OnionFile struct { + privateKeyPath string + privateKeyPerm os.FileMode +} + +// A compile-time constraint to ensure OnionFile satisfies the OnionStore +// interface. +var _ OnionStore = (*OnionFile)(nil) + +// NewOnionFile creates a file-based implementation of the OnionStore interface +// to store an onion service's private key. +func NewOnionFile(privateKeyPath string, privateKeyPerm os.FileMode) *OnionFile { + return &OnionFile{ + privateKeyPath: privateKeyPath, + privateKeyPerm: privateKeyPerm, + } +} + +// StorePrivateKey stores the private key at its expected path. +func (f *OnionFile) StorePrivateKey(_ OnionType, privateKey []byte) error { + return ioutil.WriteFile(f.privateKeyPath, privateKey, f.privateKeyPerm) +} + +// PrivateKey retrieves the private key from its expected path. If the file does +// not exist, then ErrNoPrivateKey is returned. +func (f *OnionFile) PrivateKey(_ OnionType) ([]byte, error) { + if _, err := os.Stat(f.privateKeyPath); os.IsNotExist(err) { + return nil, ErrNoPrivateKey + } + return ioutil.ReadFile(f.privateKeyPath) +} + +// DeletePrivateKey removes the file containing the private key. +func (f *OnionFile) DeletePrivateKey(_ OnionType) error { + return os.Remove(f.privateKeyPath) +} + +// AddOnionConfig houses all of the required parameters in order to successfully +// create a new onion service or restore an existing one. +type AddOnionConfig struct { + // Type denotes the type of the onion service that should be created. + Type OnionType + + // VirtualPort is the externally reachable port of the onion address. + VirtualPort int + + // TargetPorts is the set of ports that the service will be listening on + // locally. 
The Tor server will choose a random port from this set
+	// to forward the traffic from the virtual port.
+	//
+	// NOTE: If nil/empty, the virtual port will be used as the only target
+	// port.
+	TargetPorts []int
+
+	// Store is responsible for storing all onion service related
+	// information.
+	//
+	// NOTE: If not specified, then nothing will be stored, making onion
+	// services unrecoverable after shutdown.
+	Store OnionStore
+}
+
+// AddOnion creates an onion service and returns its onion address. Once
+// created, the new onion service will remain active until the connection
+// between the controller and the Tor server is closed.
+func (c *Controller) AddOnion(cfg AddOnionConfig) (*OnionAddr, error) {
+	// Before sending the request to create an onion service to the Tor
+	// server, we'll make sure that it supports V3 onion services if that
+	// was the type requested.
+	if cfg.Type == V3 {
+		if err := supportsV3(c.version); err != nil {
+			return nil, err
+		}
+	}
+
+	// We'll start off by checking if the store contains an existing private
+	// key. If it does not, then we should request the server to create a
+	// new onion service and return its private key. Otherwise, we'll
+	// request the server to recreate the onion service from our private key.
+	var keyParam string
+	switch cfg.Type {
+	case V2:
+		keyParam = "NEW:RSA1024"
+	case V3:
+		keyParam = "NEW:ED25519-V3"
+	}
+
+	if cfg.Store != nil {
+		privateKey, err := cfg.Store.PrivateKey(cfg.Type)
+		switch err {
+		// Proceed to request a new onion service.
+		case ErrNoPrivateKey:
+
+		// Recover the onion service with the private key found.
+		case nil:
+			keyParam = string(privateKey)
+
+		default:
+			return nil, err
+		}
+	}
+
+	// Now, we'll create a mapping from the virtual port to each target
+	// port. If no target ports were specified, we'll use the virtual port
+	// to provide a one-to-one mapping.
+ var portParam string + + // Helper function which appends the correct Port param depending on + // whether the user chose to use a custom target IP address or not. + pushPortParam := func(targetPort int) { + if c.targetIPAddress == "" { + portParam += fmt.Sprintf("Port=%d,%d ", cfg.VirtualPort, + targetPort) + } else { + portParam += fmt.Sprintf("Port=%d,%s:%d ", cfg.VirtualPort, + c.targetIPAddress, targetPort) + } + } + + if len(cfg.TargetPorts) == 0 { + pushPortParam(cfg.VirtualPort) + } else { + for _, targetPort := range cfg.TargetPorts { + pushPortParam(targetPort) + } + } + + // Send the command to create the onion service to the Tor server and + // await its response. + cmd := fmt.Sprintf("ADD_ONION %s %s", keyParam, portParam) + _, reply, err := c.sendCommand(cmd) + if err != nil { + return nil, err + } + + // If successful, the reply from the server should be of the following + // format, depending on whether a private key has been requested: + // + // C: ADD_ONION RSA1024:[Blob Redacted] Port=80,8080 + // S: 250-ServiceID=testonion1234567 + // S: 250 OK + // + // C: ADD_ONION NEW:RSA1024 Port=80,8080 + // S: 250-ServiceID=testonion1234567 + // S: 250-PrivateKey=RSA1024:[Blob Redacted] + // S: 250 OK + // + // We're interested in retrieving the service ID, which is the public + // name of the service, and the private key if requested. + replyParams := parseTorReply(reply) + serviceID, ok := replyParams["ServiceID"] + if !ok { + return nil, errors.New("service id not found in reply") + } + + // If a new onion service was created and an onion store was provided, + // we'll store its private key to disk in the event that it needs to be + // recreated later on. 
+ if privateKey, ok := replyParams["PrivateKey"]; cfg.Store != nil && ok { + err := cfg.Store.StorePrivateKey(cfg.Type, []byte(privateKey)) + if err != nil { + return nil, fmt.Errorf("unable to write private key "+ + "to file: %v", err) + } + } + + // Finally, we'll return the onion address composed of the service ID, + // along with the onion suffix, and the port this onion service can be + // reached at externally. + return &OnionAddr{ + OnionService: serviceID + ".onion", + Port: cfg.VirtualPort, + }, nil +} diff --git a/tor/add_onion_test.go b/tor/add_onion_test.go new file mode 100644 index 0000000000..9cd255d323 --- /dev/null +++ b/tor/add_onion_test.go @@ -0,0 +1,51 @@ +package tor + +import ( + "bytes" + "io/ioutil" + "path/filepath" + "testing" +) + +// TestOnionFile tests that the OnionFile implementation of the OnionStore +// interface behaves as expected. +func TestOnionFile(t *testing.T) { + t.Parallel() + + tempDir, err := ioutil.TempDir("", "onion_store") + if err != nil { + t.Fatalf("unable to create temp dir: %v", err) + } + + privateKey := []byte("hide_me_plz") + privateKeyPath := filepath.Join(tempDir, "secret") + + // Create a new file-based onion store. A private key should not exist + // yet. + onionFile := NewOnionFile(privateKeyPath, 0600) + if _, err := onionFile.PrivateKey(V2); err != ErrNoPrivateKey { + t.Fatalf("expected ErrNoPrivateKey, got \"%v\"", err) + } + + // Store the private key and ensure what's stored matches. + if err := onionFile.StorePrivateKey(V2, privateKey); err != nil { + t.Fatalf("unable to store private key: %v", err) + } + storePrivateKey, err := onionFile.PrivateKey(V2) + if err != nil { + t.Fatalf("unable to retrieve private key: %v", err) + } + if !bytes.Equal(storePrivateKey, privateKey) { + t.Fatalf("expected private key \"%v\", got \"%v\"", + string(privateKey), string(storePrivateKey)) + } + + // Finally, delete the private key. We should no longer be able to + // retrieve it. 
+ if err := onionFile.DeletePrivateKey(V2); err != nil { + t.Fatalf("unable to delete private key: %v", err) + } + if _, err := onionFile.PrivateKey(V2); err != ErrNoPrivateKey { + t.Fatal("found deleted private key") + } +} diff --git a/tor/controller.go b/tor/controller.go index b96db967a8..f828b63979 100644 --- a/tor/controller.go +++ b/tor/controller.go @@ -10,7 +10,6 @@ import ( "fmt" "io/ioutil" "net/textproto" - "os" "strconv" "strings" "sync/atomic" @@ -36,6 +35,16 @@ const ( // must be running on. This is needed in order to create v3 onion // services through Tor's control port. MinTorVersion = "0.3.3.6" + + // authSafeCookie is the name of the SAFECOOKIE authentication method. + authSafeCookie = "SAFECOOKIE" + + // authHashedPassword is the name of the HASHEDPASSWORD authentication + // method. + authHashedPassword = "HASHEDPASSWORD" + + // authNull is the name of the NULL authentication method. + authNull = "NULL" ) var ( @@ -79,14 +88,30 @@ type Controller struct { // controller connections on. controlAddr string + // password, if non-empty, signals that the controller should attempt to + // authenticate itself with the backing Tor daemon through the + // HASHEDPASSWORD authentication method with this value. + password string + // version is the current version of the Tor server. version string + + // targetIPAddress is the IP address which we tell the Tor server to use + // to connect to the LND node. This is required when the Tor server + // runs on another host, otherwise the service will not be reachable. + targetIPAddress string } // NewController returns a new Tor controller that will be able to interact with // a Tor server. 
-func NewController(controlAddr string) *Controller { - return &Controller{controlAddr: controlAddr} +func NewController(controlAddr string, targetIPAddress string, + password string) *Controller { + + return &Controller{ + controlAddr: controlAddr, + targetIPAddress: targetIPAddress, + password: password, + } } // Start establishes and authenticates the connection between the controller and @@ -163,26 +188,74 @@ func parseTorReply(reply string) map[string]string { } // authenticate authenticates the connection between the controller and the -// Tor server using the SAFECOOKIE or NULL authentication method. +// Tor server using either of the following supported authentication methods +// depending on its configuration: SAFECOOKIE, HASHEDPASSWORD, and NULL. func (c *Controller) authenticate() error { + protocolInfo, err := c.protocolInfo() + if err != nil { + return err + } + + // With the version retrieved, we'll cache it now in case it needs to be + // used later on. + c.version = protocolInfo.version() + + switch { + // If a password was provided, then we should attempt to use the + // HASHEDPASSWORD authentication method. + case c.password != "": + if !protocolInfo.supportsAuthMethod(authHashedPassword) { + return fmt.Errorf("%v authentication method not "+ + "supported", authHashedPassword) + } + + return c.authenticateViaHashedPassword() + + // Otherwise, attempt to authentication via the SAFECOOKIE method as it + // provides the most security. + case protocolInfo.supportsAuthMethod(authSafeCookie): + return c.authenticateViaSafeCookie(protocolInfo) + + // Fallback to the NULL method if any others aren't supported. + case protocolInfo.supportsAuthMethod(authNull): + return c.authenticateViaNull() + + // No supported authentication methods, fail. 
+ default: + return errors.New("the Tor server must be configured with " + + "NULL, SAFECOOKIE, or HASHEDPASSWORD authentication") + } +} + +// authenticateViaNull authenticates the controller with the Tor server using +// the NULL authentication method. +func (c *Controller) authenticateViaNull() error { + _, _, err := c.sendCommand("AUTHENTICATE") + return err +} + +// authenticateViaHashedPassword authenticates the controller with the Tor +// server using the HASHEDPASSWORD authentication method. +func (c *Controller) authenticateViaHashedPassword() error { + cmd := fmt.Sprintf("AUTHENTICATE \"%s\"", c.password) + _, _, err := c.sendCommand(cmd) + return err +} + +// authenticateViaSafeCookie authenticates the controller with the Tor server +// using the SAFECOOKIE authentication method. +func (c *Controller) authenticateViaSafeCookie(info protocolInfo) error { // Before proceeding to authenticate the connection, we'll retrieve // the authentication cookie of the Tor server. This will be used // throughout the authentication routine. We do this before as once the // authentication routine has begun, it is not possible to retrieve it // mid-way. - cookie, err := c.getAuthCookie() + cookie, err := c.getAuthCookie(info) if err != nil { return fmt.Errorf("unable to retrieve authentication cookie: "+ "%v", err) } - // If cookie is empty and there's no error, we have a NULL - // authentication method that we should use instead. - if len(cookie) == 0 { - _, _, err := c.sendCommand("AUTHENTICATE") - return err - } - // Authenticating using the SAFECOOKIE authentication method is a two // step process. We'll kick off the authentication routine by sending // the AUTHCHALLENGE command followed by a hex-encoded 32-byte nonce. @@ -267,36 +340,15 @@ func (c *Controller) authenticate() error { } // getAuthCookie retrieves the authentication cookie in bytes from the Tor -// server. Cookie authentication must be enabled for this to work. 
The boolean -func (c *Controller) getAuthCookie() ([]byte, error) { - // Retrieve the authentication methods currently supported by the Tor - // server. - authMethods, cookieFilePath, version, err := c.ProtocolInfo() - if err != nil { - return nil, err - } - - // With the version retrieved, we'll cache it now in case it needs to be - // used later on. - c.version = version - - // Ensure that the Tor server supports the SAFECOOKIE authentication - // method or the NULL method. If NULL, we don't need the cookie info - // below this loop, so we just return. - safeCookieSupport := false - for _, authMethod := range authMethods { - if authMethod == "SAFECOOKIE" { - safeCookieSupport = true - } - if authMethod == "NULL" { - return nil, nil - } - } - - if !safeCookieSupport { - return nil, errors.New("the Tor server is currently not " + - "configured for cookie or null authentication") +// server. Cookie authentication must be enabled for this to work. +func (c *Controller) getAuthCookie(info protocolInfo) ([]byte, error) { + // Retrieve the cookie file path from the PROTOCOLINFO reply. + cookieFilePath, ok := info["COOKIEFILE"] + if !ok { + return nil, errors.New("COOKIEFILE not found in PROTOCOLINFO " + + "reply") } + cookieFilePath = strings.Trim(cookieFilePath, "\"") // Read the cookie from the file and ensure it has the correct length. cookie, err := ioutil.ReadFile(cookieFilePath) @@ -355,176 +407,34 @@ func supportsV3(version string) error { return nil } -// ProtocolInfo returns the different authentication methods supported by the -// Tor server and the version of the Tor server. -func (c *Controller) ProtocolInfo() ([]string, string, string, error) { - // We'll start off by sending the "PROTOCOLINFO" command to the Tor - // server. 
We should receive a reply of the following format: - // - // METHODS=COOKIE,SAFECOOKIE - // COOKIEFILE="/home/user/.tor/control_auth_cookie" - // VERSION Tor="0.3.2.10" - // - // We're interested in retrieving all of these fields, so we'll parse - // our reply to do so. - cmd := fmt.Sprintf("PROTOCOLINFO %d", ProtocolInfoVersion) - _, reply, err := c.sendCommand(cmd) - if err != nil { - return nil, "", "", err - } - - info := parseTorReply(reply) - methods, ok := info["METHODS"] - if !ok { - return nil, "", "", errors.New("auth methods not found in " + - "reply") - } +// protocolInfo is encompasses the details of a response to a PROTOCOLINFO +// command. +type protocolInfo map[string]string - cookieFile, ok := info["COOKIEFILE"] - if !ok && !strings.Contains(methods, "NULL") { - return nil, "", "", errors.New("cookie file path not found " + - "in reply") - } +// version returns the Tor version as reported by the server. +func (i protocolInfo) version() string { + version := i["Tor"] + return strings.Trim(version, "\"") +} - version, ok := info["Tor"] +// supportsAuthMethod determines whether the Tor server supports the given +// authentication method. +func (i protocolInfo) supportsAuthMethod(method string) bool { + methods, ok := i["METHODS"] if !ok { - return nil, "", "", errors.New("Tor version not found in reply") + return false } - - // Finally, we'll clean up the results before returning them. - authMethods := strings.Split(methods, ",") - cookieFilePath := strings.Trim(cookieFile, "\"") - torVersion := strings.Trim(version, "\"") - - return authMethods, cookieFilePath, torVersion, nil -} - -// OnionType denotes the type of the onion service. -type OnionType int - -const ( - // V2 denotes that the onion service is V2. - V2 OnionType = iota - - // V3 denotes that the onion service is V3. - V3 -) - -// AddOnionConfig houses all of the required parameters in order to successfully -// create a new onion service or restore an existing one. 
-type AddOnionConfig struct { - // Type denotes the type of the onion service that should be created. - Type OnionType - - // VirtualPort is the externally reachable port of the onion address. - VirtualPort int - - // TargetPorts is the set of ports that the service will be listening on - // locally. The Tor server will use choose a random port from this set - // to forward the traffic from the virtual port. - // - // NOTE: If nil/empty, the virtual port will be used as the only target - // port. - TargetPorts []int - - // PrivateKeyPath is the full path to where the onion service's private - // key is stored. This can be used to restore an existing onion service. - PrivateKeyPath string + return strings.Contains(methods, method) } -// AddOnion creates an onion service and returns its onion address. Once -// created, the new onion service will remain active until the connection -// between the controller and the Tor server is closed. -func (c *Controller) AddOnion(cfg AddOnionConfig) (*OnionAddr, error) { - // Before sending the request to create an onion service to the Tor - // server, we'll make sure that it supports V3 onion services if that - // was the type requested. - if cfg.Type == V3 { - if err := supportsV3(c.version); err != nil { - return nil, err - } - } - - // We'll start off by checking if the file containing the private key - // exists. If it does not, then we should request the server to create - // a new onion service and return its private key. Otherwise, we'll - // request the server to recreate the onion server from our private key. - var keyParam string - if _, err := os.Stat(cfg.PrivateKeyPath); os.IsNotExist(err) { - switch cfg.Type { - case V2: - keyParam = "NEW:RSA1024" - case V3: - keyParam = "NEW:ED25519-V3" - } - } else { - privateKey, err := ioutil.ReadFile(cfg.PrivateKeyPath) - if err != nil { - return nil, err - } - keyParam = string(privateKey) - } - - // Now, we'll create a mapping from the virtual port to each target - // port. 
If no target ports were specified, we'll use the virtual port - // to provide a one-to-one mapping. - var portParam string - if len(cfg.TargetPorts) == 0 { - portParam += fmt.Sprintf("Port=%d,%d ", cfg.VirtualPort, - cfg.VirtualPort) - } else { - for _, targetPort := range cfg.TargetPorts { - portParam += fmt.Sprintf("Port=%d,%d ", cfg.VirtualPort, - targetPort) - } - } - - // Send the command to create the onion service to the Tor server and - // await its response. - cmd := fmt.Sprintf("ADD_ONION %s %s", keyParam, portParam) +// protocolInfo sends a "PROTOCOLINFO" command to the Tor server and returns its +// response. +func (c *Controller) protocolInfo() (protocolInfo, error) { + cmd := fmt.Sprintf("PROTOCOLINFO %d", ProtocolInfoVersion) _, reply, err := c.sendCommand(cmd) if err != nil { return nil, err } - // If successful, the reply from the server should be of the following - // format, depending on whether a private key has been requested: - // - // C: ADD_ONION RSA1024:[Blob Redacted] Port=80,8080 - // S: 250-ServiceID=testonion1234567 - // S: 250 OK - // - // C: ADD_ONION NEW:RSA1024 Port=80,8080 - // S: 250-ServiceID=testonion1234567 - // S: 250-PrivateKey=RSA1024:[Blob Redacted] - // S: 250 OK - // - // We're interested in retrieving the service ID, which is the public - // name of the service, and the private key if requested. - replyParams := parseTorReply(reply) - serviceID, ok := replyParams["ServiceID"] - if !ok { - return nil, errors.New("service id not found in reply") - } - - // If a new onion service was created, we'll write its private key to - // disk under strict permissions in the event that it needs to be - // recreated later on. 
- if privateKey, ok := replyParams["PrivateKey"]; ok { - err := ioutil.WriteFile( - cfg.PrivateKeyPath, []byte(privateKey), 0600, - ) - if err != nil { - return nil, fmt.Errorf("unable to write private key "+ - "to file: %v", err) - } - } - - // Finally, we'll return the onion address composed of the service ID, - // along with the onion suffix, and the port this onion service can be - // reached at externally. - return &OnionAddr{ - OnionService: serviceID + ".onion", - Port: cfg.VirtualPort, - }, nil + return protocolInfo(parseTorReply(reply)), nil } diff --git a/utxonursery.go b/utxonursery.go index 713c8c9d41..abff354782 100644 --- a/utxonursery.go +++ b/utxonursery.go @@ -201,7 +201,7 @@ type NurseryConfig struct { Store NurseryStore // Sweep sweeps an input back to the wallet. - SweepInput func(input.Input, sweep.FeePreference) (chan sweep.Result, error) + SweepInput func(input.Input, sweep.Params) (chan sweep.Result, error) } // utxoNursery is a system dedicated to incubating time-locked outputs created @@ -330,7 +330,6 @@ func (u *utxoNursery) Stop() error { // they're CLTV absolute time locked, or if they're CSV relative time locked. // Once all outputs reach maturity, they'll be swept back into the wallet. func (u *utxoNursery) IncubateOutputs(chanPoint wire.OutPoint, - commitResolution *lnwallet.CommitOutputResolution, outgoingHtlcs []lnwallet.OutgoingHtlcResolution, incomingHtlcs []lnwallet.IncomingHtlcResolution, broadcastHeight uint32) error { @@ -352,8 +351,6 @@ func (u *utxoNursery) IncubateOutputs(chanPoint wire.OutPoint, numHtlcs := len(incomingHtlcs) + len(outgoingHtlcs) var ( - hasCommit bool - // Kid outputs can be swept after an initial confirmation // followed by a maturity period.Baby outputs are two stage and // will need to wait for an absolute time out to reach a @@ -364,28 +361,6 @@ func (u *utxoNursery) IncubateOutputs(chanPoint wire.OutPoint, // 1. Build all the spendable outputs that we will try to incubate. 
- // It could be that our to-self output was below the dust limit. In - // that case the commit resolution would be nil and we would not have - // that output to incubate. - if commitResolution != nil { - hasCommit = true - selfOutput := makeKidOutput( - &commitResolution.SelfOutPoint, - &chanPoint, - commitResolution.MaturityDelay, - input.CommitmentTimeLock, - &commitResolution.SelfOutputSignDesc, - 0, - ) - - // We'll skip any zero valued outputs as this indicates we - // don't have a settled balance within the commitment - // transaction. - if selfOutput.Amount() > 0 { - kidOutputs = append(kidOutputs, selfOutput) - } - } - // TODO(roasbeef): query and see if we already have, if so don't add? // For each incoming HTLC, we'll register a kid output marked as a @@ -422,10 +397,11 @@ func (u *utxoNursery) IncubateOutputs(chanPoint wire.OutPoint, // Otherwise, this is actually a kid output as we can sweep it // once the commitment transaction confirms, and the absolute - // CLTV lock has expired. We set the CSV delay to zero to - // indicate this is actually a CLTV output. + // CLTV lock has expired. We set the CSV delay what the + // resolution encodes, since the sequence number must be set + // accordingly. htlcOutput := makeKidOutput( - &htlcRes.ClaimOutpoint, &chanPoint, 0, + &htlcRes.ClaimOutpoint, &chanPoint, htlcRes.CsvDelay, input.HtlcOfferedRemoteTimeout, &htlcRes.SweepSignDesc, htlcRes.Expiry, ) @@ -436,8 +412,8 @@ func (u *utxoNursery) IncubateOutputs(chanPoint wire.OutPoint, // * need ability to cancel in the case that we learn of pre-image or // remote party pulls - utxnLog.Infof("Incubating Channel(%s) has-commit=%v, num-htlcs=%d", - chanPoint, hasCommit, numHtlcs) + utxnLog.Infof("Incubating Channel(%s) num-htlcs=%d", + chanPoint, numHtlcs) u.mu.Lock() defer u.mu.Unlock() @@ -538,8 +514,6 @@ func (u *utxoNursery) NurseryReport( // Preschool outputs are awaiting the // confirmation of the commitment transaction. 
switch kid.WitnessType() { - case input.CommitmentTimeLock: - report.AddLimboCommitment(&kid) case input.HtlcAcceptedSuccessSecondLevel: // An HTLC output on our commitment transaction @@ -561,11 +535,6 @@ func (u *utxoNursery) NurseryReport( // We can distinguish them via their witness // types. switch kid.WitnessType() { - case input.CommitmentTimeLock: - // The commitment transaction has been - // confirmed, and we are waiting the CSV - // delay to expire. - report.AddLimboCommitment(&kid) case input.HtlcOfferedRemoteTimeout: // This is an HTLC output on the @@ -590,11 +559,6 @@ func (u *utxoNursery) NurseryReport( // will contribute towards the recovered // balance. switch kid.WitnessType() { - case input.CommitmentTimeLock: - // The commitment output was - // successfully swept back into a - // regular p2wkh output. - report.AddRecoveredCommitment(&kid) case input.HtlcAcceptedSuccessSecondLevel: fallthrough @@ -815,7 +779,9 @@ func (u *utxoNursery) sweepMatureOutputs(classHeight uint32, // passed in with disastrous consequences. local := output - resultChan, err := u.cfg.SweepInput(&local, feePref) + resultChan, err := u.cfg.SweepInput( + &local, sweep.Params{Fee: feePref}, + ) if err != nil { return err } @@ -1071,11 +1037,6 @@ type contractMaturityReport struct { // recoveredBalance is the total value that has been successfully swept // back to the user's wallet. recoveredBalance btcutil.Amount - - // maturityHeight is the absolute block height that this output will - // mature at. - maturityHeight uint32 - // htlcs records a maturity report for each htlc output in this channel. htlcs []htlcMaturityReport } @@ -1100,26 +1061,6 @@ type htlcMaturityReport struct { stage uint32 } -// AddLimboCommitment adds an incubating commitment output to maturity -// report's htlcs, and contributes its amount to the limbo balance. 
-func (c *contractMaturityReport) AddLimboCommitment(kid *kidOutput) { - c.limboBalance += kid.Amount() - - // If the confirmation height is set, then this means the contract has - // been confirmed, and we know the final maturity height. - if kid.ConfHeight() != 0 { - c.maturityHeight = kid.BlocksToMaturity() + kid.ConfHeight() - } -} - -// AddRecoveredCommitment adds a graduated commitment output to maturity -// report's htlcs, and contributes its amount to the recovered balance. -func (c *contractMaturityReport) AddRecoveredCommitment(kid *kidOutput) { - c.recoveredBalance += kid.Amount() - - c.maturityHeight = kid.BlocksToMaturity() + kid.ConfHeight() -} - // AddLimboStage1TimeoutHtlc adds an htlc crib output to the maturity report's // htlcs, and contributes its amount to the limbo balance. func (c *contractMaturityReport) AddLimboStage1TimeoutHtlc(baby *babyOutput) { @@ -1331,7 +1272,8 @@ type kidOutput struct { // output. // // NOTE: This will be set for: commitment outputs, and incoming HTLC's. - // Otherwise, this will be zero. + // Otherwise, this will be zero. It will also be non-zero for + // commitment types which requires confirmed spends. 
blocksToMaturity uint32 // absoluteMaturity is the absolute height that this output will be @@ -1344,7 +1286,7 @@ type kidOutput struct { } func makeKidOutput(outpoint, originChanPoint *wire.OutPoint, - blocksToMaturity uint32, witnessType input.WitnessType, + blocksToMaturity uint32, witnessType input.StandardWitnessType, signDescriptor *input.SignDescriptor, absoluteMaturity uint32) kidOutput { @@ -1423,7 +1365,7 @@ func (k *kidOutput) Encode(w io.Writer) error { return err } - byteOrder.PutUint16(scratch[:2], uint16(k.WitnessType())) + byteOrder.PutUint16(scratch[:2], uint16(k.witnessType)) if _, err := w.Write(scratch[:2]); err != nil { return err } @@ -1473,7 +1415,7 @@ func (k *kidOutput) Decode(r io.Reader) error { if _, err := r.Read(scratch[:2]); err != nil { return err } - k.witnessType = input.WitnessType(byteOrder.Uint16(scratch[:2])) + k.witnessType = input.StandardWitnessType(byteOrder.Uint16(scratch[:2])) return input.ReadSignDescriptor(r, &k.signDesc) } diff --git a/utxonursery_test.go b/utxonursery_test.go index 6671c07013..cbc9126970 100644 --- a/utxonursery_test.go +++ b/utxonursery_test.go @@ -603,7 +603,6 @@ func createOutgoingRes(onLocalCommitment bool) *lnwallet.OutgoingHtlcResolution Value: 10000, }, }, - CsvDelay: 2, } if onLocalCommitment { @@ -620,8 +619,10 @@ func createOutgoingRes(onLocalCommitment bool) *lnwallet.OutgoingHtlcResolution } outgoingRes.SignedTimeoutTx = timeoutTx + outgoingRes.CsvDelay = 2 } else { outgoingRes.ClaimOutpoint = htlcOp + outgoingRes.CsvDelay = 0 } return &outgoingRes @@ -650,7 +651,6 @@ func incubateTestOutput(t *testing.T, nursery *utxoNursery, // Hand off to nursery. 
err := nursery.IncubateOutputs( testChanPoint, - nil, []lnwallet.OutgoingHtlcResolution{*outgoingRes}, nil, 0, ) @@ -839,59 +839,6 @@ func testNurseryOutgoingHtlcSuccessOnRemote(t *testing.T, ctx.finish() } -func TestNurseryCommitSuccessOnLocal(t *testing.T) { - testRestartLoop(t, testNurseryCommitSuccessOnLocal) -} - -func testNurseryCommitSuccessOnLocal(t *testing.T, - checkStartStop func(func()) bool) { - - ctx := createNurseryTestContext(t, checkStartStop) - - commitRes := createCommitmentRes() - - // Hand off to nursery. - err := ctx.nursery.IncubateOutputs( - testChanPoint, - commitRes, nil, nil, 0, - ) - if err != nil { - t.Fatal(err) - } - - // Verify that commitment output is showing up in nursery report as - // limbo balance. - assertNurseryReport(t, ctx.nursery, 0, 0, 10000) - - ctx.restart() - - // Notify confirmation of the commitment tx. - err = ctx.notifier.ConfirmTx(&commitRes.SelfOutPoint.Hash, 124) - if err != nil { - t.Fatal(err) - } - - // Wait for output to be promoted from PSCL to KNDR. - select { - case <-ctx.store.preschoolToKinderChan: - case <-time.After(defaultTestTimeout): - t.Fatalf("output not promoted to KNDR") - } - - ctx.restart() - - // Notify arrival of block where commit output CSV expires. - ctx.notifyEpoch(126) - - // Check final sweep into wallet. - testSweep(t, ctx, func() { - // Check limbo balance after sweep publication - assertNurseryReport(t, ctx.nursery, 0, 0, 10000) - }) - - ctx.finish() -} - func testSweepHtlc(t *testing.T, ctx *nurseryTestContext) { testSweep(t, ctx, func() { // Verify stage in nursery report. 
HTLCs should now both still @@ -1037,7 +984,7 @@ func newMockSweeper(t *testing.T) *mockSweeper { } func (s *mockSweeper) sweepInput(input input.Input, - _ sweep.FeePreference) (chan sweep.Result, error) { + _ sweep.Params) (chan sweep.Result, error) { utxnLog.Debugf("mockSweeper sweepInput called for %v", *input.OutPoint()) diff --git a/walletunlocker/service_test.go b/walletunlocker/service_test.go index 9b497ede1a..fc331f42c5 100644 --- a/walletunlocker/service_test.go +++ b/walletunlocker/service_test.go @@ -10,6 +10,8 @@ import ( "time" "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcwallet/snacl" + "github.com/btcsuite/btcwallet/waddrmgr" "github.com/btcsuite/btcwallet/wallet" "github.com/lightningnetwork/lnd/aezeed" "github.com/lightningnetwork/lnd/keychain" @@ -35,6 +37,19 @@ var ( ) func createTestWallet(t *testing.T, dir string, netParams *chaincfg.Params) { + // Instruct waddrmgr to use the cranked down scrypt parameters when + // creating new wallet encryption keys. + fastScrypt := waddrmgr.FastScryptOptions + keyGen := func(passphrase *[]byte, config *waddrmgr.ScryptOptions) ( + *snacl.SecretKey, error) { + + return snacl.NewSecretKey( + passphrase, fastScrypt.N, fastScrypt.R, fastScrypt.P, + ) + } + waddrmgr.SetSecretKeyGen(keyGen) + + // Create a new test wallet that uses fast scrypt as KDF. netDir := btcwallet.NetworkDir(dir, netParams) loader := wallet.NewLoader(netParams, netDir, true, 0) _, err := loader.CreateNewWallet( diff --git a/watchtower/config.go b/watchtower/config.go index 26dfac75a6..a71bd0433c 100644 --- a/watchtower/config.go +++ b/watchtower/config.go @@ -34,8 +34,8 @@ var ( ) // Config defines the resources and parameters used to configure a Watchtower. -// All nil-able elements with the Config must be set in order for the Watchtower -// to function properly. +// All nil-able elements besides tor-related ones must be set in order for the +// Watchtower to function properly. 
type Config struct { // ChainHash identifies the chain that the watchtower will be monitoring // for breaches and that will be advertised in the server's Init message @@ -89,4 +89,16 @@ type Config struct { // message from the other end, if the connection has stopped buffering // the server's replies. WriteTimeout time.Duration + + // TorController allows the watchtower to optionally setup an onion hidden + // service. + TorController *tor.Controller + + // WatchtowerKeyPath allows the watchtower to specify where the private key + // for a watchtower hidden service should be stored. + WatchtowerKeyPath string + + // Type specifies the hidden service type (V2 or V3) that the watchtower + // will create. + Type tor.OnionType } diff --git a/watchtower/lookout/justice_descriptor.go b/watchtower/lookout/justice_descriptor.go index 40e5a479e1..cea3ae0327 100644 --- a/watchtower/lookout/justice_descriptor.go +++ b/watchtower/lookout/justice_descriptor.go @@ -225,6 +225,12 @@ func (p *JusticeDescriptor) assembleJusticeTxn(txWeight int64, // CreateJusticeTxn computes the justice transaction that sweeps a breaching // commitment transaction. The justice transaction is constructed by assembling // the witnesses using data provided by the client in a prior state update. +// +// NOTE: An older version of ToLocalPenaltyWitnessSize underestimated the size +// of the witness by one byte, which could cause the signature(s) to break if +// the tower is reconstructing with the newer constant because the output values +// might differ. This method retains that original behavior to not invalidate +// historical signatures. 
func (p *JusticeDescriptor) CreateJusticeTxn() (*wire.MsgTx, error) { var ( sweepInputs = make([]*breachedInput, 0, 2) @@ -256,7 +262,13 @@ func (p *JusticeDescriptor) CreateJusticeTxn() (*wire.MsgTx, error) { if err != nil { return nil, err } - weightEstimate.AddWitnessInput(input.ToLocalPenaltyWitnessSize) + + // An older ToLocalPenaltyWitnessSize constant used to underestimate the + // size by one byte. The difference in weight can cause different output + // values on the sweep transaction, so we mimic the original bug to + // avoid invalidating signatures by older clients. + weightEstimate.AddWitnessInput(input.ToLocalPenaltyWitnessSize - 1) + sweepInputs = append(sweepInputs, toLocalInput) // If the justice kit specifies that we have to sweep the to-remote diff --git a/watchtower/lookout/justice_descriptor_test.go b/watchtower/lookout/justice_descriptor_test.go index 7fd93c8c8a..afc4dacd6f 100644 --- a/watchtower/lookout/justice_descriptor_test.go +++ b/watchtower/lookout/justice_descriptor_test.go @@ -144,7 +144,13 @@ func testJusticeDescriptor(t *testing.T, blobType blob.Type) { // Compute the weight estimate for our justice transaction. var weightEstimate input.TxWeightEstimator - weightEstimate.AddWitnessInput(input.ToLocalPenaltyWitnessSize) + + // An older ToLocalPenaltyWitnessSize constant used to underestimate the + // size by one byte. The difference in weight can cause different output + // values on the sweep transaction, so we mimic the original bug and + // create signatures using the original weight estimate. + weightEstimate.AddWitnessInput(input.ToLocalPenaltyWitnessSize - 1) + weightEstimate.AddWitnessInput(input.P2WKHWitnessSize) weightEstimate.AddP2WKHOutput() if blobType.Has(blob.FlagReward) { @@ -262,7 +268,7 @@ func testJusticeDescriptor(t *testing.T, blobType blob.Type) { toRemoteSigRaw := toRemoteWitness[0][:len(toRemoteWitness[0])-1] // Convert the DER to-local sig into a fixed-size signature.
- toLocalSig, err := lnwire.NewSigFromRawSignature(toLocalSigRaw) + toLocalSig, err := lnwire.NewSigFromSignature(toLocalSigRaw) if err != nil { t.Fatalf("unable to parse to-local signature: %v", err) } @@ -310,7 +316,7 @@ func testJusticeDescriptor(t *testing.T, blobType blob.Type) { // Construct the test's to-local witness. justiceTxn.TxIn[0].Witness = make([][]byte, 3) - justiceTxn.TxIn[0].Witness[0] = append(toLocalSigRaw, + justiceTxn.TxIn[0].Witness[0] = append(toLocalSigRaw.Serialize(), byte(txscript.SigHashAll)) justiceTxn.TxIn[0].Witness[1] = []byte{1} justiceTxn.TxIn[0].Witness[2] = toLocalScript diff --git a/watchtower/lookout/lookout.go b/watchtower/lookout/lookout.go index fc7baddec2..1d9f911b38 100644 --- a/watchtower/lookout/lookout.go +++ b/watchtower/lookout/lookout.go @@ -68,7 +68,7 @@ func (l *Lookout) Start() error { if startEpoch == nil { log.Infof("Starting lookout from chain tip") } else { - log.Infof("Starting lookout from epoch(height=%d hash=%x)", + log.Infof("Starting lookout from epoch(height=%d hash=%v)", startEpoch.Height, startEpoch.Hash) } diff --git a/watchtower/standalone.go b/watchtower/standalone.go index 8f50dc65a0..b1a36bb520 100644 --- a/watchtower/standalone.go +++ b/watchtower/standalone.go @@ -6,6 +6,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/lightningnetwork/lnd/brontide" + "github.com/lightningnetwork/lnd/tor" "github.com/lightningnetwork/lnd/watchtower/lookout" "github.com/lightningnetwork/lnd/watchtower/wtserver" ) @@ -112,6 +113,15 @@ func (w *Standalone) Start() error { log.Infof("Starting watchtower") + // If a tor controller exists in the config, then automatically create a + // hidden service for the watchtower to accept inbound connections from. 
+ if w.cfg.TorController != nil { + log.Infof("Creating watchtower hidden service") + if err := w.createNewHiddenService(); err != nil { + return err + } + } + if err := w.lookout.Start(); err != nil { return err } @@ -142,6 +152,39 @@ func (w *Standalone) Stop() error { return nil } +// createNewHiddenService automatically sets up a v2 or v3 onion service in +// order to listen for inbound connections over Tor. +func (w *Standalone) createNewHiddenService() error { + // Get all the ports the watchtower is listening on. These will be used to + // map the hidden service's virtual port. + listenPorts := make([]int, 0, len(w.listeners)) + for _, listener := range w.listeners { + port := listener.Addr().(*net.TCPAddr).Port + listenPorts = append(listenPorts, port) + } + + // Once we've created the port mapping, we can automatically create the + // hidden service. The service's private key will be saved on disk in order + // to persistently have access to this hidden service across restarts. + onionCfg := tor.AddOnionConfig{ + VirtualPort: DefaultPeerPort, + TargetPorts: listenPorts, + Store: tor.NewOnionFile(w.cfg.WatchtowerKeyPath, 0600), + Type: w.cfg.Type, + } + + addr, err := w.cfg.TorController.AddOnion(onionCfg) + if err != nil { + return err + } + + // Append this address to ExternalIPs so that it will be exposed in + // tower info calls. + w.cfg.ExternalIPs = append(w.cfg.ExternalIPs, addr) + + return nil +} + // PubKey returns the public key for the watchtower used to authentication and // encrypt traffic with clients. // diff --git a/watchtower/wtclient/backup_task.go b/watchtower/wtclient/backup_task.go index abdabe2f9f..302a6bc3bf 100644 --- a/watchtower/wtclient/backup_task.go +++ b/watchtower/wtclient/backup_task.go @@ -141,7 +141,14 @@ func (t *backupTask) bindSession(session *wtdb.ClientSessionBody) error { // Next, add the contribution from the inputs that are present on this // breach transaction. 
if t.toLocalInput != nil { - weightEstimate.AddWitnessInput(input.ToLocalPenaltyWitnessSize) + // An older ToLocalPenaltyWitnessSize constant used to + // underestimate the size by one byte. The difference in weight + // can cause different output values on the sweep transaction, + // so we mimic the original bug and create signatures using the + // original weight estimate. + weightEstimate.AddWitnessInput( + input.ToLocalPenaltyWitnessSize - 1, + ) } if t.toRemoteInput != nil { weightEstimate.AddWitnessInput(input.P2WKHWitnessSize) @@ -189,7 +196,7 @@ func (t *backupTask) craftSessionPayload( justiceKit := &blob.JusticeKit{ SweepAddress: t.sweepPkScript, RevocationPubKey: toBlobPubKey(keyRing.RevocationKey), - LocalDelayPubKey: toBlobPubKey(keyRing.DelayKey), + LocalDelayPubKey: toBlobPubKey(keyRing.ToLocalKey), CSVDelay: t.breachInfo.RemoteDelay, } @@ -199,7 +206,7 @@ func (t *backupTask) craftSessionPayload( // output to spend from. if t.toRemoteInput != nil { justiceKit.CommitToRemotePubKey = toBlobPubKey( - keyRing.NoDelayKey, + keyRing.ToRemoteKey, ) } diff --git a/watchtower/wtclient/backup_task_internal_test.go b/watchtower/wtclient/backup_task_internal_test.go index cff6f2b860..3c8ed17f70 100644 --- a/watchtower/wtclient/backup_task_internal_test.go +++ b/watchtower/wtclient/backup_task_internal_test.go @@ -16,6 +16,7 @@ import ( "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/watchtower/blob" "github.com/lightningnetwork/lnd/watchtower/wtdb" @@ -86,7 +87,7 @@ func genTaskTest( toLocalAmt int64, toRemoteAmt int64, blobType blob.Type, - sweepFeeRate lnwallet.SatPerKWeight, + sweepFeeRate chainfee.SatPerKWeight, rewardScript []byte, expSweepAmt int64, expRewardAmt int64, @@ -120,8 +121,8 @@ func genTaskTest( BreachTransaction: breachTxn, KeyRing:
&lnwallet.CommitmentKeyRing{ RevocationKey: revPK, - DelayKey: toLocalPK, - NoDelayKey: toRemotePK, + ToLocalKey: toLocalPK, + ToRemoteKey: toRemotePK, }, RemoteDelay: csvDelay, } @@ -564,9 +565,9 @@ func testBackupTask(t *testing.T, test backupTaskTest) { } keyRing := test.breachInfo.KeyRing - expToLocalPK := keyRing.DelayKey.SerializeCompressed() + expToLocalPK := keyRing.ToLocalKey.SerializeCompressed() expRevPK := keyRing.RevocationKey.SerializeCompressed() - expToRemotePK := keyRing.NoDelayKey.SerializeCompressed() + expToRemotePK := keyRing.ToRemoteKey.SerializeCompressed() // Assert that the blob contained the serialized revocation and to-local // pubkeys. diff --git a/watchtower/wtclient/client_test.go b/watchtower/wtclient/client_test.go index bd16ac0655..ea681d5751 100644 --- a/watchtower/wtclient/client_test.go +++ b/watchtower/wtclient/client_test.go @@ -291,8 +291,8 @@ func (c *mockChannel) createRemoteCommitTx(t *testing.T) { commitKeyRing := &lnwallet.CommitmentKeyRing{ RevocationKey: c.revPK, - NoDelayKey: c.toLocalPK, - DelayKey: c.toRemotePK, + ToRemoteKey: c.toLocalPK, + ToLocalKey: c.toRemotePK, } retribution := &lnwallet.BreachRetribution{ diff --git a/watchtower/wtdb/client_db.go b/watchtower/wtdb/client_db.go index 169b904288..22b913e1c5 100644 --- a/watchtower/wtdb/client_db.go +++ b/watchtower/wtdb/client_db.go @@ -8,7 +8,7 @@ import ( "net" "github.com/btcsuite/btcd/btcec" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" ) @@ -113,7 +113,7 @@ var ( // ClientDB is single database providing a persistent storage engine for the // wtclient. type ClientDB struct { - db *bbolt.DB + db kvdb.Backend dbPath string } @@ -146,7 +146,7 @@ func OpenClientDB(dbPath string) (*ClientDB, error) { // initialized. This allows us to assume their presence throughout all // operations. 
If an known top-level bucket is expected to exist but is // missing, this will trigger a ErrUninitializedDB error. - err = clientDB.db.Update(initClientDBBuckets) + err = kvdb.Update(clientDB.db, initClientDBBuckets) if err != nil { bdb.Close() return nil, err @@ -157,7 +157,7 @@ func OpenClientDB(dbPath string) (*ClientDB, error) { // initClientDBBuckets creates all top-level buckets required to handle database // operations required by the latest version. -func initClientDBBuckets(tx *bbolt.Tx) error { +func initClientDBBuckets(tx kvdb.RwTx) error { buckets := [][]byte{ cSessionKeyIndexBkt, cChanSummaryBkt, @@ -167,7 +167,7 @@ func initClientDBBuckets(tx *bbolt.Tx) error { } for _, bucket := range buckets { - _, err := tx.CreateBucketIfNotExists(bucket) + _, err := tx.CreateTopLevelBucket(bucket) if err != nil { return err } @@ -179,7 +179,7 @@ func initClientDBBuckets(tx *bbolt.Tx) error { // bdb returns the backing bbolt.DB instance. // // NOTE: Part of the versionedDB interface. -func (c *ClientDB) bdb() *bbolt.DB { +func (c *ClientDB) bdb() kvdb.Backend { return c.db } @@ -188,7 +188,7 @@ func (c *ClientDB) bdb() *bbolt.DB { // NOTE: Part of the versionedDB interface. 
func (c *ClientDB) Version() (uint32, error) { var version uint32 - err := c.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { var err error version, err = getDBVersion(tx) return err @@ -215,13 +215,13 @@ func (c *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*Tower, error) { copy(towerPubKey[:], lnAddr.IdentityKey.SerializeCompressed()) var tower *Tower - err := c.db.Update(func(tx *bbolt.Tx) error { - towerIndex := tx.Bucket(cTowerIndexBkt) + err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { + towerIndex := tx.ReadWriteBucket(cTowerIndexBkt) if towerIndex == nil { return ErrUninitializedDB } - towers := tx.Bucket(cTowerBkt) + towers := tx.ReadWriteBucket(cTowerBkt) if towers == nil { return ErrUninitializedDB } @@ -248,7 +248,7 @@ func (c *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*Tower, error) { // // TODO(wilmer): with an index of tower -> sessions we // can avoid the linear lookup. - sessions := tx.Bucket(cSessionBkt) + sessions := tx.ReadWriteBucket(cSessionBkt) if sessions == nil { return ErrUninitializedDB } @@ -308,12 +308,12 @@ func (c *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*Tower, error) { // // NOTE: An error is not returned if the tower doesn't exist. func (c *ClientDB) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error { - return c.db.Update(func(tx *bbolt.Tx) error { - towers := tx.Bucket(cTowerBkt) + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + towers := tx.ReadWriteBucket(cTowerBkt) if towers == nil { return ErrUninitializedDB } - towerIndex := tx.Bucket(cTowerIndexBkt) + towerIndex := tx.ReadWriteBucket(cTowerIndexBkt) if towerIndex == nil { return ErrUninitializedDB } @@ -342,7 +342,7 @@ func (c *ClientDB) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error { // // TODO(wilmer): with an index of tower -> sessions we can avoid // the linear lookup. 
- sessions := tx.Bucket(cSessionBkt) + sessions := tx.ReadWriteBucket(cSessionBkt) if sessions == nil { return ErrUninitializedDB } @@ -383,8 +383,8 @@ func (c *ClientDB) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error { // LoadTowerByID retrieves a tower by its tower ID. func (c *ClientDB) LoadTowerByID(towerID TowerID) (*Tower, error) { var tower *Tower - err := c.db.View(func(tx *bbolt.Tx) error { - towers := tx.Bucket(cTowerBkt) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + towers := tx.ReadBucket(cTowerBkt) if towers == nil { return ErrUninitializedDB } @@ -403,12 +403,12 @@ func (c *ClientDB) LoadTowerByID(towerID TowerID) (*Tower, error) { // LoadTower retrieves a tower by its public key. func (c *ClientDB) LoadTower(pubKey *btcec.PublicKey) (*Tower, error) { var tower *Tower - err := c.db.View(func(tx *bbolt.Tx) error { - towers := tx.Bucket(cTowerBkt) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + towers := tx.ReadBucket(cTowerBkt) if towers == nil { return ErrUninitializedDB } - towerIndex := tx.Bucket(cTowerIndexBkt) + towerIndex := tx.ReadBucket(cTowerIndexBkt) if towerIndex == nil { return ErrUninitializedDB } @@ -432,8 +432,8 @@ func (c *ClientDB) LoadTower(pubKey *btcec.PublicKey) (*Tower, error) { // ListTowers retrieves the list of towers available within the database. func (c *ClientDB) ListTowers() ([]*Tower, error) { var towers []*Tower - err := c.db.View(func(tx *bbolt.Tx) error { - towerBucket := tx.Bucket(cTowerBkt) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + towerBucket := tx.ReadBucket(cTowerBkt) if towerBucket == nil { return ErrUninitializedDB } @@ -461,8 +461,8 @@ func (c *ClientDB) ListTowers() ([]*Tower, error) { // CreateClientSession is invoked should return the same index. 
func (c *ClientDB) NextSessionKeyIndex(towerID TowerID) (uint32, error) { var index uint32 - err := c.db.Update(func(tx *bbolt.Tx) error { - keyIndex := tx.Bucket(cSessionKeyIndexBkt) + err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { + keyIndex := tx.ReadWriteBucket(cSessionKeyIndexBkt) if keyIndex == nil { return ErrUninitializedDB } @@ -509,20 +509,20 @@ func (c *ClientDB) NextSessionKeyIndex(towerID TowerID) (uint32, error) { // CreateClientSession records a newly negotiated client session in the set of // active sessions. The session can be identified by its SessionID. func (c *ClientDB) CreateClientSession(session *ClientSession) error { - return c.db.Update(func(tx *bbolt.Tx) error { - keyIndexes := tx.Bucket(cSessionKeyIndexBkt) + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + keyIndexes := tx.ReadWriteBucket(cSessionKeyIndexBkt) if keyIndexes == nil { return ErrUninitializedDB } - sessions := tx.Bucket(cSessionBkt) + sessions := tx.ReadWriteBucket(cSessionBkt) if sessions == nil { return ErrUninitializedDB } // Check that client session with this session id doesn't // already exist. - existingSessionBytes := sessions.Bucket(session.ID[:]) + existingSessionBytes := sessions.NestedReadWriteBucket(session.ID[:]) if existingSessionBytes != nil { return ErrClientSessionAlreadyExists } @@ -558,8 +558,8 @@ func (c *ClientDB) CreateClientSession(session *ClientSession) error { // response that do not correspond to this tower. 
func (c *ClientDB) ListClientSessions(id *TowerID) (map[SessionID]*ClientSession, error) { var clientSessions map[SessionID]*ClientSession - err := c.db.View(func(tx *bbolt.Tx) error { - sessions := tx.Bucket(cSessionBkt) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + sessions := tx.ReadBucket(cSessionBkt) if sessions == nil { return ErrUninitializedDB } @@ -577,7 +577,7 @@ func (c *ClientDB) ListClientSessions(id *TowerID) (map[SessionID]*ClientSession // listClientSessions returns the set of all client sessions known to the db. An // optional tower ID can be used to filter out any client sessions in the // response that do not correspond to this tower. -func listClientSessions(sessions *bbolt.Bucket, +func listClientSessions(sessions kvdb.ReadBucket, id *TowerID) (map[SessionID]*ClientSession, error) { clientSessions := make(map[SessionID]*ClientSession) @@ -612,8 +612,8 @@ func listClientSessions(sessions *bbolt.Bucket, // channel summaries. func (c *ClientDB) FetchChanSummaries() (ChannelSummaries, error) { summaries := make(map[lnwire.ChannelID]ClientChanSummary) - err := c.db.View(func(tx *bbolt.Tx) error { - chanSummaries := tx.Bucket(cChanSummaryBkt) + err := kvdb.View(c.db, func(tx kvdb.ReadTx) error { + chanSummaries := tx.ReadBucket(cChanSummaryBkt) if chanSummaries == nil { return ErrUninitializedDB } @@ -648,8 +648,8 @@ func (c *ClientDB) FetchChanSummaries() (ChannelSummaries, error) { func (c *ClientDB) RegisterChannel(chanID lnwire.ChannelID, sweepPkScript []byte) error { - return c.db.Update(func(tx *bbolt.Tx) error { - chanSummaries := tx.Bucket(cChanSummaryBkt) + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + chanSummaries := tx.ReadWriteBucket(cChanSummaryBkt) if chanSummaries == nil { return ErrUninitializedDB } @@ -692,8 +692,8 @@ func (c *ClientDB) CommitUpdate(id *SessionID, update *CommittedUpdate) (uint16, error) { var lastApplied uint16 - err := c.db.Update(func(tx *bbolt.Tx) error { - sessions := tx.Bucket(cSessionBkt) + 
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error { + sessions := tx.ReadWriteBucket(cSessionBkt) if sessions == nil { return ErrUninitializedDB } @@ -708,7 +708,7 @@ func (c *ClientDB) CommitUpdate(id *SessionID, } // Can't fail if the above didn't fail. - sessionBkt := sessions.Bucket(id[:]) + sessionBkt := sessions.NestedReadWriteBucket(id[:]) // Ensure the session commits sub-bucket is initialized. sessionCommits, err := sessionBkt.CreateBucketIfNotExists( @@ -796,8 +796,8 @@ func (c *ClientDB) CommitUpdate(id *SessionID, func (c *ClientDB) AckUpdate(id *SessionID, seqNum uint16, lastApplied uint16) error { - return c.db.Update(func(tx *bbolt.Tx) error { - sessions := tx.Bucket(cSessionBkt) + return kvdb.Update(c.db, func(tx kvdb.RwTx) error { + sessions := tx.ReadWriteBucket(cSessionBkt) if sessions == nil { return ErrUninitializedDB } @@ -835,11 +835,11 @@ func (c *ClientDB) AckUpdate(id *SessionID, seqNum uint16, } // Can't fail because of getClientSession succeeded. - sessionBkt := sessions.Bucket(id[:]) + sessionBkt := sessions.NestedReadWriteBucket(id[:]) // If the commits sub-bucket doesn't exist, there can't possibly // be a corresponding committed update to remove. - sessionCommits := sessionBkt.Bucket(cSessionCommits) + sessionCommits := sessionBkt.NestedReadWriteBucket(cSessionCommits) if sessionCommits == nil { return ErrCommittedUpdateNotFound } @@ -894,10 +894,10 @@ func (c *ClientDB) AckUpdate(id *SessionID, seqNum uint16, // bucket corresponding to the serialized session id. This does not deserialize // the CommittedUpdates or AckUpdates associated with the session. If the caller // requires this info, use getClientSession. 
-func getClientSessionBody(sessions *bbolt.Bucket, +func getClientSessionBody(sessions kvdb.ReadBucket, idBytes []byte) (*ClientSession, error) { - sessionBkt := sessions.Bucket(idBytes) + sessionBkt := sessions.NestedReadBucket(idBytes) if sessionBkt == nil { return nil, ErrClientSessionNotFound } @@ -922,7 +922,7 @@ func getClientSessionBody(sessions *bbolt.Bucket, // getClientSession loads the full ClientSession associated with the serialized // session id. This method populates the CommittedUpdates and AckUpdates in // addition to the ClientSession's body. -func getClientSession(sessions *bbolt.Bucket, +func getClientSession(sessions kvdb.ReadBucket, idBytes []byte) (*ClientSession, error) { session, err := getClientSessionBody(sessions, idBytes) @@ -950,17 +950,17 @@ func getClientSession(sessions *bbolt.Bucket, // getClientSessionCommits retrieves all committed updates for the session // identified by the serialized session id. -func getClientSessionCommits(sessions *bbolt.Bucket, +func getClientSessionCommits(sessions kvdb.ReadBucket, idBytes []byte) ([]CommittedUpdate, error) { // Can't fail because client session body has already been read. - sessionBkt := sessions.Bucket(idBytes) + sessionBkt := sessions.NestedReadBucket(idBytes) // Initialize commitedUpdates so that we can return an initialized map // if no committed updates exist. committedUpdates := make([]CommittedUpdate, 0) - sessionCommits := sessionBkt.Bucket(cSessionCommits) + sessionCommits := sessionBkt.NestedReadBucket(cSessionCommits) if sessionCommits == nil { return committedUpdates, nil } @@ -986,17 +986,17 @@ func getClientSessionCommits(sessions *bbolt.Bucket, // getClientSessionAcks retrieves all acked updates for the session identified // by the serialized session id. -func getClientSessionAcks(sessions *bbolt.Bucket, +func getClientSessionAcks(sessions kvdb.ReadBucket, idBytes []byte) (map[uint16]BackupID, error) { // Can't fail because client session body has already been read. 
- sessionBkt := sessions.Bucket(idBytes) + sessionBkt := sessions.NestedReadBucket(idBytes) // Initialize ackedUpdates so that we can return an initialized map if // no acked updates exist. ackedUpdates := make(map[uint16]BackupID) - sessionAcks := sessionBkt.Bucket(cSessionAcks) + sessionAcks := sessionBkt.NestedReadBucket(cSessionAcks) if sessionAcks == nil { return ackedUpdates, nil } @@ -1023,7 +1023,7 @@ func getClientSessionAcks(sessions *bbolt.Bucket, // putClientSessionBody stores the body of the ClientSession (everything but the // CommittedUpdates and AckedUpdates). -func putClientSessionBody(sessions *bbolt.Bucket, +func putClientSessionBody(sessions kvdb.RwBucket, session *ClientSession) error { sessionBkt, err := sessions.CreateBucketIfNotExists(session.ID[:]) @@ -1042,7 +1042,7 @@ func putClientSessionBody(sessions *bbolt.Bucket, // markSessionStatus updates the persisted state of the session to the new // status. -func markSessionStatus(sessions *bbolt.Bucket, session *ClientSession, +func markSessionStatus(sessions kvdb.RwBucket, session *ClientSession, status CSessionStatus) error { session.Status = status @@ -1050,7 +1050,7 @@ func markSessionStatus(sessions *bbolt.Bucket, session *ClientSession, } // getChanSummary loads a ClientChanSummary for the passed chanID. -func getChanSummary(chanSummaries *bbolt.Bucket, +func getChanSummary(chanSummaries kvdb.ReadBucket, chanID lnwire.ChannelID) (*ClientChanSummary, error) { chanSummaryBytes := chanSummaries.Get(chanID[:]) @@ -1068,7 +1068,7 @@ func getChanSummary(chanSummaries *bbolt.Bucket, } // putChanSummary stores a ClientChanSummary for the passed chanID. 
-func putChanSummary(chanSummaries *bbolt.Bucket, chanID lnwire.ChannelID, +func putChanSummary(chanSummaries kvdb.RwBucket, chanID lnwire.ChannelID, summary *ClientChanSummary) error { var b bytes.Buffer @@ -1081,7 +1081,7 @@ func putChanSummary(chanSummaries *bbolt.Bucket, chanID lnwire.ChannelID, } // getTower loads a Tower identified by its serialized tower id. -func getTower(towers *bbolt.Bucket, id []byte) (*Tower, error) { +func getTower(towers kvdb.ReadBucket, id []byte) (*Tower, error) { towerBytes := towers.Get(id) if towerBytes == nil { return nil, ErrTowerNotFound @@ -1099,7 +1099,7 @@ func getTower(towers *bbolt.Bucket, id []byte) (*Tower, error) { } // putTower stores a Tower identified by its serialized tower id. -func putTower(towers *bbolt.Bucket, tower *Tower) error { +func putTower(towers kvdb.RwBucket, tower *Tower) error { var b bytes.Buffer err := tower.Encode(&b) if err != nil { diff --git a/watchtower/wtdb/codec.go b/watchtower/wtdb/codec.go index 4cfa6b6b48..e6b99db6d4 100644 --- a/watchtower/wtdb/codec.go +++ b/watchtower/wtdb/codec.go @@ -4,7 +4,7 @@ import ( "io" "github.com/lightningnetwork/lnd/channeldb" - "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/watchtower/blob" "github.com/lightningnetwork/lnd/watchtower/wtpolicy" ) @@ -58,7 +58,7 @@ func ReadElement(r io.Reader, element interface{}) error { } e.BlobType = blob.Type(blobType) - e.SweepFeeRate = lnwallet.SatPerKWeight(sweepFeeRate) + e.SweepFeeRate = chainfee.SatPerKWeight(sweepFeeRate) // Type is still unknown to wtdb extensions, fail. 
default: diff --git a/watchtower/wtdb/db_common.go b/watchtower/wtdb/db_common.go index ed6f1c6b8f..1592f1e6e8 100644 --- a/watchtower/wtdb/db_common.go +++ b/watchtower/wtdb/db_common.go @@ -6,7 +6,7 @@ import ( "os" "path/filepath" - "github.com/coreos/bbolt" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) const ( @@ -49,7 +49,7 @@ func fileExists(path string) bool { // one doesn't exist. The boolean returned indicates if the database did not // exist before, or if it has been created but no version metadata exists within // it. -func createDBIfNotExist(dbPath, name string) (*bbolt.DB, bool, error) { +func createDBIfNotExist(dbPath, name string) (kvdb.Backend, bool, error) { path := filepath.Join(dbPath, name) // If the database file doesn't exist, this indicates we much initialize @@ -65,12 +65,7 @@ func createDBIfNotExist(dbPath, name string) (*bbolt.DB, bool, error) { // Specify bbolt freelist options to reduce heap pressure in case the // freelist grows to be very large. - options := &bbolt.Options{ - NoFreelistSync: true, - FreelistType: bbolt.FreelistMapType, - } - - bdb, err := bbolt.Open(path, dbFilePermission, options) + bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true) if err != nil { return nil, false, err } @@ -82,8 +77,8 @@ func createDBIfNotExist(dbPath, name string) (*bbolt.DB, bool, error) { // set firstInit to true so that we can treat is initialize the bucket. 
if !firstInit { var metadataExists bool - err = bdb.View(func(tx *bbolt.Tx) error { - metadataExists = tx.Bucket(metadataBkt) != nil + err = kvdb.View(bdb, func(tx kvdb.ReadTx) error { + metadataExists = tx.ReadBucket(metadataBkt) != nil return nil }) if err != nil { diff --git a/watchtower/wtdb/tower_db.go b/watchtower/wtdb/tower_db.go index 92a9e55abe..39782f1d54 100644 --- a/watchtower/wtdb/tower_db.go +++ b/watchtower/wtdb/tower_db.go @@ -5,8 +5,8 @@ import ( "errors" "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/watchtower/blob" ) @@ -55,7 +55,7 @@ var ( // TowerDB is single database providing a persistent storage engine for the // wtserver and lookout subsystems. type TowerDB struct { - db *bbolt.DB + db kvdb.Backend dbPath string } @@ -88,7 +88,7 @@ func OpenTowerDB(dbPath string) (*TowerDB, error) { // initialized. This allows us to assume their presence throughout all // operations. If an known top-level bucket is expected to exist but is // missing, this will trigger a ErrUninitializedDB error. - err = towerDB.db.Update(initTowerDBBuckets) + err = kvdb.Update(towerDB.db, initTowerDBBuckets) if err != nil { bdb.Close() return nil, err @@ -99,7 +99,7 @@ func OpenTowerDB(dbPath string) (*TowerDB, error) { // initTowerDBBuckets creates all top-level buckets required to handle database // operations required by the latest version. -func initTowerDBBuckets(tx *bbolt.Tx) error { +func initTowerDBBuckets(tx kvdb.RwTx) error { buckets := [][]byte{ sessionsBkt, updateIndexBkt, @@ -108,7 +108,7 @@ func initTowerDBBuckets(tx *bbolt.Tx) error { } for _, bucket := range buckets { - _, err := tx.CreateBucketIfNotExists(bucket) + _, err := tx.CreateTopLevelBucket(bucket) if err != nil { return err } @@ -120,7 +120,7 @@ func initTowerDBBuckets(tx *bbolt.Tx) error { // bdb returns the backing bbolt.DB instance. 
// // NOTE: Part of the versionedDB interface. -func (t *TowerDB) bdb() *bbolt.DB { +func (t *TowerDB) bdb() kvdb.Backend { return t.db } @@ -129,7 +129,7 @@ func (t *TowerDB) bdb() *bbolt.DB { // NOTE: Part of the versionedDB interface. func (t *TowerDB) Version() (uint32, error) { var version uint32 - err := t.db.View(func(tx *bbolt.Tx) error { + err := kvdb.View(t.db, func(tx kvdb.ReadTx) error { var err error version, err = getDBVersion(tx) return err @@ -150,8 +150,8 @@ func (t *TowerDB) Close() error { // returned if the session could not be found. func (t *TowerDB) GetSessionInfo(id *SessionID) (*SessionInfo, error) { var session *SessionInfo - err := t.db.View(func(tx *bbolt.Tx) error { - sessions := tx.Bucket(sessionsBkt) + err := kvdb.View(t.db, func(tx kvdb.ReadTx) error { + sessions := tx.ReadBucket(sessionsBkt) if sessions == nil { return ErrUninitializedDB } @@ -170,13 +170,13 @@ func (t *TowerDB) GetSessionInfo(id *SessionID) (*SessionInfo, error) { // InsertSessionInfo records a negotiated session in the tower database. An // error is returned if the session already exists. func (t *TowerDB) InsertSessionInfo(session *SessionInfo) error { - return t.db.Update(func(tx *bbolt.Tx) error { - sessions := tx.Bucket(sessionsBkt) + return kvdb.Update(t.db, func(tx kvdb.RwTx) error { + sessions := tx.ReadWriteBucket(sessionsBkt) if sessions == nil { return ErrUninitializedDB } - updateIndex := tx.Bucket(updateIndexBkt) + updateIndex := tx.ReadWriteBucket(updateIndexBkt) if updateIndex == nil { return ErrUninitializedDB } @@ -219,18 +219,18 @@ func (t *TowerDB) InsertSessionInfo(session *SessionInfo) error { // properly and the last applied values echoed by the client are sane. 
func (t *TowerDB) InsertStateUpdate(update *SessionStateUpdate) (uint16, error) { var lastApplied uint16 - err := t.db.Update(func(tx *bbolt.Tx) error { - sessions := tx.Bucket(sessionsBkt) + err := kvdb.Update(t.db, func(tx kvdb.RwTx) error { + sessions := tx.ReadWriteBucket(sessionsBkt) if sessions == nil { return ErrUninitializedDB } - updates := tx.Bucket(updatesBkt) + updates := tx.ReadWriteBucket(updatesBkt) if updates == nil { return ErrUninitializedDB } - updateIndex := tx.Bucket(updateIndexBkt) + updateIndex := tx.ReadWriteBucket(updateIndexBkt) if updateIndex == nil { return ErrUninitializedDB } @@ -303,18 +303,18 @@ func (t *TowerDB) InsertStateUpdate(update *SessionStateUpdate) (uint16, error) // DeleteSession removes all data associated with a particular session id from // the tower's database. func (t *TowerDB) DeleteSession(target SessionID) error { - return t.db.Update(func(tx *bbolt.Tx) error { - sessions := tx.Bucket(sessionsBkt) + return kvdb.Update(t.db, func(tx kvdb.RwTx) error { + sessions := tx.ReadWriteBucket(sessionsBkt) if sessions == nil { return ErrUninitializedDB } - updates := tx.Bucket(updatesBkt) + updates := tx.ReadWriteBucket(updatesBkt) if updates == nil { return ErrUninitializedDB } - updateIndex := tx.Bucket(updateIndexBkt) + updateIndex := tx.ReadWriteBucket(updateIndexBkt) if updateIndex == nil { return ErrUninitializedDB } @@ -341,7 +341,7 @@ func (t *TowerDB) DeleteSession(target SessionID) error { for _, hint := range hints { // Remove the state updates for any blobs stored under // the target session identifier. - updatesForHint := updates.Bucket(hint[:]) + updatesForHint := updates.NestedReadWriteBucket(hint[:]) if updatesForHint == nil { continue } @@ -371,7 +371,7 @@ func (t *TowerDB) DeleteSession(target SessionID) error { // No more updates for this hint, prune hint bucket. 
default: - err = updates.DeleteBucket(hint[:]) + err = updates.DeleteNestedBucket(hint[:]) if err != nil { return err } @@ -389,13 +389,13 @@ func (t *TowerDB) DeleteSession(target SessionID) error { // they exist in the database. func (t *TowerDB) QueryMatches(breachHints []blob.BreachHint) ([]Match, error) { var matches []Match - err := t.db.View(func(tx *bbolt.Tx) error { - sessions := tx.Bucket(sessionsBkt) + err := kvdb.View(t.db, func(tx kvdb.ReadTx) error { + sessions := tx.ReadBucket(sessionsBkt) if sessions == nil { return ErrUninitializedDB } - updates := tx.Bucket(updatesBkt) + updates := tx.ReadBucket(updatesBkt) if updates == nil { return ErrUninitializedDB } @@ -405,7 +405,7 @@ func (t *TowerDB) QueryMatches(breachHints []blob.BreachHint) ([]Match, error) { for _, hint := range breachHints { // If a bucket does not exist for this hint, no matches // are known. - updatesForHint := updates.Bucket(hint[:]) + updatesForHint := updates.NestedReadBucket(hint[:]) if updatesForHint == nil { continue } @@ -471,8 +471,8 @@ func (t *TowerDB) QueryMatches(breachHints []blob.BreachHint) ([]Match, error) { // SetLookoutTip stores the provided epoch as the latest lookout tip epoch in // the tower database. func (t *TowerDB) SetLookoutTip(epoch *chainntnfs.BlockEpoch) error { - return t.db.Update(func(tx *bbolt.Tx) error { - lookoutTip := tx.Bucket(lookoutTipBkt) + return kvdb.Update(t.db, func(tx kvdb.RwTx) error { + lookoutTip := tx.ReadWriteBucket(lookoutTipBkt) if lookoutTip == nil { return ErrUninitializedDB } @@ -485,8 +485,8 @@ func (t *TowerDB) SetLookoutTip(epoch *chainntnfs.BlockEpoch) error { // database. 
func (t *TowerDB) GetLookoutTip() (*chainntnfs.BlockEpoch, error) { var epoch *chainntnfs.BlockEpoch - err := t.db.View(func(tx *bbolt.Tx) error { - lookoutTip := tx.Bucket(lookoutTipBkt) + err := kvdb.View(t.db, func(tx kvdb.ReadTx) error { + lookoutTip := tx.ReadBucket(lookoutTipBkt) if lookoutTip == nil { return ErrUninitializedDB } @@ -505,7 +505,7 @@ func (t *TowerDB) GetLookoutTip() (*chainntnfs.BlockEpoch, error) { // getSession retrieves the session info from the sessions bucket identified by // its session id. An error is returned if the session is not found or a // deserialization error occurs. -func getSession(sessions *bbolt.Bucket, id []byte) (*SessionInfo, error) { +func getSession(sessions kvdb.ReadBucket, id []byte) (*SessionInfo, error) { sessionBytes := sessions.Get(id) if sessionBytes == nil { return nil, ErrSessionNotFound @@ -522,7 +522,7 @@ func getSession(sessions *bbolt.Bucket, id []byte) (*SessionInfo, error) { // putSession stores the session info in the sessions bucket identified by its // session id. An error is returned if a serialization error occurs. -func putSession(sessions *bbolt.Bucket, session *SessionInfo) error { +func putSession(sessions kvdb.RwBucket, session *SessionInfo) error { var b bytes.Buffer err := session.Encode(&b) if err != nil { @@ -536,7 +536,7 @@ func putSession(sessions *bbolt.Bucket, session *SessionInfo) error { // session id. This ensures that future calls to getHintsForSession or // putHintForSession can rely on the bucket already being created, and fail if // index has not been initialized as this points to improper usage. 
-func touchSessionHintBkt(updateIndex *bbolt.Bucket, id *SessionID) error { +func touchSessionHintBkt(updateIndex kvdb.RwBucket, id *SessionID) error { _, err := updateIndex.CreateBucketIfNotExists(id[:]) return err } @@ -544,17 +544,17 @@ func touchSessionHintBkt(updateIndex *bbolt.Bucket, id *SessionID) error { // removeSessionHintBkt prunes the session-hint bucket for the given session id // and all of the hints contained inside. This should be used to clean up the // index upon session deletion. -func removeSessionHintBkt(updateIndex *bbolt.Bucket, id *SessionID) error { - return updateIndex.DeleteBucket(id[:]) +func removeSessionHintBkt(updateIndex kvdb.RwBucket, id *SessionID) error { + return updateIndex.DeleteNestedBucket(id[:]) } // getHintsForSession returns all known hints belonging to the given session id. // If the index for the session has not been initialized, this method returns // ErrNoSessionHintIndex. -func getHintsForSession(updateIndex *bbolt.Bucket, +func getHintsForSession(updateIndex kvdb.ReadBucket, id *SessionID) ([]blob.BreachHint, error) { - sessionHints := updateIndex.Bucket(id[:]) + sessionHints := updateIndex.NestedReadBucket(id[:]) if sessionHints == nil { return nil, ErrNoSessionHintIndex } @@ -582,10 +582,10 @@ func getHintsForSession(updateIndex *bbolt.Bucket, // session id, and used to perform efficient removal of updates. If the index // for the session has not been initialized, this method returns // ErrNoSessionHintIndex. -func putHintForSession(updateIndex *bbolt.Bucket, id *SessionID, +func putHintForSession(updateIndex kvdb.RwBucket, id *SessionID, hint blob.BreachHint) error { - sessionHints := updateIndex.Bucket(id[:]) + sessionHints := updateIndex.NestedReadWriteBucket(id[:]) if sessionHints == nil { return ErrNoSessionHintIndex } @@ -594,7 +594,7 @@ func putHintForSession(updateIndex *bbolt.Bucket, id *SessionID, } // putLookoutEpoch stores the given lookout tip block epoch in provided bucket. 
-func putLookoutEpoch(bkt *bbolt.Bucket, epoch *chainntnfs.BlockEpoch) error { +func putLookoutEpoch(bkt kvdb.RwBucket, epoch *chainntnfs.BlockEpoch) error { epochBytes := make([]byte, 36) copy(epochBytes, epoch.Hash[:]) byteOrder.PutUint32(epochBytes[32:], uint32(epoch.Height)) @@ -604,7 +604,7 @@ func putLookoutEpoch(bkt *bbolt.Bucket, epoch *chainntnfs.BlockEpoch) error { // getLookoutEpoch retrieves the lookout tip block epoch from the given bucket. // A nil epoch is returned if no update exists. -func getLookoutEpoch(bkt *bbolt.Bucket) *chainntnfs.BlockEpoch { +func getLookoutEpoch(bkt kvdb.ReadBucket) *chainntnfs.BlockEpoch { epochBytes := bkt.Get(lookoutTipKey) if len(epochBytes) != 36 { return nil @@ -625,7 +625,7 @@ func getLookoutEpoch(bkt *bbolt.Bucket) *chainntnfs.BlockEpoch { var errBucketNotEmpty = errors.New("bucket not empty") // isBucketEmpty returns errBucketNotEmpty if the bucket is not empty. -func isBucketEmpty(bkt *bbolt.Bucket) error { +func isBucketEmpty(bkt kvdb.ReadBucket) error { return bkt.ForEach(func(_, _ []byte) error { return errBucketNotEmpty }) diff --git a/watchtower/wtdb/version.go b/watchtower/wtdb/version.go index b8aa2b7e99..597a1f1fb1 100644 --- a/watchtower/wtdb/version.go +++ b/watchtower/wtdb/version.go @@ -1,14 +1,14 @@ package wtdb import ( - "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channeldb/kvdb" ) // migration is a function which takes a prior outdated version of the database // instances and mutates the key/bucket structure to arrive at a more // up-to-date version of the database. -type migration func(tx *bbolt.Tx) error +type migration func(tx kvdb.RwTx) error // version pairs a version number with the migration that would need to be // applied from the prior version to upgrade. 
@@ -46,8 +46,8 @@ func getMigrations(versions []version, curVersion uint32) []version { // getDBVersion retrieves the current database version from the metadata bucket // using the dbVersionKey. -func getDBVersion(tx *bbolt.Tx) (uint32, error) { - metadata := tx.Bucket(metadataBkt) +func getDBVersion(tx kvdb.ReadTx) (uint32, error) { + metadata := tx.ReadBucket(metadataBkt) if metadata == nil { return 0, ErrUninitializedDB } @@ -62,8 +62,8 @@ func getDBVersion(tx *bbolt.Tx) (uint32, error) { // initDBVersion initializes the top-level metadata bucket and writes the passed // version number as the current version. -func initDBVersion(tx *bbolt.Tx, version uint32) error { - _, err := tx.CreateBucketIfNotExists(metadataBkt) +func initDBVersion(tx kvdb.RwTx, version uint32) error { + _, err := tx.CreateTopLevelBucket(metadataBkt) if err != nil { return err } @@ -73,8 +73,8 @@ func initDBVersion(tx *bbolt.Tx, version uint32) error { // putDBVersion stores the passed database version in the metadata bucket under // the dbVersionKey. -func putDBVersion(tx *bbolt.Tx, version uint32) error { - metadata := tx.Bucket(metadataBkt) +func putDBVersion(tx kvdb.RwTx, version uint32) error { + metadata := tx.ReadWriteBucket(metadataBkt) if metadata == nil { return ErrUninitializedDB } @@ -89,7 +89,7 @@ func putDBVersion(tx *bbolt.Tx, version uint32) error { // on either. type versionedDB interface { // bdb returns the underlying bbolt database. - bdb() *bbolt.DB + bdb() kvdb.Backend // Version returns the current version stored in the database. Version() (uint32, error) @@ -105,7 +105,7 @@ func initOrSyncVersions(db versionedDB, init bool, versions []version) error { // If the database has not yet been created, we'll initialize the // database version with the latest known version. 
if init { - return db.bdb().Update(func(tx *bbolt.Tx) error { + return kvdb.Update(db.bdb(), func(tx kvdb.RwTx) error { return initDBVersion(tx, getLatestDBVersion(versions)) }) } @@ -141,7 +141,7 @@ func syncVersions(db versionedDB, versions []version) error { // Otherwise, apply any migrations in order to bring the database // version up to the highest known version. updates := getMigrations(versions, curVersion) - return db.bdb().Update(func(tx *bbolt.Tx) error { + return kvdb.Update(db.bdb(), func(tx kvdb.RwTx) error { for i, update := range updates { if update.migration == nil { continue diff --git a/watchtower/wtmock/signer.go b/watchtower/wtmock/signer.go index c41e4f2f44..89421d6adb 100644 --- a/watchtower/wtmock/signer.go +++ b/watchtower/wtmock/signer.go @@ -30,7 +30,7 @@ func NewMockSigner() *MockSigner { // in the sign descriptor. The returned signature is the raw DER-encoded // signature without the signhash flag. func (s *MockSigner) SignOutputRaw(tx *wire.MsgTx, - signDesc *input.SignDescriptor) ([]byte, error) { + signDesc *input.SignDescriptor) (input.Signature, error) { s.mu.Lock() defer s.mu.Unlock() @@ -50,7 +50,7 @@ func (s *MockSigner) SignOutputRaw(tx *wire.MsgTx, return nil, err } - return sig[:len(sig)-1], nil + return btcec.ParseDERSignature(sig[:len(sig)-1], btcec.S256()) } // ComputeInputScript is not implemented. diff --git a/watchtower/wtpolicy/policy.go b/watchtower/wtpolicy/policy.go index db636b684b..9c7f5e64e6 100644 --- a/watchtower/wtpolicy/policy.go +++ b/watchtower/wtpolicy/policy.go @@ -7,6 +7,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/watchtower/blob" ) @@ -27,11 +28,11 @@ const ( // DefaultSweepFeeRate specifies the fee rate used to construct justice // transactions. The value is expressed in satoshis per kilo-weight. 
- DefaultSweepFeeRate = lnwallet.SatPerKWeight(2500) + DefaultSweepFeeRate = chainfee.SatPerKWeight(2500) // MinSweepFeeRate is the minimum sweep fee rate a client may use in its // policy, the current value is 4 sat/vbyte. - MinSweepFeeRate = lnwallet.SatPerKWeight(1000) + MinSweepFeeRate = chainfee.SatPerKWeight(1000) ) var ( @@ -97,7 +98,7 @@ type TxPolicy struct { // constructing the justice transaction. All sweep transactions created // for this session must use this value during construction, and the // signatures must implicitly commit to the resulting output values. - SweepFeeRate lnwallet.SatPerKWeight + SweepFeeRate chainfee.SatPerKWeight } // Policy defines the negotiated parameters for a session between a client and diff --git a/watchtower/wtwire/create_session.go b/watchtower/wtwire/create_session.go index 338dda83ee..7047ce30ed 100644 --- a/watchtower/wtwire/create_session.go +++ b/watchtower/wtwire/create_session.go @@ -3,7 +3,7 @@ package wtwire import ( "io" - "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/watchtower/blob" ) @@ -34,7 +34,7 @@ type CreateSession struct { // constructing the justice transaction. All sweep transactions created // for this session must use this value during construction, and the // signatures must implicitly commit to the resulting output values. 
- SweepFeeRate lnwallet.SatPerKWeight + SweepFeeRate chainfee.SatPerKWeight } // A compile time check to ensure CreateSession implements the wtwire.Message diff --git a/watchtower/wtwire/init.go b/watchtower/wtwire/init.go index 79a5fbf8b7..4d5ec34bdb 100644 --- a/watchtower/wtwire/init.go +++ b/watchtower/wtwire/init.go @@ -5,6 +5,7 @@ import ( "io" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/lightningnetwork/lnd/feature" "github.com/lightningnetwork/lnd/lnwire" ) @@ -92,12 +93,7 @@ func (msg *Init) CheckRemoteInit(remoteInit *Init, // Check that the remote peer doesn't have any required connection // feature bits that we ourselves are unaware of. - unknownConnFeatures := remoteConnFeatures.UnknownRequiredFeatures() - if len(unknownConnFeatures) > 0 { - return NewErrUnknownRequiredFeatures(unknownConnFeatures...) - } - - return nil + return feature.ValidateRequired(remoteConnFeatures) } // ErrUnknownChainHash signals that the remote Init has a different chain hash @@ -116,24 +112,3 @@ func NewErrUnknownChainHash(hash chainhash.Hash) *ErrUnknownChainHash { func (e *ErrUnknownChainHash) Error() string { return fmt.Sprintf("remote init has unknown chain hash: %s", e.hash) } - -// ErrUnknownRequiredFeatures signals that the remote Init has required feature -// bits that were unknown to us. -type ErrUnknownRequiredFeatures struct { - unknownFeatures []lnwire.FeatureBit -} - -// NewErrUnknownRequiredFeatures creates an ErrUnknownRequiredFeatures using the -// remote Init's required features that were unknown to us. -func NewErrUnknownRequiredFeatures( - unknownFeatures ...lnwire.FeatureBit) *ErrUnknownRequiredFeatures { - - return &ErrUnknownRequiredFeatures{unknownFeatures} -} - -// Error returns a human-readable error displaying the unknown required feature -// bits. 
-func (e *ErrUnknownRequiredFeatures) Error() string { - return fmt.Sprintf("remote init has unknown required features: %v", - e.unknownFeatures) -} diff --git a/watchtower/wtwire/init_test.go b/watchtower/wtwire/init_test.go index 1aee5530b6..c0b0fa7511 100644 --- a/watchtower/wtwire/init_test.go +++ b/watchtower/wtwire/init_test.go @@ -5,6 +5,7 @@ import ( "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/lightningnetwork/lnd/feature" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/watchtower/wtwire" ) @@ -60,8 +61,8 @@ var checkRemoteInitTests = []checkRemoteInitTest{ lHash: testnetChainHash, rFeatures: lnwire.NewRawFeatureVector(lnwire.GossipQueriesRequired), rHash: testnetChainHash, - expErr: wtwire.NewErrUnknownRequiredFeatures( - lnwire.GossipQueriesRequired, + expErr: feature.NewErrUnknownRequired( + []lnwire.FeatureBit{lnwire.GossipQueriesRequired}, ), }, } diff --git a/watchtower/wtwire/wtwire.go b/watchtower/wtwire/wtwire.go index 1af9433d5b..4472556021 100644 --- a/watchtower/wtwire/wtwire.go +++ b/watchtower/wtwire/wtwire.go @@ -8,7 +8,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" - "github.com/lightningnetwork/lnd/lnwallet" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/watchtower/blob" ) @@ -74,7 +74,7 @@ func WriteElement(w io.Writer, element interface{}) error { return err } - case lnwallet.SatPerKWeight: + case chainfee.SatPerKWeight: var b [8]byte binary.BigEndian.PutUint64(b[:], uint64(e)) if _, err := w.Write(b[:]); err != nil { @@ -194,12 +194,12 @@ func ReadElement(r io.Reader, element interface{}) error { } *e = bytes - case *lnwallet.SatPerKWeight: + case *chainfee.SatPerKWeight: var b [8]byte if _, err := io.ReadFull(r, b[:]); err != nil { return err } - *e = lnwallet.SatPerKWeight(binary.BigEndian.Uint64(b[:])) 
+ *e = chainfee.SatPerKWeight(binary.BigEndian.Uint64(b[:])) case *ErrorCode: var b [2]byte diff --git a/zpay32/invoice.go b/zpay32/invoice.go index 4e7210f76b..691c016979 100644 --- a/zpay32/invoice.go +++ b/zpay32/invoice.go @@ -73,19 +73,33 @@ const ( // supported or required by the receiver. fieldType9 = 5 + // fieldTypeS contains a 32-byte payment address, which is a nonce + // included in the final hop's payload to prevent intermediaries from + // probing the recipient. + fieldTypeS = 16 + // maxInvoiceLength is the maximum total length an invoice can have. // This is chosen to be the maximum number of bytes that can fit into a // single QR code: https://en.wikipedia.org/wiki/QR_code#Storage maxInvoiceLength = 7089 + + // DefaultInvoiceExpiry is the default expiry duration from the creation + // timestamp if expiry is set to zero. + DefaultInvoiceExpiry = time.Hour ) var ( - // InvoiceFeatures holds the set of all known feature bits that are - // exposed as BOLT 11 features. - InvoiceFeatures = map[lnwire.FeatureBit]string{} - - // ErrInvoiceTooLarge is returned when an invoice exceeds maxInvoiceLength. + // ErrInvoiceTooLarge is returned when an invoice exceeds + // maxInvoiceLength. ErrInvoiceTooLarge = errors.New("invoice is too large") + + // ErrInvalidFieldLength is returned when a tagged field was specified + // with a length larger than the left over bytes of the data field. + ErrInvalidFieldLength = errors.New("invalid field length") + + // ErrBrokenTaggedField is returned when the last tagged field is + // incorrectly formatted and doesn't have enough bytes to be read. + ErrBrokenTaggedField = errors.New("last tagged field is broken") ) // MessageSigner is passed to the Encode method to provide a signature @@ -118,6 +132,10 @@ type Invoice struct { // invoice. PaymentHash *[32]byte + // PaymentAddr is the payment address to be used by payments to prevent + // probing of the destination. 
+ PaymentAddr *[32]byte + // Destination is the public key of the target node. This will always // be set after decoding, and can optionally be set before encoding to // include the pubkey as an 'n' field. If this is not set before @@ -126,7 +144,8 @@ type Invoice struct { Destination *btcec.PublicKey // minFinalCLTVExpiry is the value that the creator of the invoice - // expects to be used for the + // expects to be used for the CLTV expiry of the HTLC extended to it in + // the last hop. // // NOTE: This value is optional, and should be set to nil if the // invoice creator doesn't have a strong requirement on the CLTV expiry @@ -242,6 +261,23 @@ func RouteHint(routeHint []HopHint) func(*Invoice) { } } +// Features is a functional option that allows callers of NewInvoice to set the +// desired feature bits that are advertised on the invoice. If this option is +// not used, an empty feature vector will automatically be populated. +func Features(features *lnwire.FeatureVector) func(*Invoice) { + return func(i *Invoice) { + i.Features = features + } +} + +// PaymentAddr is a functional option that allows callers of NewInvoice to set +// the desired payment address tht is advertised on the invoice. +func PaymentAddr(addr [32]byte) func(*Invoice) { + return func(i *Invoice) { + i.PaymentAddr = &addr + } +} + // NewInvoice creates a new Invoice object. The last parameter is a set of // variadic arguments for setting optional fields of the invoice. // @@ -260,6 +296,13 @@ func NewInvoice(net *chaincfg.Params, paymentHash [32]byte, option(invoice) } + // If no features were set, we'll populate an empty feature vector. 
+ if invoice.Features == nil { + invoice.Features = lnwire.NewFeatureVector( + nil, lnwire.Features, + ) + } + if err := validateInvoice(invoice); err != nil { return nil, err } @@ -368,6 +411,13 @@ func Decode(invoice string, net *chaincfg.Params) (*Invoice, error) { decodedInvoice.Destination = pubkey } + // If no feature vector was decoded, populate an empty one. + if decodedInvoice.Features == nil { + decodedInvoice.Features = lnwire.NewFeatureVector( + nil, lnwire.Features, + ) + } + // Now that we have created the invoice, make sure it has the required // fields set. if err := validateInvoice(&decodedInvoice); err != nil { @@ -499,7 +549,7 @@ func (invoice *Invoice) Expiry() time.Duration { } // If no expiry is set for this invoice, default is 3600 seconds. - return 3600 * time.Second + return DefaultInvoiceExpiry } // MinFinalCLTVExpiry returns the minimum final CLTV expiry delta as specified @@ -556,6 +606,11 @@ func validateInvoice(invoice *Invoice) error { len(invoice.Destination.SerializeCompressed())) } + // Ensure that all invoices have feature vectors. + if invoice.Features == nil { + return fmt.Errorf("missing feature vector") + } + return nil } @@ -592,12 +647,14 @@ func parseTimestamp(data []byte) (uint64, error) { // fills the Invoice struct accordingly. func parseTaggedFields(invoice *Invoice, fields []byte, net *chaincfg.Params) error { index := 0 - for { + for len(fields)-index > 0 { // If there are less than 3 groups to read, there cannot be more // interesting information, as we need the type (1 group) and // length (2 groups). + // + // This means the last tagged field is broken. if len(fields)-index < 3 { - break + return ErrBrokenTaggedField } typ := fields[index] @@ -609,7 +666,7 @@ func parseTaggedFields(invoice *Invoice, fields []byte, net *chaincfg.Params) er // If we don't have enough field data left to read this length, // return error. 
if len(fields) < index+3+int(dataLength) { - return fmt.Errorf("invalid field length") + return ErrInvalidFieldLength } base32Data := fields[index+3 : index+3+int(dataLength)] @@ -624,7 +681,15 @@ func parseTaggedFields(invoice *Invoice, fields []byte, net *chaincfg.Params) er continue } - invoice.PaymentHash, err = parsePaymentHash(base32Data) + invoice.PaymentHash, err = parse32Bytes(base32Data) + case fieldTypeS: + if invoice.PaymentAddr != nil { + // We skip the field if we have already seen a + // supported one. + continue + } + + invoice.PaymentAddr, err = parse32Bytes(base32Data) case fieldTypeD: if invoice.Description != nil { // We skip the field if we have already seen a @@ -648,7 +713,7 @@ func parseTaggedFields(invoice *Invoice, fields []byte, net *chaincfg.Params) er continue } - invoice.DescriptionHash, err = parseDescriptionHash(base32Data) + invoice.DescriptionHash, err = parse32Bytes(base32Data) case fieldTypeX: if invoice.expiry != nil { // We skip the field if we have already seen a @@ -715,12 +780,12 @@ func parseFieldDataLength(data []byte) (uint16, error) { return uint16(data[0])<<5 | uint16(data[1]), nil } -// parsePaymentHash converts a 256-bit payment hash (encoded in base32) -// to *[32]byte. -func parsePaymentHash(data []byte) (*[32]byte, error) { +// parse32Bytes converts a 256-bit value (encoded in base32) to *[32]byte. This +// can be used for payment hashes, description hashes, payment addresses, etc. +func parse32Bytes(data []byte) (*[32]byte, error) { var paymentHash [32]byte - // As BOLT-11 states, a reader must skip over the payment hash field if + // As BOLT-11 states, a reader must skip over the 32-byte fields if // it does not have a length of 52, so avoid returning an error. 
if len(data) != hashBase32Len { return nil, nil @@ -766,27 +831,6 @@ func parseDestination(data []byte) (*btcec.PublicKey, error) { return btcec.ParsePubKey(base256Data, btcec.S256()) } -// parseDescriptionHash converts a 256-bit description hash (encoded in base32) -// to *[32]byte. -func parseDescriptionHash(data []byte) (*[32]byte, error) { - var descriptionHash [32]byte - - // As BOLT-11 states, a reader must skip over the description hash field - // if it does not have a length of 52, so avoid returning an error. - if len(data) != hashBase32Len { - return nil, nil - } - - hash, err := bech32.ConvertBits(data, 5, 8, false) - if err != nil { - return nil, err - } - - copy(descriptionHash[:], hash[:]) - - return &descriptionHash, nil -} - // parseExpiry converts the data (encoded in base32) into the expiry time. func parseExpiry(data []byte) (*time.Duration, error) { expiry, err := base32ToUint64(data) @@ -912,32 +956,14 @@ func parseFeatures(data []byte) (*lnwire.FeatureVector, error) { return nil, err } - fv := lnwire.NewFeatureVector(rawFeatures, InvoiceFeatures) - unknownFeatures := fv.UnknownRequiredFeatures() - if len(unknownFeatures) > 0 { - return nil, fmt.Errorf("invoice contains unknown required "+ - "features: %v", unknownFeatures) - } - - return fv, nil + return lnwire.NewFeatureVector(rawFeatures, lnwire.Features), nil } // writeTaggedFields writes the non-nil tagged fields of the Invoice to the // base32 buffer. func writeTaggedFields(bufferBase32 *bytes.Buffer, invoice *Invoice) error { if invoice.PaymentHash != nil { - // Convert 32 byte hash to 52 5-bit groups. 
- base32, err := bech32.ConvertBits(invoice.PaymentHash[:], 8, 5, - true) - if err != nil { - return err - } - if len(base32) != hashBase32Len { - return fmt.Errorf("invalid payment hash length: %d", - len(invoice.PaymentHash)) - } - - err = writeTaggedField(bufferBase32, fieldTypeP, base32) + err := writeBytes32(bufferBase32, fieldTypeP, *invoice.PaymentHash) if err != nil { return err } @@ -956,19 +982,9 @@ func writeTaggedFields(bufferBase32 *bytes.Buffer, invoice *Invoice) error { } if invoice.DescriptionHash != nil { - // Convert 32 byte hash to 52 5-bit groups. - descBase32, err := bech32.ConvertBits( - invoice.DescriptionHash[:], 8, 5, true) - if err != nil { - return err - } - - if len(descBase32) != hashBase32Len { - return fmt.Errorf("invalid description hash length: %d", - len(invoice.DescriptionHash)) - } - - err = writeTaggedField(bufferBase32, fieldTypeH, descBase32) + err := writeBytes32( + bufferBase32, fieldTypeH, *invoice.DescriptionHash, + ) if err != nil { return err } @@ -1072,7 +1088,15 @@ func writeTaggedFields(bufferBase32 *bytes.Buffer, invoice *Invoice) error { return err } } - if invoice.Features != nil && invoice.Features.SerializeSize32() > 0 { + if invoice.PaymentAddr != nil { + err := writeBytes32( + bufferBase32, fieldTypeS, *invoice.PaymentAddr, + ) + if err != nil { + return err + } + } + if invoice.Features.SerializeSize32() > 0 { var b bytes.Buffer err := invoice.Features.RawFeatureVector.EncodeBase32(&b) if err != nil { @@ -1088,6 +1112,18 @@ func writeTaggedFields(bufferBase32 *bytes.Buffer, invoice *Invoice) error { return nil } +// writeBytes32 encodes a 32-byte array as base32 and writes it to bufferBase32 +// under the passed fieldType. +func writeBytes32(bufferBase32 *bytes.Buffer, fieldType byte, b [32]byte) error { + // Convert 32 byte hash to 52 5-bit groups. 
+ base32, err := bech32.ConvertBits(b[:], 8, 5, true) + if err != nil { + return err + } + + return writeTaggedField(bufferBase32, fieldType, base32) +} + // writeTaggedField takes the type of a tagged data field, and the data of // the tagged field (encoded in base32), and writes the type, length and data // to the buffer. diff --git a/zpay32/invoice_internal_test.go b/zpay32/invoice_internal_test.go index 5d0c2cf177..b72c72e08a 100644 --- a/zpay32/invoice_internal_test.go +++ b/zpay32/invoice_internal_test.go @@ -314,10 +314,10 @@ func TestParseFieldDataLength(t *testing.T) { } } -// TestParsePaymentHash checks that the payment hash is properly parsed. +// TestParse32Bytes checks that the payment hash is properly parsed. // If the data does not have a length of 52 bytes, we skip over parsing the // field and do not return an error. -func TestParsePaymentHash(t *testing.T) { +func TestParse32Bytes(t *testing.T) { t.Parallel() testPaymentHashData, _ := bech32.ConvertBits(testPaymentHash[:], 8, 5, true) @@ -350,7 +350,7 @@ func TestParsePaymentHash(t *testing.T) { } for i, test := range tests { - paymentHash, err := parsePaymentHash(test.data) + paymentHash, err := parse32Bytes(test.data) if (err == nil) != test.valid { t.Errorf("payment hash decoding test %d failed: %v", i, err) return @@ -458,56 +458,6 @@ func TestParseDestination(t *testing.T) { } } -// TestParseDescriptionHash checks that the description hash is properly parsed. -// If the data does not have a length of 52 bytes, we skip over parsing the -// field and do not return an error. 
-func TestParseDescriptionHash(t *testing.T) { - t.Parallel() - - testDescriptionHashData, _ := bech32.ConvertBits(testDescriptionHash[:], 8, 5, true) - - tests := []struct { - data []byte - valid bool - result *[32]byte - }{ - { - data: []byte{}, - valid: true, - result: nil, // skip unknown length, not 52 bytes - }, - { - data: []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, - valid: true, - result: nil, // skip unknown length, not 52 bytes - }, - { - data: testDescriptionHashData, - valid: true, - result: &testDescriptionHash, - }, - { - data: append(testDescriptionHashData, 0x0), - valid: true, - result: nil, // skip unknown length, not 52 bytes - }, - } - - for i, test := range tests { - descriptionHash, err := parseDescriptionHash(test.data) - if (err == nil) != test.valid { - t.Errorf("description hash decoding test %d failed: %v", i, err) - return - } - if test.valid && !compareHashes(descriptionHash, test.result) { - t.Fatalf("test %d failed decoding description hash: "+ - "expected %x, got %x", - i, *test.result, *descriptionHash) - return - } - } -} - // TestParseExpiry checks that the expiry is properly parsed. func TestParseExpiry(t *testing.T) { t.Parallel() @@ -777,3 +727,75 @@ func TestParseRouteHint(t *testing.T) { } } } + +// TestParseTaggedFields checks that tagged field data is correctly parsed or +// errors as expected. +func TestParseTaggedFields(t *testing.T) { + t.Parallel() + + netParams := &chaincfg.SimNetParams + + tests := []struct { + name string + data []byte + wantErr error + }{ + { + name: "nil data", + data: nil, + }, + { + name: "empty data", + data: []byte{}, + }, + { + // Type 0xff cannot be encoded in a single 5-bit + // element, so it's technically invalid but + // parseTaggedFields doesn't error on non-5bpp + // compatible codes so we can use a code in tests which + // will never become known in the future. 
+ name: "valid unknown field", + data: []byte{0xff, 0x00, 0x00}, + }, + { + name: "unknown field valid data", + data: []byte{0xff, 0x00, 0x01, 0xab}, + }, + { + name: "only type specified", + data: []byte{0x0d}, + wantErr: ErrBrokenTaggedField, + }, + { + name: "not enough bytes for len", + data: []byte{0x0d, 0x00}, + wantErr: ErrBrokenTaggedField, + }, + { + name: "no bytes after len", + data: []byte{0x0d, 0x00, 0x01}, + wantErr: ErrInvalidFieldLength, + }, + { + name: "not enough bytes after len", + data: []byte{0x0d, 0x00, 0x02, 0x01}, + wantErr: ErrInvalidFieldLength, + }, + { + name: "not enough bytes after len with unknown type", + data: []byte{0xff, 0x00, 0x02, 0x01}, + wantErr: ErrInvalidFieldLength, + }, + } + for _, tc := range tests { + tc := tc // pin + t.Run(tc.name, func(t *testing.T) { + var invoice Invoice + gotErr := parseTaggedFields(&invoice, tc.data, netParams) + if tc.wantErr != gotErr { + t.Fatalf("Unexpected error. want=%v got=%v", + tc.wantErr, gotErr) + } + }) + } +} diff --git a/zpay32/invoice_test.go b/zpay32/invoice_test.go index 565c3fcff4..41687a6901 100644 --- a/zpay32/invoice_test.go +++ b/zpay32/invoice_test.go @@ -8,6 +8,7 @@ import ( "encoding/hex" "fmt" "reflect" + "strings" "testing" "time" @@ -27,7 +28,26 @@ var ( testMillisat25mBTC = lnwire.MilliSatoshi(2500000000) testMillisat20mBTC = lnwire.MilliSatoshi(2000000000) - testPaymentHashSlice, _ = hex.DecodeString("0001020304050607080900010203040506070809000102030405060708090102") + testPaymentHash = [32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x01, 0x02, + } + + testPaymentAddr = [32]byte{ + 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x01, 0x02, + 0x06, 0x07, 0x08, 0x09, 0x00, 0x01, 0x02, 0x03, + 0x08, 0x09, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + } + + specPaymentAddr = [32]byte{ + 
0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, + } testEmptyString = "" testCupOfCoffee = "1 cup coffee" @@ -92,8 +112,9 @@ var ( }, } + emptyFeatures = lnwire.NewFeatureVector(nil, lnwire.Features) + // Must be initialized in init(). - testPaymentHash [32]byte testDescriptionHash [32]byte ltcTestNetParams chaincfg.Params @@ -101,7 +122,6 @@ var ( ) func init() { - copy(testPaymentHash[:], testPaymentHashSlice[:]) copy(testDescriptionHash[:], testDescriptionHashSlice[:]) // Initialize litecoin testnet and mainnet params by applying key fields @@ -179,6 +199,7 @@ func TestDecodeEncode(t *testing.T) { Timestamp: time.Unix(1496314658, 0), DescriptionHash: &testDescriptionHash, Destination: testPubKey, + Features: emptyFeatures, } }, }, @@ -195,6 +216,7 @@ func TestDecodeEncode(t *testing.T) { Description: &testPleaseConsider, DescriptionHash: &testDescriptionHash, Destination: testPubKey, + Features: emptyFeatures, } }, }, @@ -209,6 +231,7 @@ func TestDecodeEncode(t *testing.T) { Timestamp: time.Unix(1496314658, 0), PaymentHash: &testPaymentHash, Destination: testPubKey, + Features: emptyFeatures, } }, }, @@ -224,6 +247,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, Description: &testPleaseConsider, Destination: testPubKey, + Features: emptyFeatures, } }, skipEncoding: true, // Skip encoding since we don't have the unknown fields to encode. @@ -240,6 +264,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, DescriptionHash: &testDescriptionHash, Destination: testPubKey, + Features: emptyFeatures, } }, skipEncoding: true, // Skip encoding since we don't have the unknown fields to encode. 
@@ -256,6 +281,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, Destination: testPubKey, DescriptionHash: &testDescriptionHash, + Features: emptyFeatures, } }, skipEncoding: true, // Skip encoding since we don't have the unknown fields to encode. @@ -271,6 +297,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, Description: &testCupOfCoffee, Destination: testPubKey, + Features: emptyFeatures, } }, beforeEncoding: func(i *Invoice) { @@ -291,6 +318,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, Description: &testPleaseConsider, Destination: testPubKey, + Features: emptyFeatures, } }, beforeEncoding: func(i *Invoice) { @@ -312,6 +340,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, Destination: testPubKey, Description: &testEmptyString, + Features: emptyFeatures, } }, }, @@ -371,6 +400,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, DescriptionHash: &testDescriptionHash, Destination: testPubKey, + Features: emptyFeatures, } }, beforeEncoding: func(i *Invoice) { @@ -393,6 +423,7 @@ func TestDecodeEncode(t *testing.T) { DescriptionHash: &testDescriptionHash, Destination: testPubKey, FallbackAddr: testAddrTestnet, + Features: emptyFeatures, } }, beforeEncoding: func(i *Invoice) { @@ -416,6 +447,7 @@ func TestDecodeEncode(t *testing.T) { Destination: testPubKey, FallbackAddr: testRustyAddr, RouteHints: [][]HopHint{testSingleHop}, + Features: emptyFeatures, } }, beforeEncoding: func(i *Invoice) { @@ -439,6 +471,7 @@ func TestDecodeEncode(t *testing.T) { Destination: testPubKey, FallbackAddr: testRustyAddr, RouteHints: [][]HopHint{testDoubleHop}, + Features: emptyFeatures, } }, beforeEncoding: func(i *Invoice) { @@ -461,6 +494,7 @@ func TestDecodeEncode(t *testing.T) { DescriptionHash: &testDescriptionHash, Destination: testPubKey, FallbackAddr: testAddrMainnetP2SH, + Features: emptyFeatures, } }, beforeEncoding: func(i *Invoice) { @@ -472,8 +506,8 
@@ func TestDecodeEncode(t *testing.T) { }, { // On mainnet, please send $30 coffee beans supporting - // features 1 and 9. - encodedInvoice: "lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdees9qzsze992adudgku8p05pstl6zh7av6rx2f297pv89gu5q93a0hf3g7lynl3xq56t23dpvah6u7y9qey9lccrdml3gaqwc6nxsl5ktzm464sq73t7cl", + // features 9, 15 and 99, using secret 0x11... + encodedInvoice: "lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdeessp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygs9q5sqqqqqqqqqqqqqqqpqsq67gye39hfg3zd8rgc80k32tvy9xk2xunwm5lzexnvpx6fd77en8qaq424dxgt56cag2dpt359k3ssyhetktkpqh24jqnjyw6uqd08sgptq44qu", valid: true, decodedInvoice: func() *Invoice { return &Invoice{ @@ -481,11 +515,12 @@ func TestDecodeEncode(t *testing.T) { MilliSat: &testMillisat25mBTC, Timestamp: time.Unix(1496314658, 0), PaymentHash: &testPaymentHash, + PaymentAddr: &specPaymentAddr, Description: &testCoffeeBeans, Destination: testPubKey, Features: lnwire.NewFeatureVector( - lnwire.NewRawFeatureVector(1, 9), - InvoiceFeatures, + lnwire.NewRawFeatureVector(9, 15, 99), + lnwire.Features, ), } }, @@ -498,21 +533,21 @@ func TestDecodeEncode(t *testing.T) { }, { // On mainnet, please send $30 coffee beans supporting - // features 1, 9, and 100. - encodedInvoice: "lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdees9q4pqqqqqqqqqqqqqqqqqqszk3ed62snp73037h4py4gry05eltlp0uezm2w9ajnerhmxzhzhsu40g9mgyx5v3ad4aqwkmvyftzk4k9zenz90mhjcy9hcevc7r3lx2sphzfxz7", - valid: false, - skipEncoding: true, + // features 9, 15, 99, and 100, using secret 0x11... 
+ encodedInvoice: "lnbc25m1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5vdhkven9v5sxyetpdeessp5zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygs9q4psqqqqqqqqqqqqqqqpqsqq40wa3khl49yue3zsgm26jrepqr2eghqlx86rttutve3ugd05em86nsefzh4pfurpd9ek9w2vp95zxqnfe2u7ckudyahsa52q66tgzcp6t2dyk", + valid: true, decodedInvoice: func() *Invoice { return &Invoice{ Net: &chaincfg.MainNetParams, MilliSat: &testMillisat25mBTC, Timestamp: time.Unix(1496314658, 0), PaymentHash: &testPaymentHash, + PaymentAddr: &specPaymentAddr, Description: &testCoffeeBeans, Destination: testPubKey, Features: lnwire.NewFeatureVector( - lnwire.NewRawFeatureVector(1, 9, 100), - InvoiceFeatures, + lnwire.NewRawFeatureVector(9, 15, 99, 100), + lnwire.Features, ), } }, @@ -536,6 +571,7 @@ func TestDecodeEncode(t *testing.T) { DescriptionHash: &testDescriptionHash, Destination: testPubKey, FallbackAddr: testAddrMainnetP2WPKH, + Features: emptyFeatures, } }, beforeEncoding: func(i *Invoice) { @@ -558,6 +594,7 @@ func TestDecodeEncode(t *testing.T) { DescriptionHash: &testDescriptionHash, Destination: testPubKey, FallbackAddr: testAddrMainnetP2WSH, + Features: emptyFeatures, } }, beforeEncoding: func(i *Invoice) { @@ -586,6 +623,25 @@ func TestDecodeEncode(t *testing.T) { return i }, }, + { + // Send 2500uBTC for a cup of coffee with a payment + // address. 
+ encodedInvoice: "lnbc2500u1pvjluezpp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdq5xysxxatsyp3k7enxv4jsnp4q0n326hr8v9zprg8gsvezcch06gfaqqhde2aj730yg0durunfhv66sp5qszsvpcgpyqsyps8pqysqqgzqvyqjqqpqgpsgpgqqypqxpq9qcrsusq8nx2hdt3st3ankwz23xy9w7udvqq3f0mdlpc6ga5ew3y67u4qkx8vu72ejg5x6tqhyclm28r7r0mg6lx9x3vls9g6glp2qy3y34cpry54xp", + valid: true, + decodedInvoice: func() *Invoice { + i, _ := NewInvoice( + &chaincfg.MainNetParams, + testPaymentHash, + time.Unix(1496314658, 0), + Amount(testMillisat2500uBTC), + Description(testCupOfCoffee), + Destination(testPubKey), + PaymentAddr(testPaymentAddr), + ) + + return i + }, + }, { // Decode a mainnet invoice while expecting active net to be testnet encodedInvoice: "lnbc241pveeq09pp5qqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqqqsyqcyq5rqwzqfqypqdqqnp4q0n326hr8v9zprg8gsvezcch06gfaqqhde2aj730yg0durunfhv66jd3m5klcwhq68vdsmx2rjgxeay5v0tkt2v5sjaky4eqahe4fx3k9sqavvce3capfuwv8rvjng57jrtfajn5dkpqv8yelsewtljwmmycq62k443", @@ -598,6 +654,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, Destination: testPubKey, Description: &testEmptyString, + Features: emptyFeatures, } }, skipEncoding: true, // Skip encoding since we were given the wrong net @@ -615,6 +672,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, DescriptionHash: &testDescriptionHash, Destination: testPubKey, + Features: emptyFeatures, } }, }, @@ -630,6 +688,7 @@ func TestDecodeEncode(t *testing.T) { PaymentHash: &testPaymentHash, DescriptionHash: &testDescriptionHash, Destination: testPubKey, + Features: emptyFeatures, } }, }, @@ -650,7 +709,7 @@ func TestDecodeEncode(t *testing.T) { } if test.valid { - if err := compareInvoices(test.decodedInvoice(), invoice); err != nil { + if err := compareInvoices(decodedInvoice, invoice); err != nil { t.Errorf("Invoice decoding result %d not as expected: %v", i, err) return } @@ -843,6 +902,56 @@ func TestMaxInvoiceLength(t *testing.T) { } } +// TestInvoiceChecksumMalleability ensures that 
the malleability of the +// checksum in bech32 strings cannot cause a signature to become valid and +// therefore cause a wrong destination to be decoded for invoices where the +// destination is extracted from the signature. +func TestInvoiceChecksumMalleability(t *testing.T) { + privKeyHex := "a50f3bdf9b6c4b1fdd7c51a8bbf4b5855cf381f413545ed155c0282f4412a1b1" + privKeyBytes, _ := hex.DecodeString(privKeyHex) + chain := &chaincfg.SimNetParams + var payHash [32]byte + ts := time.Unix(0, 0) + + privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), privKeyBytes) + msgSigner := MessageSigner{ + SignCompact: func(hash []byte) ([]byte, error) { + return btcec.SignCompact(btcec.S256(), privKey, hash, true) + }, + } + opts := []func(*Invoice){Description("test")} + invoice, err := NewInvoice(chain, payHash, ts, opts...) + if err != nil { + t.Fatal(err) + } + + encoded, err := invoice.Encode(msgSigner) + if err != nil { + t.Fatal(err) + } + + // Changing a bech32 string whose checksum ends in "p" to "(q*)p" can + // cause the checksum to return as a valid bech32 string _but_ the + // signature field immediately preceding it would be mutated. In rare + // cases (about 3%) it is still seen as a valid signature and public + // key recovery causes a different node than the originally intended + // one to be derived. + // + // We thus modify the checksum here and verify the invoice gets broken + // enough that it fails to decode. 
+ if !strings.HasSuffix(encoded, "p") { + t.Logf("Invoice: %s", encoded) + t.Fatalf("Generated invoice checksum does not end in 'p'") + } + encoded = encoded[:len(encoded)-1] + "qp" + + _, err = Decode(encoded, chain) + if err == nil { + t.Fatalf("Did not get expected error when decoding invoice") + } + +} + func compareInvoices(expected, actual *Invoice) error { if !reflect.DeepEqual(expected.Net, actual.Net) { return fmt.Errorf("expected net %v, got %v", @@ -903,7 +1012,7 @@ func compareInvoices(expected, actual *Invoice) error { if !reflect.DeepEqual(expected.Features, actual.Features) { return fmt.Errorf("expected features %v, got %v", - expected.Features.RawFeatureVector, actual.Features.RawFeatureVector) + expected.Features, actual.Features) } return nil