Skip to content

Separately shut down the JSON-RPC subsystem in a DB transaction #1755

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jul 26, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 2 additions & 4 deletions lightningd/jsonrpc.c
Original file line number Diff line number Diff line change
Expand Up @@ -447,7 +447,7 @@ static void parse_request(struct json_connection *jcon, const jsmntok_t tok[])

/* This is a convenient tal parent for duration of command
* (which may outlive the conn!). */
c = tal(jcon->ld, struct command);
c = tal(jcon->ld->rpc_listener, struct command);
c->jcon = jcon;
c->ld = jcon->ld;
c->pending = false;
Expand Down Expand Up @@ -650,8 +650,7 @@ void setup_jsonrpc(struct lightningd *ld, const char *rpc_filename)
err(1, "Listening on '%s'", rpc_filename);

log_debug(ld->log, "Listening on '%s'", rpc_filename);
/* Technically this is a leak, but there's only one */
notleak(io_new_listener(ld, fd, incoming_jcon_connected, ld));
ld->rpc_listener = io_new_listener(ld->rpc_filename, fd, incoming_jcon_connected, ld);
}

/**
Expand Down Expand Up @@ -790,4 +789,3 @@ bool json_tok_wtx(struct wallet_tx * tx, const char * buffer,
}
return true;
}

8 changes: 8 additions & 0 deletions lightningd/lightningd.c
Original file line number Diff line number Diff line change
Expand Up @@ -462,6 +462,14 @@ int main(int argc, char *argv[])
}

shutdown_subdaemons(ld);

/* Clean up the JSON-RPC. This needs to happen in a DB transaction since
* it might actually be touching the DB in some destructors, e.g.,
* unreserving UTXOs (see #1737) */
db_begin_transaction(ld->wallet->db);
tal_free(ld->rpc_listener);
db_commit_transaction(ld->wallet->db);

close(pid_fd);
remove(ld->pidfile);

Expand Down
7 changes: 7 additions & 0 deletions lightningd/lightningd.h
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,15 @@ struct lightningd {

/* Our config dir, and rpc file */
char *config_dir;

/* Location of the RPC socket. */
char *rpc_filename;

/* The listener for the RPC socket. Can be shut down separately from the
* rest of the daemon to allow a clean shutdown, which frees all pending
* cmds in a DB transaction. */
struct io_listener *rpc_listener;

/* Configuration file name */
char *config_filename;
/* Configuration settings. */
Expand Down
37 changes: 37 additions & 0 deletions tests/test_rpc.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
from fixtures import *  # noqa: F401,F403

import os
import signal
import unittest

# Load build-time configuration (KEY=VALUE lines) from config.vars.
with open('config.vars') as configfile:
    config = {key: value
              for key, value in (line.rstrip().split('=', 1)
                                 for line in configfile)}

# Developer mode can be forced via the environment, falling back to the
# build configuration.
DEVELOPER = os.getenv("DEVELOPER", config['DEVELOPER']) == "1"


@unittest.skipIf(not DEVELOPER, "needs --dev-disconnect")
def test_stop_pending_fundchannel(node_factory, executor):
"""Stop the daemon while waiting for an accept_channel

This used to crash the node, since we were calling unreserve_utxo while
freeing the daemon, but that needs a DB transaction to be open.

"""
l1 = node_factory.get_node()
l2 = node_factory.get_node()

l1.rpc.connect(l2.info['id'], 'localhost', l2.port)

# We want l2 to stop replying altogether, not disconnect
os.kill(l2.daemon.proc.pid, signal.SIGSTOP)

# The fundchannel call will not terminate so run it in a future
executor.submit(l1.fund_channel, l2, 10**6)
l1.daemon.wait_for_log('peer_out WIRE_OPEN_CHANNEL')

l1.rpc.stop()

# Now allow l2 a clean shutdown
os.kill(l2.daemon.proc.pid, signal.SIGCONT)
l2.rpc.stop()