diff --git a/.github/workflows/pypi.yml b/.github/workflows/pypi.yml
index 3ab335134eea..7008297bb178 100644
--- a/.github/workflows/pypi.yml
+++ b/.github/workflows/pypi.yml
@@ -67,6 +67,13 @@ jobs:
       - name: Install uv
         uses: astral-sh/setup-uv@v5
 
+      - name: Update pyln versions
+        id: update-versions
+        run: |
+          export VERSION=$(git describe --tags --abbrev=0)
+          echo "Pyln VERSION: $VERSION"
+          make update-pyln-versions NEW_VERSION=$VERSION
+
       - name: Publish distribution 📦 to Test PyPI
         if: github.repository == 'ElementsProject/lightning' && steps.set-values.outputs.DISTLOCATION == 'test'
         env:
@@ -84,10 +91,6 @@ jobs:
           WORKDIR: ${{ matrix.WORKDIR }}
         run: |
           echo "UV VERSION PUBLISH: $(uv --version)"
-          cd ${{ env.WORKDIR }}
-          export VERSION=$(git describe --tags --abbrev=0)
           echo "Pyln VERSION: $VERSION"
-          make update-pyln-versions NEW_VERSION=$VERSION
-          cd /github/workspace
           uv build --package ${{ matrix.PACKAGE }}
           uv publish
diff --git a/.version b/.version
index 739af04dcf62..381fa89eff41 100644
--- a/.version
+++ b/.version
@@ -1 +1 @@
-25.09.1
+25.09.2
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 92a2bd55631a..b463403274bf 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,43 @@
 All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+## [25.09.2] - 2025-11-04: "Hot Wallet Guardian III"
+
+`Bookkeeper` and `xpay` users: please upgrade!
+This point release includes fixes for `xpay` and `bookkeeper`, and optimizations for large nodes using `bookkeeper`.
+
+### Changed
+
+ - plugins: the `sql` plugin now keeps an index on `channelmoves` by `payment_hash`. ([#8618])
+ - plugins: `bookkeeper` reduced logging for large imports to increase speed. ([#8657])
+ - plugins: `sql` initial load for tables is much faster (e.g. 82 to 17 seconds for a very large channelmoves table). ([#8657])
+
+### Fixed
+
+ - Core Lightning builds for Ubuntu Focal, Jammy and Noble are deterministic again. ([#8547])
+ - Reproducible build for Ubuntu Noble by updating the sqlite3 version and shasums. ([#8551])
+ - plugins: `bookkeeper`'s first invocation after migrating from a release prior to 25.09 with a very large database will not crash. ([#8618])
+ - `xpay` would sometimes leave payment parts' status `pending` in failure cases (as seen in listpays or listsendpays). ([#8635])
+ - plugins: `askrene` could enter an infinite loop when maxparts is restricted. ([#8636])
+ - plugins: `bcli` would fail with "Argument list too long" when sending a giant tx. ([#8639])
+ - JSON-RPC: dealing with giant PSBTs (700 inputs!) is now much faster. ([#8639])
+ - plugins: assertion crash in `bookkeeper` when fresh records arrive while multiple queries are in progress. ([#8642])
+ - plugins: `bookkeeper` now correctly restores chain event blockheights it has derived. ([#8649])
+
+[#8529]: https://github.com/ElementsProject/lightning/pull/8529
+[#8547]: https://github.com/ElementsProject/lightning/pull/8547
+[#8551]: https://github.com/ElementsProject/lightning/pull/8551
+[#8607]: https://github.com/ElementsProject/lightning/pull/8607
+[#8618]: https://github.com/ElementsProject/lightning/pull/8618
+[#8635]: https://github.com/ElementsProject/lightning/pull/8635
+[#8636]: https://github.com/ElementsProject/lightning/pull/8636
+[#8639]: https://github.com/ElementsProject/lightning/pull/8639
+[#8642]: https://github.com/ElementsProject/lightning/pull/8642
+[#8649]: https://github.com/ElementsProject/lightning/pull/8649
+[#8657]: https://github.com/ElementsProject/lightning/pull/8657
+[25.09.2]: https://github.com/ElementsProject/lightning/releases/tag/v25.09.2
+
 ## [25.09.1] - 2025-10-15: "Hot Wallet Guardian II"
 
 Several important fixes, please upgrade!
diff --git a/Makefile b/Makefile
index 1fd14cca9e3c..7c0797fab26b 100644
--- a/Makefile
+++ b/Makefile
@@ -354,7 +354,7 @@ RUST_TARGET_DIR = target/$(TARGET)/$(RUST_PROFILE)
 endif
 
 ifneq ($(RUST_PROFILE),debug)
-CARGO_OPTS := --profile=$(RUST_PROFILE) --quiet
+CARGO_OPTS := --profile=$(RUST_PROFILE) --locked --quiet
 else
 CARGO_OPTS := --quiet
 endif
diff --git a/common/setup.c b/common/setup.c
index 56ff6828473a..8db367eec9d6 100644
--- a/common/setup.c
+++ b/common/setup.c
@@ -11,7 +11,6 @@
 static void *cln_wally_tal(size_t size)
 {
 	assert(wally_tal_ctx);
-	assert(tal_check(wally_tal_ctx, "cln_wally_tal ctx check"));
 	return tal_arr_label(wally_tal_ctx, u8, size, "cln_wally_tal");
 }
 
diff --git a/contrib/pyln-client/pyln/client/__init__.py b/contrib/pyln-client/pyln/client/__init__.py
index 1e880b79f450..60bab464b961 100644
--- a/contrib/pyln-client/pyln/client/__init__.py
+++ b/contrib/pyln-client/pyln/client/__init__.py
@@ -4,7 +4,7 @@
 from .gossmapstats import GossmapStats
 from .version import NodeVersion
 
-__version__ = "25.09.1"
+__version__ = "25.09.2"
 
 __all__ = [
     "LightningRpc",
diff --git a/contrib/pyln-client/pyproject.toml b/contrib/pyln-client/pyproject.toml
index 3a45091bd663..bfd55a2ec35b 100644
--- a/contrib/pyln-client/pyproject.toml
+++ b/contrib/pyln-client/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "pyln-client"
-version = "25.09.1"
+version = "25.09.2"
 description = "Client library and plugin library for Core Lightning"
 authors = [{ name = "Christian Decker", email = "decker.christian@gmail.com" }]
 license = { text = "BSD-MIT" }
diff --git a/contrib/pyln-proto/pyln/proto/__init__.py b/contrib/pyln-proto/pyln/proto/__init__.py
index 7a5462ef8525..8a40865fd0eb 100644
--- a/contrib/pyln-proto/pyln/proto/__init__.py
+++ b/contrib/pyln-proto/pyln/proto/__init__.py
@@ -4,7 +4,7 @@
 from .onion import OnionPayload, TlvPayload, LegacyOnionPayload
 from .wire import LightningConnection, LightningServerSocket
 
-__version__ = "25.09.1"
+__version__ = "25.09.2"
 
 __all__ = [
     "Invoice",
diff --git a/contrib/pyln-proto/pyproject.toml b/contrib/pyln-proto/pyproject.toml
index 07761dfc62c6..5c3125ad1169 100644
--- a/contrib/pyln-proto/pyproject.toml
+++ b/contrib/pyln-proto/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "pyln-proto"
-version = "25.09.1"
+version = "25.09.2"
 description = "This package implements some of the Lightning Network protocol in pure python. It is intended for protocol testing and some minor tooling only. It is not deemed secure enough to handle any amount of real funds (you have been warned!)."
 authors = [
     {name = "Christian Decker", email = "decker.christian@gmail.com"}
diff --git a/contrib/pyln-testing/pyln/testing/__init__.py b/contrib/pyln-testing/pyln/testing/__init__.py
index ebfcdf4e46f2..c63ff7e7c3ff 100644
--- a/contrib/pyln-testing/pyln/testing/__init__.py
+++ b/contrib/pyln-testing/pyln/testing/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "25.09.1"
+__version__ = "25.09.2"
 
 __all__ = [
     "__version__",
diff --git a/contrib/pyln-testing/pyproject.toml b/contrib/pyln-testing/pyproject.toml
index 7852c091c4a7..9c3bcc5c5e02 100644
--- a/contrib/pyln-testing/pyproject.toml
+++ b/contrib/pyln-testing/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "pyln-testing"
-version = "25.09.1"
+version = "25.09.2"
 description = "Test your Core Lightning integration, plugins or whatever you want"
 authors = [{ name = "Christian Decker", email = "decker.christian@gmail.com" }]
 license = { text = "BSD-MIT" }
diff --git a/contrib/reprobuild/Dockerfile.focal b/contrib/reprobuild/Dockerfile.focal
index e61524d40187..4d960aa6906b 100644
--- a/contrib/reprobuild/Dockerfile.focal
+++ b/contrib/reprobuild/Dockerfile.focal
@@ -2,6 +2,7 @@ FROM focal
 
 ENV TZ=UTC
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+ENV SOURCE_DATE_EPOCH=1672531200
 ENV RUST_PROFILE=release
 ENV PATH=/root/.pyenv/shims:/root/.pyenv/bin:/root/.cargo/bin:/root/.local/bin:$PATH
 ENV PROTOC_VERSION=29.4
diff --git a/contrib/reprobuild/Dockerfile.jammy b/contrib/reprobuild/Dockerfile.jammy
index b363bc3b2f83..3f156a6f6658 100644
--- a/contrib/reprobuild/Dockerfile.jammy
+++ b/contrib/reprobuild/Dockerfile.jammy
@@ -2,6 +2,7 @@ FROM jammy
 
 ENV TZ=UTC
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+ENV SOURCE_DATE_EPOCH=1672531200
 ENV RUST_PROFILE=release
 ENV PATH=/root/.pyenv/shims:/root/.pyenv/bin:/root/.cargo/bin:/root/.local/bin:$PATH
 ENV PROTOC_VERSION=29.4
diff --git a/contrib/reprobuild/Dockerfile.noble b/contrib/reprobuild/Dockerfile.noble
index f9c4506b312c..a630596bd765 100644
--- a/contrib/reprobuild/Dockerfile.noble
+++ b/contrib/reprobuild/Dockerfile.noble
@@ -2,6 +2,7 @@ FROM ubuntu:noble
 
 ENV TZ=UTC
 RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
+ENV SOURCE_DATE_EPOCH=1672531200
 ENV RUST_PROFILE=release
 ENV PATH=/root/.pyenv/shims:/root/.pyenv/bin:/root/.cargo/bin:/root/.local/bin:$PATH
 ENV PROTOC_VERSION=29.4
diff --git a/doc/lightningd-config.5.md b/doc/lightningd-config.5.md
index 7a6d9fcd8f31..4a1a2ced51cf 100644
--- a/doc/lightningd-config.5.md
+++ b/doc/lightningd-config.5.md
@@ -538,6 +538,10 @@ command, so the invoices can also be paid onchain.
 
 Setting this makes `xpay` wait until all parts have failed/succeeded before returning.  Usually this is unnecessary, as xpay will return on the first success (we have the preimage; if they don't take all the parts, that's their problem) or failure (the destination could succeed another part, but it would mean it was only partially paid).  The default is `false`.
 
+* **askrene-timeout**=*SECONDS* [plugin `askrene`, *dynamic*]
+
+  This option makes the `getroutes` call fail if it takes more than this many seconds.  Setting it to zero is a fun way to ensure your node never makes payments.
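[Editor's illustration — not part of the patch] Since `askrene-timeout` is marked *dynamic*, it can also be changed at runtime with `setconfig`, exactly as the new `test_askrene_timeout` test below does. A minimal pyln-client sketch; the RPC socket path is an assumption:

    from pyln.client import LightningRpc

    # Hypothetical socket path; adjust to your own lightning-dir.
    rpc = LightningRpc("/home/user/.lightning/bitcoin/lightning-rpc")

    # Allow getroutes up to 30 seconds (the plugin's default is 10).
    rpc.setconfig("askrene-timeout", 30)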
+
 ### Networking options
 
 Note that for simple setups, the implicit *autolisten* option does the
diff --git a/lightningd/pay.c b/lightningd/pay.c
index bd85e732d597..6c1e32c067da 100644
--- a/lightningd/pay.c
+++ b/lightningd/pay.c
@@ -2080,19 +2080,11 @@ static struct command_result *json_injectpaymentonion(struct command *cmd,
 	if (command_check_only(cmd))
 		return command_check_done(cmd);
 
-	register_payment_and_waiter(cmd,
-				    payment_hash,
-				    *partid, *groupid,
-				    *destination_msat, *msat, AMOUNT_MSAT(0),
-				    label, invstring, local_invreq_id,
-				    &shared_secret,
-				    destination);
-
-	/* If unknown, we set this equal (so accounting logs 0 fees) */
-	if (amount_msat_eq(*destination_msat, AMOUNT_MSAT(0)))
-		*destination_msat = *msat;
 	failmsg = send_htlc_out(tmpctx, next, *msat,
-				*cltv, *destination_msat,
+				*cltv,
+				/* If unknown, we set this equal (so accounting logs 0 fees) */
+				amount_msat_eq(*destination_msat, AMOUNT_MSAT(0))
+				? *msat : *destination_msat,
 				payment_hash, next_path_key, NULL,
 				*partid, *groupid,
 				serialize_onionpacket(tmpctx, rs->next),
@@ -2102,6 +2094,16 @@ static struct command_result *json_injectpaymentonion(struct command *cmd,
 				    "Could not send to first peer: %s",
 				    onion_wire_name(fromwire_peektype(failmsg)));
 	}
+
+	/* Now the HTLC is created, we can add the payment as pending */
+	register_payment_and_waiter(cmd,
+				    payment_hash,
+				    *partid, *groupid,
+				    *destination_msat, *msat, AMOUNT_MSAT(0),
+				    label, invstring, local_invreq_id,
+				    &shared_secret,
+				    destination);
+
 	return command_still_pending(cmd);
 }
diff --git a/plugins/askrene/askrene.c b/plugins/askrene/askrene.c
index 9664d2487d7f..a683c0968a9a 100644
--- a/plugins/askrene/askrene.c
+++ b/plugins/askrene/askrene.c
@@ -613,13 +613,15 @@ static struct command_result *do_getroutes(struct command *cmd,
 	/* Compute the routes. At this point we might select between multiple
 	 * algorithms. Right now there is only one algorithm available. */
 	struct timemono time_start = time_mono();
+	struct timemono deadline = timemono_add(time_start,
+						time_from_sec(askrene->route_seconds));
 	if (info->dev_algo == ALGO_SINGLE_PATH) {
-		err = single_path_routes(rq, rq, srcnode, dstnode, info->amount,
+		err = single_path_routes(rq, rq, deadline, srcnode, dstnode, info->amount,
 					 info->maxfee, info->finalcltv,
 					 info->maxdelay, &flows, &probability);
 	} else {
 		assert(info->dev_algo == ALGO_DEFAULT);
-		err = default_routes(rq, rq, srcnode, dstnode, info->amount,
+		err = default_routes(rq, rq, deadline, srcnode, dstnode, info->amount,
 				     info->maxfee, info->finalcltv,
 				     info->maxdelay, &flows, &probability);
 	}
@@ -1301,7 +1303,8 @@ static const char *init(struct command *init_cmd,
 			const char *buf UNUSED,
 			const jsmntok_t *config UNUSED)
 {
 	struct plugin *plugin = init_cmd->plugin;
-	struct askrene *askrene = tal(plugin, struct askrene);
+	struct askrene *askrene = get_askrene(plugin);
+	askrene->plugin = plugin;
 
 	list_head_init(&askrene->layers);
 	askrene->reserved = new_reserve_htable(askrene);
@@ -1327,7 +1330,18 @@ static const char *init(struct command *init_cmd,
 
 int main(int argc, char *argv[])
 {
+	struct askrene *askrene;
 	setup_locale();
-	plugin_main(argv, init, NULL, PLUGIN_RESTARTABLE, true, NULL, commands, ARRAY_SIZE(commands),
-		    NULL, 0, NULL, 0, NULL, 0, NULL);
+
+	askrene = tal(NULL, struct askrene);
+	askrene->route_seconds = 10;
+	plugin_main(argv, init, take(askrene), PLUGIN_RESTARTABLE, true, NULL, commands, ARRAY_SIZE(commands),
+		    NULL, 0, NULL, 0, NULL, 0,
+		    plugin_option_dynamic("askrene-timeout",
+					  "int",
+					  "How many seconds to try before giving up on calculating a route."
+					  " Defaults to 10 seconds",
+					  u32_option, u32_jsonfmt,
+					  &askrene->route_seconds),
+		    NULL);
 }
diff --git a/plugins/askrene/askrene.h b/plugins/askrene/askrene.h
index 21805e64ff5b..a9a809737329 100644
--- a/plugins/askrene/askrene.h
+++ b/plugins/askrene/askrene.h
@@ -34,6 +34,8 @@ struct askrene {
 	struct node_id my_id;
 	/* Aux command for layer */
 	struct command *layer_cmd;
+	/* How long before we abort trying to find a route? */
+	u32 route_seconds;
 };
 
 /* Information for a single route query. */
diff --git a/plugins/askrene/mcf.c b/plugins/askrene/mcf.c
index 4614fb49c5d9..1a488f1ddec6 100644
--- a/plugins/askrene/mcf.c
+++ b/plugins/askrene/mcf.c
@@ -1345,6 +1345,7 @@ static bool check_htlc_max_limits(struct route_query *rq, struct flow **flows)
  */
 static const char *
 linear_routes(const tal_t *ctx, struct route_query *rq,
+	      struct timemono deadline,
 	      const struct gossmap_node *srcnode,
 	      const struct gossmap_node *dstnode,
 	      struct amount_msat amount, struct amount_msat maxfee,
 	      u32 finalcltv, u32 maxdelay,
@@ -1376,6 +1377,14 @@ linear_routes(const tal_t *ctx, struct route_query *rq,
 
 	while (!amount_msat_is_zero(amount_to_deliver)) {
 		size_t num_parts, parts_slots, excess_parts;
+		u32 bottleneck_idx;
+
+		if (timemono_after(time_mono(), deadline)) {
+			error_message = rq_log(ctx, rq, LOG_BROKEN,
+					       "%s: timed out after deadline",
+					       __func__);
+			goto fail;
+		}
 
 		/* FIXME: This algorithm to limit the number of parts is dumb
 		 * for two reasons:
@@ -1423,7 +1432,7 @@ linear_routes(const tal_t *ctx, struct route_query *rq,
 		}
 
 		error_message =
-		    refine_flows(ctx, rq, amount_to_deliver, &new_flows);
+		    refine_flows(ctx, rq, amount_to_deliver, &new_flows, &bottleneck_idx);
 		if (error_message)
 			goto fail;
 
@@ -1458,14 +1467,19 @@ linear_routes(const tal_t *ctx, struct route_query *rq,
 			excess_parts = 1;
 		} else
 			excess_parts = 0;
-		if (excess_parts > 0 &&
-		    !remove_flows(&new_flows, excess_parts)) {
-			error_message = rq_log(ctx, rq, LOG_BROKEN,
-					       "%s: failed to remove %zu"
-					       " flows from a set of %zu",
-					       __func__, excess_parts,
-					       tal_count(new_flows));
-			goto fail;
+		if (excess_parts > 0) {
+			/* If we removed all the flows we found, avoid selecting
+			 * them again by disabling one. */
+			if (excess_parts == tal_count(new_flows))
+				bitmap_set_bit(rq->disabled_chans, bottleneck_idx);
+			if (!remove_flows(&new_flows, excess_parts)) {
+				error_message = rq_log(ctx, rq, LOG_BROKEN,
+						       "%s: failed to remove %zu"
+						       " flows from a set of %zu",
+						       __func__, excess_parts,
+						       tal_count(new_flows));
+				goto fail;
+			}
 		}
 
 		/* Is this set of flows too expensive?
@@ -1634,17 +1648,19 @@ linear_routes(const tal_t *ctx, struct route_query *rq,
 }
 
 const char *default_routes(const tal_t *ctx, struct route_query *rq,
+			   struct timemono deadline,
 			   const struct gossmap_node *srcnode,
 			   const struct gossmap_node *dstnode,
 			   struct amount_msat amount,
 			   struct amount_msat maxfee, u32 finalcltv,
 			   u32 maxdelay, struct flow ***flows,
 			   double *probability)
 {
-	return linear_routes(ctx, rq, srcnode, dstnode, amount, maxfee,
+	return linear_routes(ctx, rq, deadline, srcnode, dstnode, amount, maxfee,
 			     finalcltv, maxdelay, flows, probability,
 			     minflow);
 }
 
 const char *single_path_routes(const tal_t *ctx, struct route_query *rq,
+			       struct timemono deadline,
 			       const struct gossmap_node *srcnode,
 			       const struct gossmap_node *dstnode,
 			       struct amount_msat amount,
@@ -1652,7 +1668,7 @@ const char *single_path_routes(const tal_t *ctx, struct route_query *rq,
 			       u32 maxdelay, struct flow ***flows,
 			       double *probability)
 {
-	return linear_routes(ctx, rq, srcnode, dstnode, amount, maxfee,
+	return linear_routes(ctx, rq, deadline, srcnode, dstnode, amount, maxfee,
 			     finalcltv, maxdelay, flows, probability,
 			     single_path_flow);
 }
diff --git a/plugins/askrene/mcf.h b/plugins/askrene/mcf.h
index 448aee27a40c..7d60159063d6 100644
--- a/plugins/askrene/mcf.h
+++ b/plugins/askrene/mcf.h
@@ -64,6 +64,7 @@ struct amount_msat linear_flow_cost(const struct flow *flow,
 /* A wrapper to the min. cost flow solver that actually takes into consideration
  * the extra msats per channel needed to pay for fees. */
 const char *default_routes(const tal_t *ctx, struct route_query *rq,
+			   struct timemono deadline,
 			   const struct gossmap_node *srcnode,
 			   const struct gossmap_node *dstnode,
 			   struct amount_msat amount,
@@ -73,6 +74,7 @@ const char *default_routes(const tal_t *ctx, struct route_query *rq,
 
 /* A wrapper to the single-path constrained solver. */
 const char *single_path_routes(const tal_t *ctx, struct route_query *rq,
+			       struct timemono deadline,
 			       const struct gossmap_node *srcnode,
 			       const struct gossmap_node *dstnode,
 			       struct amount_msat amount,
diff --git a/plugins/askrene/refine.c b/plugins/askrene/refine.c
index 5335a9dfaac9..00ee4f75ced9 100644
--- a/plugins/askrene/refine.c
+++ b/plugins/askrene/refine.c
@@ -235,16 +235,25 @@ static int revcmp_flows(const size_t *a, const size_t *b, struct flow **flows)
 // -> check that htlc_max are all satisfied
 // -> check that (x+1) at least one htlc_max is violated
 /* Given the channel constraints, return the maximum amount that can be
- * delivered. */
-static struct amount_msat path_max_deliverable(struct channel_data *path)
+ * delivered.  Sets *bottleneck_idx to one of the constraining channels' idx, if non-NULL */
+static struct amount_msat path_max_deliverable(struct channel_data *path,
+					       u32 *bottleneck_idx)
 {
 	struct amount_msat deliver = AMOUNT_MSAT(-1);
 
 	for (size_t i = 0; i < tal_count(path); i++) {
 		deliver = amount_msat_sub_fee(deliver,
					      path[i].fee_base_msat,
					      path[i].fee_proportional_millionths);
-		deliver = amount_msat_min(deliver, path[i].htlc_max);
-		deliver = amount_msat_min(deliver, path[i].liquidity_max);
+		if (amount_msat_greater(deliver, path[i].htlc_max)) {
+			if (bottleneck_idx)
+				*bottleneck_idx = path[i].idx;
+			deliver = path[i].htlc_max;
+		}
+		if (amount_msat_greater(deliver, path[i].liquidity_max)) {
+			if (bottleneck_idx)
+				*bottleneck_idx = path[i].idx;
+			deliver = path[i].liquidity_max;
+		}
 	}
 	return deliver;
 }
@@ -477,9 +486,9 @@ static void write_selected_flows(const tal_t *ctx, size_t *flows_index,
 	tal_free(tmp_flows);
 }
 
-/* FIXME: on failure return error message */
 const char *refine_flows(const tal_t *ctx, struct route_query *rq,
-			 struct amount_msat deliver, struct flow ***flows)
+			 struct amount_msat deliver, struct flow ***flows,
+			 u32 *bottleneck_idx)
 {
 	const tal_t *working_ctx = tal(ctx, tal_t);
 	const char *error_message = NULL;
@@ -499,7 +508,7 @@ const char *refine_flows(const tal_t *ctx, struct route_query *rq,
 	for (size_t i = 0; i < tal_count(channel_mpp_cache); i++) {
 		// FIXME: does path_max_deliverable work for a single
 		// channel with 0 fees?
-		max_deliverable[i] = path_max_deliverable(channel_mpp_cache[i]);
+		max_deliverable[i] = path_max_deliverable(channel_mpp_cache[i], bottleneck_idx);
 		min_deliverable[i] = path_min_deliverable(channel_mpp_cache[i]);
 
 		/* We use an array of indexes to keep track of the order
 		 * of the flows. Likewise flows can be removed by simply
@@ -578,7 +587,7 @@ void squash_flows(const tal_t *ctx, struct route_query *rq,
 		struct short_channel_id_dir scidd;
 
 		flows_index[i] = i;
 		paths_str[i] = tal_strdup(working_ctx, "");
-		max_deliverable[i] = path_max_deliverable(channel_mpp_cache[i]);
+		max_deliverable[i] = path_max_deliverable(channel_mpp_cache[i], NULL);
 
 		for (size_t j = 0; j < tal_count(flow->path); j++) {
 			scidd.scid =
diff --git a/plugins/askrene/refine.h b/plugins/askrene/refine.h
index c0d60109ae8d..fcd64be21be6 100644
--- a/plugins/askrene/refine.h
+++ b/plugins/askrene/refine.h
@@ -22,9 +22,13 @@ bool create_flow_reservations_verify(const struct route_query *rq,
 				     const struct flow *flow);
 
 /* Modify flows to meet HTLC min/max requirements.
- * It takes into account the exact value of the fees expected at each hop. */
+ * It takes into account the exact value of the fees expected at each hop.
+ * If we reduce flows because one is too large for a channel, *bottleneck_idx
+ * is set to the idx of a channel which caused a reduction (if non-NULL).
+ */
 const char *refine_flows(const tal_t *ctx, struct route_query *rq,
-			 struct amount_msat deliver, struct flow ***flows);
+			 struct amount_msat deliver, struct flow ***flows,
+			 u32 *bottleneck_idx);
 
 /* Duplicated flows are merged into one. This saves in base fee and HTLC
  * fees. */
diff --git a/plugins/bcli.c b/plugins/bcli.c
index 9f17282fdc4b..c9d6187c9608 100644
--- a/plugins/bcli.c
+++ b/plugins/bcli.c
@@ -76,6 +76,7 @@ struct bitcoin_cli {
 	int *exitstatus;
 	pid_t pid;
 	const char **args;
+	const char **stdinargs;
 	struct timeabs start;
 	enum bitcoind_prio prio;
 	char *output;
@@ -95,7 +96,8 @@ static void add_arg(const char ***args, const char *arg TAKES)
 	tal_arr_expand(args, arg);
 }
 
-static const char **gather_argsv(const tal_t *ctx, const char *cmd, va_list ap)
+/* If stdinargs is non-NULL, that is where we put additional args */
+static const char **gather_argsv(const tal_t *ctx, const char ***stdinargs, const char *cmd, va_list ap)
 {
 	const char **args = tal_arr(ctx, const char *, 1);
 	const char *arg;
@@ -128,23 +130,30 @@ static const char **gather_argsv(const tal_t *ctx, const char *cmd, va_list ap)
 	// `-rpcpassword` argument - secrets in arguments can leak when listing
 	// system processes.
 	add_arg(&args, "-stdinrpcpass");
+	/* To avoid giant command lines, we use -stdin (avail since bitcoin 0.13) */
+	if (stdinargs)
+		add_arg(&args, "-stdin");
 	add_arg(&args, cmd);
-	while ((arg = va_arg(ap, char *)) != NULL)
-		add_arg(&args, arg);
+	while ((arg = va_arg(ap, char *)) != NULL) {
+		if (stdinargs)
+			add_arg(stdinargs, arg);
+		else
+			add_arg(&args, arg);
+	}
 	add_arg(&args, NULL);
 
 	return args;
 }
 
 static LAST_ARG_NULL const char **
-gather_args(const tal_t *ctx, const char *cmd, ...)
+gather_args(const tal_t *ctx, const char ***stdinargs, const char *cmd, ...)
 {
 	va_list ap;
 	const char **ret;
 
 	va_start(ap, cmd);
-	ret = gather_argsv(ctx, cmd, ap);
+	ret = gather_argsv(ctx, stdinargs, cmd, ap);
 	va_end(ap);
 
 	return ret;
@@ -170,7 +179,7 @@ static struct io_plan *output_init(struct io_conn *conn, struct bitcoin_cli *bcli)
 static void next_bcli(enum bitcoind_prio prio);
 
 /* For printing: simple string of args (no secrets!) */
-static char *args_string(const tal_t *ctx, const char **args)
+static char *args_string(const tal_t *ctx, const char **args, const char **stdinargs)
 {
 	size_t i;
 	char *ret = tal_strdup(ctx, args[0]);
@@ -185,12 +194,16 @@ static char *args_string(const tal_t *ctx, const char **args)
 			ret = tal_strcat(ctx, take(ret), args[i]);
 		}
 	}
+	for (i = 0; i < tal_count(stdinargs); i++) {
+		ret = tal_strcat(ctx, take(ret), " ");
+		ret = tal_strcat(ctx, take(ret), stdinargs[i]);
+	}
 	return ret;
 }
 
 static char *bcli_args(const tal_t *ctx, struct bitcoin_cli *bcli)
 {
-	return args_string(ctx, bcli->args);
+	return args_string(ctx, bcli->args, bcli->stdinargs);
 }
 
 /* Only set as destructor once bcli is in current. */
@@ -313,9 +326,14 @@ static void next_bcli(enum bitcoind_prio prio)
 			   bcli->args[0], strerror(errno));
 
-	if (bitcoind->rpcpass)
+	if (bitcoind->rpcpass) {
 		write_all(in, bitcoind->rpcpass, strlen(bitcoind->rpcpass));
-
+		write_all(in, "\n", strlen("\n"));
+	}
+	for (size_t i = 0; i < tal_count(bcli->stdinargs); i++) {
+		write_all(in, bcli->stdinargs[i], strlen(bcli->stdinargs[i]));
+		write_all(in, "\n", strlen("\n"));
+	}
 	close(in);
 
 	bcli->start = time_now();
@@ -351,7 +369,8 @@ start_bitcoin_cliv(const tal_t *ctx,
 	else
 		bcli->exitstatus = NULL;
 
-	bcli->args = gather_argsv(bcli, method, ap);
+	bcli->stdinargs = tal_arr(bcli, const char *, 0);
+	bcli->args = gather_argsv(bcli, &bcli->stdinargs, method, ap);
 	bcli->stash = stash;
 
 	list_add_tail(&bitcoind->pending[bcli->prio], &bcli->list);
@@ -994,14 +1013,14 @@ static struct command_result *getutxout(struct command *cmd,
 static void bitcoind_failure(struct plugin *p, const char *error_message)
 {
-	const char **cmd = gather_args(bitcoind, NULL, "echo", NULL);
 	plugin_err(p, "\n%s\n\n"
 		      "Make sure you have bitcoind running and that bitcoin-cli"
 		      " is able to connect to bitcoind.\n\n"
 		      "You can verify that your Bitcoin Core installation is"
 		      " ready for use by running:\n\n"
 		      "    $ %s 'hello world'\n", error_message,
-		      args_string(cmd, cmd));
+		      args_string(cmd, cmd, NULL));
 }
 
 /* Do some sanity checks on bitcoind based on the output of `getnetworkinfo`. */
@@ -1016,7 +1035,7 @@ static void parse_getnetworkinfo_result(struct plugin *p, const char *buf)
 	if (!result)
 		plugin_err(p, "Invalid response to '%s': '%s'. Can not "
			      "continue without proceeding to sanity checks.",
-			      args_string(tmpctx, gather_args(bitcoind, "getnetworkinfo", NULL)),
+			      args_string(tmpctx, gather_args(bitcoind, NULL, "getnetworkinfo", NULL), NULL),
			      buf);
 
 	/* Check that we have a fully-featured `estimatesmartfee`. */
@@ -1046,7 +1065,7 @@ static void wait_and_check_bitcoind(struct plugin *p)
 {
 	int in, from, status, ret;
 	pid_t child;
-	const char **cmd = gather_args(bitcoind, "getnetworkinfo", NULL);
+	const char **cmd = gather_args(bitcoind, NULL, "getnetworkinfo", NULL);
 	bool printed = false;
 	char *output = NULL;
diff --git a/plugins/bkpr/blockheights.c b/plugins/bkpr/blockheights.c
index 094c8489666c..e483a0662dd8 100644
--- a/plugins/bkpr/blockheights.c
+++ b/plugins/bkpr/blockheights.c
@@ -140,7 +140,7 @@ struct blockheights *init_blockheights(const tal_t *ctx,
 		if (keytok->size != 3)
 			goto weird;
-		if (!json_to_txid(buf, keytok + 2, &txid))
+		if (!json_to_txid(buf, keytok + 3, &txid))
 			goto weird;
 		if (!json_hex_to_be32(buf, hextok, &be_blockheight))
 			goto weird;
diff --git a/plugins/bkpr/bookkeeper.c b/plugins/bkpr/bookkeeper.c
index 1312924de600..3ea4e6f8ff66 100644
--- a/plugins/bkpr/bookkeeper.c
+++ b/plugins/bkpr/bookkeeper.c
@@ -53,6 +53,10 @@ static struct refresh_info *use_rinfo(struct refresh_info *rinfo)
 	return rinfo;
 }
 
+/* Recursion */
+static struct command_result *limited_listchannelmoves(struct command *cmd,
+						       struct refresh_info *rinfo);
+
 static struct command_result *rinfo_one_done(struct command *cmd,
 					     struct refresh_info *rinfo)
 {
@@ -82,7 +86,8 @@ static void
 parse_and_log_channel_move(struct command *cmd,
 			   const char *buf,
 			   const jsmntok_t *channelmove,
-			   struct refresh_info *rinfo);
+			   struct refresh_info *rinfo,
+			   bool log);
 
 static struct command_result *datastore_done(struct command *cmd,
 					     const char *method,
@@ -115,24 +120,52 @@ static struct command_result *listchannelmoves_done(struct command *cmd,
 	be64 be_index;
 
 	moves = json_get_member(buf, result, "channelmoves");
+	if (moves->size > 2) {
+		plugin_log(cmd->plugin, LOG_DBG,
+			   "%u channelmoves, only logging first and last",
+			   moves->size);
+	}
 	json_for_each_arr(i, t, moves)
-		parse_and_log_channel_move(cmd, buf, t, rinfo);
+		parse_and_log_channel_move(cmd, buf, t, rinfo,
+					   i == 0 || i == moves->size - 1);
 
 	be_index = cpu_to_be64(bkpr->channelmoves_index);
 	jsonrpc_set_datastore_binary(cmd, "bookkeeper/channelmoves_index",
 				     &be_index, sizeof(be_index),
 				     "create-or-replace", datastore_done, NULL,
 				     use_rinfo(rinfo));
+
+	/* If there might be more, try asking for more */
+	if (moves->size != 0)
+		limited_listchannelmoves(cmd, rinfo);
+
 	return rinfo_one_done(cmd, rinfo);
 }
 
+/* We do 1000 at a time to avoid overwhelming lightningd */
+static struct command_result *limited_listchannelmoves(struct command *cmd,
+						       struct refresh_info *rinfo)
+{
+	struct bkpr *bkpr = bkpr_of(cmd->plugin);
+	struct out_req *req;
+
+	req = jsonrpc_request_start(cmd, "listchannelmoves",
+				    listchannelmoves_done,
+				    plugin_broken_cb,
+				    use_rinfo(rinfo));
+	json_add_string(req->js, "index", "created");
+	json_add_u64(req->js, "start", bkpr->channelmoves_index + 1);
+	json_add_u64(req->js, "limit", 1000);
+	return send_outreq(req);
+}
+
 static struct command_result *listchainmoves_done(struct command *cmd,
 						  const char *method,
 						  const char *buf,
 						  const jsmntok_t *result,
 						  struct refresh_info *rinfo)
 {
-	struct out_req *req;
 	const jsmntok_t *moves, *t;
 	size_t i;
 	struct bkpr *bkpr = bkpr_of(cmd->plugin);
@@ -148,13 +181,7 @@ static struct command_result *listchainmoves_done(struct command *cmd,
 				     "create-or-replace", datastore_done, NULL,
 				     use_rinfo(rinfo));
 
-	req = jsonrpc_request_start(cmd, "listchannelmoves",
-				    listchannelmoves_done,
-				    plugin_broken_cb,
-				    use_rinfo(rinfo));
-	json_add_string(req->js, "index", "created");
-	json_add_u64(req->js, "start", bkpr->channelmoves_index + 1);
-	send_outreq(req);
+	limited_listchannelmoves(cmd, rinfo);
 
 	return rinfo_one_done(cmd, rinfo);
 }
@@ -1202,8 +1229,10 @@ parse_and_log_chain_move(struct command *cmd,
 	if (e->origin_acct)
 		find_or_create_account(cmd, bkpr, e->origin_acct);
 
-	/* Make this visible for queries (we expect increasing!) */
-	assert(e->db_id > bkpr->chainmoves_index);
+	/* Make this visible for queries (we expect increasing!).  If we raced, this is not true. */
+	if (e->db_id <= bkpr->chainmoves_index)
+		return;
+
 	bkpr->chainmoves_index = e->db_id;
 
 	/* This event *might* have implications for account;
@@ -1255,7 +1284,8 @@ static void
 parse_and_log_channel_move(struct command *cmd,
 			   const char *buf,
 			   const jsmntok_t *channelmove,
-			   struct refresh_info *rinfo)
+			   struct refresh_info *rinfo,
+			   bool log)
 {
 	struct channel_event *e = tal(cmd, struct channel_event);
 	struct account *acct;
@@ -1302,11 +1332,12 @@ parse_and_log_channel_move(struct command *cmd,
 		err = tal_free(err);
 	}
 
-	plugin_log(cmd->plugin, LOG_DBG, "coin_move 2 (%s) %s -%s %s %"PRIu64,
-		   e->tag,
-		   fmt_amount_msat(tmpctx, e->credit),
-		   fmt_amount_msat(tmpctx, e->debit),
-		   CHANNEL_MOVE, e->timestamp);
+	if (log)
+		plugin_log(cmd->plugin, LOG_DBG, "coin_move 2 (%s) %s -%s %s %"PRIu64,
+			   e->tag,
+			   fmt_amount_msat(tmpctx, e->credit),
+			   fmt_amount_msat(tmpctx, e->debit),
+			   CHANNEL_MOVE, e->timestamp);
 
 	/* Go find the account for this event */
 	acct = find_account(bkpr, acct_name);
@@ -1316,8 +1347,9 @@ parse_and_log_channel_move(struct command *cmd,
 			   " but no account exists %s",
 			   acct_name);
 
-	/* Make this visible for queries (we expect increasing!) */
-	assert(e->db_id > bkpr->channelmoves_index);
+	/* Make this visible for queries (we expect increasing!).  If we raced, this is not true. */
+	if (e->db_id <= bkpr->channelmoves_index)
+		return;
 	bkpr->channelmoves_index = e->db_id;
 
 	/* Check for invoice desc data, necessary */
diff --git a/plugins/libplugin.c b/plugins/libplugin.c
index 36a74d9f8134..167d13621099 100644
--- a/plugins/libplugin.c
+++ b/plugins/libplugin.c
@@ -290,17 +290,6 @@ static void ld_rpc_send(struct plugin *plugin, struct json_stream *stream)
 	io_wake(plugin->io_rpc_conn);
 }
 
-
-/* When cmd for request is gone, we use this as noop callback */
-static struct command_result *ignore_cb(struct command *command,
-					const char *method,
-					const char *buf,
-					const jsmntok_t *result,
-					void *arg)
-{
-	return &complete;
-}
-
 /* Ignore the result, and terminate the timer/aux/hook */
 struct command_result *ignore_and_complete(struct command *cmd,
 					   const char *method,
@@ -357,14 +346,6 @@ struct command_result *plugin_broken_cb(struct command *cmd,
 				  json_tok_full(buf, result));
 }
 
-static void disable_request_cb(struct command *cmd, struct out_req *out)
-{
-	out->errcb = NULL;
-	out->cb = ignore_cb;
-	/* Called because cmd got free'd */
-	out->cmd = NULL;
-}
-
 /* Prefix is usually a cmd->id */
 static const char *json_id(const tal_t *ctx, struct plugin *plugin,
 			   const char *method, const char *prefix)
@@ -424,9 +405,6 @@ jsonrpc_request_start_(struct command *cmd,
 	strmap_add(&cmd->plugin->out_reqs, out->id, out);
 	tal_add_destructor2(out, destroy_out_req, cmd->plugin);
 
-	/* If command goes away, don't call callbacks! */
-	tal_add_destructor2(out->cmd, disable_request_cb, out);
-
 	out->js = new_json_stream(NULL, cmd, NULL);
 	json_object_start(out->js, NULL);
 	json_add_string(out->js, "jsonrpc", "2.0");
@@ -1100,9 +1078,6 @@ static void handle_rpc_reply(struct plugin *plugin, const char *buf, const jsmntok_t *toks)
 		return;
 	}
 
-	/* Remove destructor if one existed */
-	tal_del_destructor2(out->cmd, disable_request_cb, out);
-
 	/* We want to free this if callback doesn't. */
 	tal_steal(tmpctx, out);
diff --git a/plugins/sql.c b/plugins/sql.c
index 7b7e21bf4793..d7aa1b857054 100644
--- a/plugins/sql.c
+++ b/plugins/sql.c
@@ -122,6 +122,8 @@ struct table_desc {
 	bool is_subobject;
 	/* Do we use created_index as primary key?  Otherwise we create rowid. */
 	bool has_created_index;
+	/* Have we created our sql indexes yet? */
+	bool indices_created;
 	/* function to refresh it. */
 	struct command_result *(*refresh)(struct command *cmd,
 					  const struct table_desc *td,
@@ -197,6 +199,10 @@ static const struct index indices[] = {
 		"channelmoves",
 		{ "account_id", NULL },
 	},
+	{
+		"channelmoves",
+		{ "payment_hash", NULL },
+	},
 };
 
 static enum fieldtype find_fieldtype(const jsmntok_t *name)
@@ -486,6 +492,28 @@ static struct command_result *refresh_complete(struct command *cmd,
 	return command_finished(cmd, ret);
 }
 
+static void init_indices(struct plugin *plugin, const struct table_desc *td)
+{
+	for (size_t i = 0; i < ARRAY_SIZE(indices); i++) {
+		char *errmsg, *cmd;
+		int err;
+
+		if (!streq(indices[i].tablename, td->name))
+			continue;
+
+		cmd = tal_fmt(tmpctx, "CREATE INDEX %s_%zu_idx ON %s (%s",
+			      indices[i].tablename, i,
+			      indices[i].tablename,
+			      indices[i].fields[0]);
+		if (indices[i].fields[1])
+			tal_append_fmt(&cmd, ", %s", indices[i].fields[1]);
+		tal_append_fmt(&cmd, ");");
+		err = sqlite3_exec(db, cmd, NULL, NULL, &errmsg);
+		if (err != SQLITE_OK)
+			plugin_err(plugin, "Failed '%s': %s", cmd, errmsg);
+	}
+}
+
 /* Recursion */
 static struct command_result *refresh_tables(struct command *cmd,
 					     struct db_query *dbq);
@@ -501,6 +529,11 @@ static struct command_result *one_refresh_done(struct command *cmd,
 	assert(td->refreshing);
 	td->refreshing = false;
 
+	if (!td->indices_created) {
+		init_indices(cmd->plugin, td);
+		td->indices_created = true;
+	}
+
 	/* Transfer refresh waiters onto local list */
 	list_head_init(&waiters);
 	list_append_list(&waiters, &td->refresh_waiters);
@@ -1523,6 +1556,7 @@ static struct table_desc *new_table_desc(const tal_t *ctx,
 	td->last_created_index = 0;
 	td->has_created_index = false;
 	td->refreshing = false;
+	td->indices_created = false;
 	list_head_init(&td->refresh_waiters);
 
 	/* Only top-levels have refresh functions */
@@ -1703,25 +1737,6 @@ static void init_tablemap(struct plugin *plugin)
 	}
 }
 
-static void init_indices(struct plugin *plugin)
-{
-	for (size_t i = 0; i < ARRAY_SIZE(indices); i++) {
-		char *errmsg, *cmd;
-		int err;
-
-		cmd = tal_fmt(tmpctx, "CREATE INDEX %s_%zu_idx ON %s (%s",
-			      indices[i].tablename, i,
-			      indices[i].tablename,
-			      indices[i].fields[0]);
-		if (indices[i].fields[1])
-			tal_append_fmt(&cmd, ", %s", indices[i].fields[1]);
-		tal_append_fmt(&cmd, ");");
-		err = sqlite3_exec(db, cmd, NULL, NULL, &errmsg);
-		if (err != SQLITE_OK)
-			plugin_err(plugin, "Failed '%s': %s", cmd, errmsg);
-	}
-}
-
 static void memleak_mark_tablemap(struct plugin *p, struct htable *memtable)
 {
 	memleak_ptr(memtable, dbfilename);
@@ -1734,7 +1749,6 @@ static const char *init(struct command *init_cmd,
 	struct plugin *plugin = init_cmd->plugin;
 	db = sqlite_setup(plugin);
 	init_tablemap(plugin);
-	init_indices(plugin);
 
 	plugin_set_memleak_handler(plugin, memleak_mark_tablemap);
 	return NULL;
@@ -1757,20 +1771,22 @@ static const char *fmt_indexes(const tal_t *ctx, const char *table)
 	for (size_t i = 0; i < ARRAY_SIZE(indices); i++) {
 		if (!streq(indices[i].tablename, table))
 			continue;
-		/* FIXME: Handle multiple indices! */
-		assert(!ret);
+		if (!ret)
+			ret = tal_fmt(ctx, " indexed by ");
+		else
+			tal_append_fmt(&ret, ", also indexed by ");
 		BUILD_ASSERT(ARRAY_SIZE(indices[i].fields) == 2);
 		if (indices[i].fields[1])
-			ret = tal_fmt(tmpctx, "`%s` and `%s`",
-				      indices[i].fields[0],
-				      indices[i].fields[1]);
+			tal_append_fmt(&ret, "`%s` and `%s`",
+				       indices[i].fields[0],
+				       indices[i].fields[1]);
 		else
-			ret = tal_fmt(tmpctx, "`%s`",
-				      indices[i].fields[0]);
+			tal_append_fmt(&ret, "`%s`",
+				       indices[i].fields[0]);
 	}
 	if (!ret)
 		return "";
-	return tal_fmt(ctx, " indexed by %s", ret);
+	return ret;
 }
 
 static const char *json_prefix(const tal_t *ctx,
diff --git a/tests/test_askrene.py b/tests/test_askrene.py
index b54b3776a63e..727083e69f20 100644
--- a/tests/test_askrene.py
+++ b/tests/test_askrene.py
@@ -4,7 +4,7 @@ from pyln.testing.utils import SLOW_MACHINE
 from utils import (
     only_one, first_scid, GenChannel, generate_gossip_store,
-    sync_blockheight, wait_for, TEST_NETWORK, TIMEOUT
+    sync_blockheight, wait_for, TEST_NETWORK, TIMEOUT, mine_funding_to_announce
 )
 import os
 import pytest
@@ -1185,7 +1185,9 @@ def test_real_data(node_factory, bitcoind):
     l1, l2 = node_factory.line_graph(2, fundamount=AMOUNT,
                                      opts=[{'gossip_store_file': outfile.name,
                                             'allow_warning': True,
-                                            'dev-throttle-gossip': None},
+                                            'dev-throttle-gossip': None,
+                                            # This can be slow!
+                                            'askrene-timeout': TIMEOUT},
                                            {'allow_warning': True}])
 
     # These were obviously having a bad day at the time of the snapshot:
@@ -1536,3 +1538,73 @@ def test_simple_dummy_channel(node_factory):
         final_cltv=5,
         layers=["mylayer"],
     )
+
+
+def test_maxparts_infloop(node_factory, bitcoind):
+    # Three paths from l1 -> l5.
+    # FIXME: enhance explain_failure!
+    l1, l2, l3, l4, l5 = node_factory.get_nodes(5, opts=[{'broken_log': 'plugin-cln-askrene.*the obvious route'}] + [{}] * 4)
+
+    for intermediate in (l2, l3, l4):
+        node_factory.join_nodes([l1, intermediate, l5])
+
+    # We create exorbitant fees into l5.
+    for n in (l2, l3, l4):
+        n.rpc.setchannel(l5.info['id'], feeppm=100000)
+
+    mine_funding_to_announce(bitcoind, (l1, l2, l3, l4, l5))
+    wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 12)
+
+    amount = 1_400_000_000
+    # You can do this one
+    route = l1.rpc.getroutes(source=l1.info['id'],
+                             destination=l5.info['id'],
+                             amount_msat=amount,
+                             layers=[],
+                             maxfee_msat=amount,
+                             final_cltv=5)
+    assert len(route['routes']) == 3
+
+    # Now with maxparts == 2.  Usually askrene can't figure out why it failed,
+    # but sometimes it gets a theory.
+    with pytest.raises(RpcError):
+        l1.rpc.getroutes(source=l1.info['id'],
+                         destination=l5.info['id'],
+                         amount_msat=amount,
+                         layers=[],
+                         maxfee_msat=amount,
+                         final_cltv=5,
+                         maxparts=2)
+
+
+def test_askrene_timeout(node_factory, bitcoind):
+    """Test askrene's route timeout"""
+    l1, l2 = node_factory.line_graph(2, opts=[{'broken_log': 'linear_routes: timed out after deadline'}, {}])
+
+    assert l1.rpc.listconfigs('askrene-timeout')['configs']['askrene-timeout']['value_int'] == 10
+    l1.rpc.getroutes(source=l1.info['id'],
+                     destination=l2.info['id'],
+                     amount_msat=1,
+                     layers=['auto.localchans'],
+                     maxfee_msat=1,
+                     final_cltv=5)
+
+    # It will exit instantly.
+    l1.rpc.setconfig('askrene-timeout', 0)
+
+    with pytest.raises(RpcError, match='linear_routes: timed out after deadline'):
+        l1.rpc.getroutes(source=l1.info['id'],
+                         destination=l2.info['id'],
+                         amount_msat=1,
+                         layers=['auto.localchans'],
+                         maxfee_msat=1,
+                         final_cltv=5)
+
+    # We can put it back though.
+    l1.rpc.setconfig('askrene-timeout', 10)
+    l1.rpc.getroutes(source=l1.info['id'],
+                     destination=l2.info['id'],
+                     amount_msat=1,
+                     layers=['auto.localchans'],
+                     maxfee_msat=1,
+                     final_cltv=5)
diff --git a/tests/test_bookkeeper.py b/tests/test_bookkeeper.py
index 6144e3cbeff2..26fdb1fe3b65 100644
--- a/tests/test_bookkeeper.py
+++ b/tests/test_bookkeeper.py
@@ -1156,3 +1156,56 @@ def test_migration_no_bkpr(node_factory, bitcoind):
          'is_rebalance': False,
          'tag': 'journal_entry',
          'type': 'channel'}]
+
+
+@unittest.skipIf(TEST_NETWORK != 'regtest', "External wallet support doesn't work with elements yet.")
+def test_listincome_timebox(node_factory, bitcoind):
+    l1 = node_factory.get_node()
+    addr = l1.rpc.newaddr()['bech32']
+
+    amount = 1111111
+    bitcoind.rpc.sendtoaddress(addr, amount / 10**8)
+
+    bitcoind.generate_block(1, wait_for_mempool=1)
+    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
+
+    waddr = bitcoind.rpc.getnewaddress()
+
+    # Ok, now we send some funds to an external address, get change.
+    l1.rpc.withdraw(waddr, amount // 2)
+    bitcoind.generate_block(1, wait_for_mempool=1)
+    wait_for(lambda: len(l1.rpc.listfunds(spent=True)['outputs']) == 2)
+
+    first_one = int(time.time())
+    time.sleep(2)
+
+    # Do another one, make sure we don't see it if we filter by timestamp.
+    bitcoind.rpc.sendtoaddress(addr, amount / 10**8)
+
+    bitcoind.generate_block(1, wait_for_mempool=1)
+    wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 2)
+
+    incomes = l1.rpc.bkpr_listincome(end_time=first_one)['income_events']
+    assert [i for i in incomes if i['timestamp'] > first_one] == []
+
+
+@unittest.skipIf(TEST_NETWORK != 'regtest', "Snapshots are bitcoin regtest.")
+@unittest.skipIf(os.getenv('TEST_DB_PROVIDER', 'sqlite3') != 'sqlite3', "uses snapshots")
+def test_bkpr_parallel(node_factory, bitcoind, executor):
+    """Bookkeeper could crash with parallel requests"""
+    bitcoind.generate_block(1)
+    l1 = node_factory.get_node(dbfile="l1-before-moves-in-db.sqlite3.xz",
+                               options={'database-upgrade': True})
+
+    fut1 = executor.submit(l1.rpc.bkpr_listincome)
+    fut2 = executor.submit(l1.rpc.bkpr_listincome)
+
+    fut1.result()
+    fut2.result()
+
+    # We save blockheights in storage, so make sure we restore them on restart!
+    acctevents_before = l1.rpc.bkpr_listaccountevents()
+    l1.restart()
+
+    acctevents_after = l1.rpc.bkpr_listaccountevents()
+    assert acctevents_after == acctevents_before
diff --git a/tests/test_misc.py b/tests/test_misc.py
index a91ecafd9649..00790d4649a5 100644
--- a/tests/test_misc.py
+++ b/tests/test_misc.py
@@ -2178,7 +2178,7 @@ def test_bitcoind_fail_first(node_factory, bitcoind):
     # first.
     timeout = 5 if 5 < TIMEOUT // 3 else TIMEOUT // 3
     l1 = node_factory.get_node(start=False,
-                               broken_log=r'plugin-bcli: .*(-stdinrpcpass getblockhash 100 exited 1 \(after [0-9]* other errors\)|we have been retrying command for)',
+                               broken_log=r'plugin-bcli: .*(-stdinrpcpass -stdin getblockhash 100 exited 1 \(after [0-9]* other errors\)|we have been retrying command for)',
                                may_fail=True,
                                options={'bitcoin-retry-timeout': timeout})
diff --git a/tests/test_pay.py b/tests/test_pay.py
index 151d560362bf..c3ec21ab633b 100644
--- a/tests/test_pay.py
+++ b/tests/test_pay.py
@@ -6750,6 +6750,49 @@ def test_injectpaymentonion_failures(node_factory, executor):
     assert 'onionreply' in err.value.error['data']
 
 
+def test_injectpaymentonion_peerfail(node_factory, executor):
+    l1, l2 = node_factory.line_graph(2,
+                                     opts=[{'may_reconnect': True,
+                                            'dev-no-reconnect': None,
+                                            'disconnect': ['=WIRE_UPDATE_ADD_HTLC', '-WIRE_COMMITMENT_SIGNED']},
+                                           {'may_reconnect': True,
+                                            'dev-no-reconnect': None}])
+    blockheight = l1.rpc.getinfo()['blockheight']
+
+    inv1 = l2.rpc.invoice(1000, "test_injectpaymentonion_peerfail", "test_injectpaymentonion_peerfail")
+
+    # First hop for injectpaymentonion is self.
+    hops = [{'pubkey': l1.info['id'],
+             'payload': serialize_payload_tlv(1000, 18 + 6, first_scid(l1, l2), blockheight).hex()},
+            {'pubkey': l2.info['id'],
+             'payload': serialize_payload_final_tlv(1000, 18, 1000, blockheight, inv1['payment_secret']).hex()}]
+    onion = l1.rpc.createonion(hops=hops, assocdata=inv1['payment_hash'])
+
+    l1.rpc.disconnect(l2.info['id'], force=True)
+    with pytest.raises(RpcError, match='WIRE_TEMPORARY_CHANNEL_FAILURE'):
+        l1.rpc.injectpaymentonion(onion=onion['onion'],
+                                  payment_hash=inv1['payment_hash'],
+                                  amount_msat=1000,
+                                  cltv_expiry=blockheight + 18 + 6,
+                                  partid=1,
+                                  groupid=0)
+    # In fact, it won't create any sendpays entry, since it fails too early.
+    assert l1.rpc.listsendpays() == {'payments': []}
+
+    # This will hang, since we disconnect once committed.  But provides another
+    # (legitimately) pending payment for our migration code to test.
+    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
+    executor.submit(l1.rpc.injectpaymentonion,
+                    onion=onion['onion'],
+                    payment_hash=inv1['payment_hash'],
+                    amount_msat=1000,
+                    cltv_expiry=blockheight + 18 + 6,
+                    partid=2,
+                    groupid=0)
+    l1.daemon.wait_for_log("dev_disconnect: =WIRE_UPDATE_ADD_HTLC")
+    assert [p['status'] for p in l1.rpc.listsendpays()['payments']] == ['pending']
+
+
 def test_parallel_channels_reserve(node_factory, bitcoind):
     """Tests whether we are able to pay through parallel channels concurrently.
     To do that we need to enable strict-forwarding."""
diff --git a/tests/test_plugin.py b/tests/test_plugin.py
index b01005479322..649acee9aa9a 100644
--- a/tests/test_plugin.py
+++ b/tests/test_plugin.py
@@ -1852,6 +1852,22 @@ def test_bitcoin_backend(node_factory, bitcoind):
                                " bitcoind")
 
 
+def test_bitcoin_backend_gianttx(node_factory, bitcoind):
+    """Test that a giant tx doesn't crash bcli"""
+    l1 = node_factory.get_node(start=False)
+    # With memleak we spend far too much time gathering backtraces.
+    if "LIGHTNINGD_DEV_MEMLEAK" in l1.daemon.env:
+        del l1.daemon.env["LIGHTNINGD_DEV_MEMLEAK"]
+    l1.start()
+    addrs = {addr: 0.00200000 for addr in [l1.rpc.newaddr('bech32')['bech32'] for _ in range(700)]}
+    bitcoind.rpc.sendmany("", addrs)
+    bitcoind.generate_block(1, wait_for_mempool=1)
+    sync_blockheight(bitcoind, [l1])
+
+    l1.rpc.withdraw(bitcoind.getnewaddress(), 'all')
+    bitcoind.generate_block(1, wait_for_mempool=1)
+
+
 def test_bitcoin_bad_estimatefee(node_factory, bitcoind):
     """
     This tests that we don't crash if bitcoind backend gives bad estimatefees.
@@ -3795,7 +3811,7 @@ def test_sql(node_factory, bitcoind):
                           {'name': 'extra_tags', 'type': 'string'}]},
      'channelmoves': {
-         'indices': [['account_id']],
+         'indices': [['account_id'], ['payment_hash']],
          'columns': [{'name': 'created_index', 'type': 'u64'},
                      {'name': 'account_id',
diff --git a/tools/build-release.sh b/tools/build-release.sh
index a42f1dcdd32f..1280d7999f15 100755
--- a/tools/build-release.sh
+++ b/tools/build-release.sh
@@ -228,7 +228,7 @@ if [ -z "${TARGETS##* sign *}" ]; then
     echo "Signing Release"
     cd release/ || exit
     sha256sum clightning-"$VERSION"* > SHA256SUMS-"$VERSION"
-    gpg -sb --armor -o SHA256SUMS-"$VERSION".asc "$(gpgconf --list-options gpg | awk -F: '$1 == "default-key" {print $10}' | tr -d '"')" SHA256SUMS-"$VERSION"
+    gpg -sb --armor --default-key "$(gpgconf --list-options gpg | awk -F: '$1 == "default-key" {print $10}' | tr -d '"')" -o SHA256SUMS-"$VERSION".asc SHA256SUMS-"$VERSION"
    cd ..
    echo "Release Signed"
 fi
diff --git a/tools/reckless b/tools/reckless
index 33ef0fa2a338..1777472a6b6f 100755
--- a/tools/reckless
+++ b/tools/reckless
@@ -21,7 +21,7 @@ from urllib.error import HTTPError
 import venv
 
 
-__VERSION__ = '25.09.1'
+__VERSION__ = '25.09.2'
 
 logging.basicConfig(
     level=logging.INFO,
diff --git a/uv.lock b/uv.lock
index 2ffbc99f932d..24034ff1bf4b 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1830,7 +1830,7 @@ dev = [{ name = "pyln-proto", editable = "contrib/pyln-proto" }]
 
 [[package]]
 name = "pyln-client"
-version = "25.9.1"
+version = "25.9.2"
 source = { editable = "contrib/pyln-client" }
 dependencies = [
     { name = "pyln-bolt7" },
@@ -1890,7 +1890,7 @@ dev = [
 
 [[package]]
 name = "pyln-proto"
-version = "25.9.1"
+version = "25.9.2"
 source = { editable = "contrib/pyln-proto" }
 dependencies = [
     { name = "base58" },
@@ -1919,7 +1919,7 @@ dev = [{ name = "pytest", specifier = ">=7.0.0" }]
 
 [[package]]
 name = "pyln-testing"
-version = "25.9.1"
+version = "25.9.2"
 source = { editable = "contrib/pyln-testing" }
 dependencies = [
     { name = "cheroot" },