From f3a8ed6ed8a5853061565d9990300a15b586eb98 Mon Sep 17 00:00:00 2001 From: Deirdre Connolly Date: Thu, 9 Jun 2022 11:08:57 -0400 Subject: [PATCH 1/9] Update transaction verification Grafana dashboard to show all shielded pool sigs, proofs, nullifiers (#4585) --- grafana/transaction-verification.json | 237 +++++++++++++++++++++----- 1 file changed, 192 insertions(+), 45 deletions(-) diff --git a/grafana/transaction-verification.json b/grafana/transaction-verification.json index 18c08469b7f..ca7b641ea4b 100644 --- a/grafana/transaction-verification.json +++ b/grafana/transaction-verification.json @@ -3,7 +3,10 @@ "list": [ { "builtIn": 1, - "datasource": "-- Grafana --", + "datasource": { + "type": "datasource", + "uid": "grafana" + }, "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -19,13 +22,17 @@ ] }, "editable": true, - "gnetId": null, + "fiscalYearStartMonth": 0, "graphTooltip": 0, "id": 6, "links": [], + "liveNow": false, "panels": [ { - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, "fieldConfig": { "defaults": { "color": { @@ -66,7 +73,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.1.7", + "pluginVersion": "8.5.5", "targets": [ { "exemplar": true, @@ -81,7 +88,10 @@ "type": "stat" }, { - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, "fieldConfig": { "defaults": { "color": { @@ -132,25 +142,61 @@ }, "overrides": [ { - "__systemRef": "hideSeriesFrom", "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "{instance=\"localhost:9999\", job=\"zebrad\"}" - ], - "prefix": "All except:", - "readOnly": true + "id": "byName", + "options": "{instance=\"localhost:9999\", job=\"zebrad\"}" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "RedPallas" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "RedJubjub" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "green", + "mode": "fixed" + } } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Ed25519" }, "properties": [ { - "id": "custom.hideFrom", + "id": "color", "value": { - "legend": false, - "tooltip": false, - "viz": true + "fixedColor": "blue", + "mode": "fixed" } } ] @@ -171,32 +217,61 @@ "placement": "bottom" }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, + "editorMode": "code", "exemplar": false, - "expr": "rate(signatures_redjubjub_validated{}[$__interval])", + "expr": "rate(signatures_ed25519_validated{}[$__interval])", "hide": false, "interval": "", - "legendFormat": "RedJubjub", + "legendFormat": "Ed25519", + "range": true, "refId": "B" }, { - "exemplar": true, - "expr": "rate(signatures_ed25519_validated{}[$__interval])", + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "rate(signatures_redjubjub_validated{}[$__interval])", "hide": false, "interval": "", - "legendFormat": "Ed25519", + "legendFormat": "RedJubjub", + "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, + "editorMode": "code", + "exemplar": false, + "expr": "rate(signatures_redpallas_validated{}[$__interval])", + "hide": 
false, + "legendFormat": "RedPallas", + "range": true, + "refId": "C" } ], "title": "Signatures validated", "type": "timeseries" }, { - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, "fieldConfig": { "defaults": { "color": { @@ -237,7 +312,7 @@ "text": {}, "textMode": "auto" }, - "pluginVersion": "8.1.7", + "pluginVersion": "8.5.5", "targets": [ { "exemplar": true, @@ -251,7 +326,10 @@ "type": "stat" }, { - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, "fieldConfig": { "defaults": { "color": { @@ -300,7 +378,23 @@ ] } }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Halo2" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] + } + ] }, "gridPos": { "h": 8, @@ -316,23 +410,43 @@ "placement": "bottom" }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, "exemplar": true, "expr": "rate(proofs_groth16_verified{}[$__interval])", "interval": "", "legendFormat": "Groth16", "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, + "editorMode": "code", + "expr": "rate(proofs_halo2_verified{}[$__interval])", + "hide": false, + "legendFormat": "Halo2", + "range": true, + "refId": "B" } ], "title": "Proofs verified", "type": "timeseries" }, { - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, "fieldConfig": { "defaults": { "color": { @@ -428,7 +542,8 @@ "placement": "bottom" }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ @@ -452,7 +567,10 @@ "type": "timeseries" }, { - "datasource": null, + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, "fieldConfig": { "defaults": { "color": { @@ -531,6 +649,21 @@ } } ] + }, + { + "matcher": { + "id": "byName", + "options": "Sprout" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] } ] }, @@ -548,11 +681,28 @@ "placement": "bottom" }, "tooltip": { - "mode": "single" + "mode": "single", + "sort": "none" } }, "targets": [ { + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, + "exemplar": true, + "expr": "rate(state_finalized_cumulative_sprout_nullifiers{}[$__interval])", + "hide": false, + "interval": "", + "legendFormat": "Sprout", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, "exemplar": true, "expr": "rate(state_finalized_cumulative_sapling_nullifiers{}[$__interval])", "hide": false, @@ -561,20 +711,16 @@ "refId": "B" }, { + "datasource": { + "type": "prometheus", + "uid": "qdDbj0Dnz" + }, "exemplar": true, "expr": "rate(state_finalized_cumulative_orchard_nullifiers{}[$__interval])", "hide": false, "instant": false, "interval": "", "legendFormat": "Orchard", - "refId": "A" - }, - { - "exemplar": true, - "expr": "rate(state_finalized_cumulative_sprout_nullifiers{}[$__interval])", - "hide": false, - "interval": "", - "legendFormat": "Sprout", "refId": "C" } ], @@ -583,14 +729,14 @@ } ], "refresh": "5s", - "schemaVersion": 30, + "schemaVersion": 36, "style": "dark", "tags": [], "templating": { "list": [] }, "time": { - "from": "now-3h", + "from": "now-12h", "to": "now" }, "timepicker": { @@ -610,5 +756,6 @@ "timezone": "", "title": "🔎", "uid": "UXVRR1v7z", - "version": 18 -} + "version": 23, + "weekStart": "" +} \ No newline at 
end of file From 3985bd151ee846795d3d5af005dcac85f2107e1e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Jun 2022 19:46:19 +0000 Subject: [PATCH 2/9] build(deps): bump w9jds/firebase-action from 2.1.2 to 2.2.0 (#4588) Bumps [w9jds/firebase-action](https://github.com/w9jds/firebase-action) from 2.1.2 to 2.2.0. - [Release notes](https://github.com/w9jds/firebase-action/releases) - [Commits](https://github.com/w9jds/firebase-action/compare/v2.1.2...v2.2.0) --- updated-dependencies: - dependency-name: w9jds/firebase-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 898c89c70e6..73401e47e1d 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -47,7 +47,7 @@ jobs: mdbook build book/ - name: Deploy Zebra book to firebase - uses: w9jds/firebase-action@v2.1.2 + uses: w9jds/firebase-action@v2.2.0 with: args: deploy env: @@ -63,7 +63,7 @@ jobs: RUSTDOCFLAGS: '--html-in-header katex-header.html' - name: Deploy external docs to firebase - uses: w9jds/firebase-action@v2.1.2 + uses: w9jds/firebase-action@v2.2.0 with: args: deploy env: @@ -77,7 +77,7 @@ jobs: RUSTDOCFLAGS: '--html-in-header katex-header.html' - name: Deploy internal docs to firebase - uses: w9jds/firebase-action@v2.1.2 + uses: w9jds/firebase-action@v2.2.0 with: args: deploy env: From 45c3d7327776794c306d3c86769929ae45ecda2e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 17:24:27 +0000 Subject: [PATCH 3/9] build(deps): bump tj-actions/changed-files from 22 to 23 (#4593) Bumps [tj-actions/changed-files](https://github.com/tj-actions/changed-files) from 22 to 23. - [Release notes](https://github.com/tj-actions/changed-files/releases) - [Changelog](https://github.com/tj-actions/changed-files/blob/main/HISTORY.md) - [Commits](https://github.com/tj-actions/changed-files/compare/v22...v23) --- updated-dependencies: - dependency-name: tj-actions/changed-files dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/lint.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 81da8df53c4..3b74716df11 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -25,7 +25,7 @@ jobs: - name: Rust files id: changed-files-rust - uses: tj-actions/changed-files@v22 + uses: tj-actions/changed-files@v23 with: files: | **/*.rs @@ -37,7 +37,7 @@ jobs: - name: Workflow files id: changed-files-workflows - uses: tj-actions/changed-files@v22 + uses: tj-actions/changed-files@v23 with: files: | .github/workflows/*.yml From bbf77e53e4293ffbab82b92d95ced91ff45073c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 17:24:46 +0000 Subject: [PATCH 4/9] build(deps): bump w9jds/firebase-action from 2.2.0 to 2.2.2 (#4592) Bumps [w9jds/firebase-action](https://github.com/w9jds/firebase-action) from 2.2.0 to 2.2.2. 
- [Release notes](https://github.com/w9jds/firebase-action/releases) - [Commits](https://github.com/w9jds/firebase-action/compare/v2.2.0...v2.2.2) --- updated-dependencies: - dependency-name: w9jds/firebase-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 73401e47e1d..ba57228e7b5 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -47,7 +47,7 @@ jobs: mdbook build book/ - name: Deploy Zebra book to firebase - uses: w9jds/firebase-action@v2.2.0 + uses: w9jds/firebase-action@v2.2.2 with: args: deploy env: @@ -63,7 +63,7 @@ jobs: RUSTDOCFLAGS: '--html-in-header katex-header.html' - name: Deploy external docs to firebase - uses: w9jds/firebase-action@v2.2.0 + uses: w9jds/firebase-action@v2.2.2 with: args: deploy env: @@ -77,7 +77,7 @@ jobs: RUSTDOCFLAGS: '--html-in-header katex-header.html' - name: Deploy internal docs to firebase - uses: w9jds/firebase-action@v2.2.0 + uses: w9jds/firebase-action@v2.2.2 with: args: deploy env: From e016bbe8e3196e6b9dddccc6bb789d15ea1ce0f5 Mon Sep 17 00:00:00 2001 From: Conrado Gouvea Date: Mon, 13 Jun 2022 17:13:30 -0300 Subject: [PATCH 5/9] increase lightwalletd timeout, remove testnet tests (#4584) * increase lightwalletd timeout * switch back to aditya's fork * manually point to new aditya's lightwalletd image * disable sync_one_checkpoint_testnet test * disable restart_stop_at_height in testnet * rever to 'latest' lightwalletd image --- .github/workflows/zcash-lightwalletd.yml | 4 ++-- zebrad/tests/acceptance.rs | 7 +++++-- zebrad/tests/common/launch.rs | 4 ++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index 0964962e9f4..ebd8ce1ffc6 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -11,7 +11,7 @@ on: # rebuild lightwalletd whenever the related Zebra code changes # # TODO: this code isn't compiled in this docker image - # rebuild whenever the actual code at zcash/lightwalletd/master changes + # rebuild whenever the actual code at lightwalletd/master changes - 'zebra-rpc/**' - 'zebrad/tests/acceptance.rs' - 'zebrad/src/config.rs' @@ -50,7 +50,7 @@ jobs: steps: - uses: actions/checkout@v3.0.2 with: - repository: zcash/lightwalletd + repository: adityapk00/lightwalletd ref: 'master' persist-credentials: false diff --git a/zebrad/tests/acceptance.rs b/zebrad/tests/acceptance.rs index 18e99e581c5..628a543e4e4 100644 --- a/zebrad/tests/acceptance.rs +++ b/zebrad/tests/acceptance.rs @@ -501,7 +501,9 @@ fn sync_one_checkpoint_mainnet() -> Result<()> { /// Test if `zebrad` can sync the first checkpoint on testnet. /// /// The first checkpoint contains a single genesis block. 
-#[test] +// TODO: disabled because testnet is not currently reliable +// #[test] +#[allow(dead_code)] fn sync_one_checkpoint_testnet() -> Result<()> { sync_until( TINY_CHECKPOINT_TEST_HEIGHT, @@ -523,7 +525,8 @@ fn restart_stop_at_height() -> Result<()> { zebra_test::init(); restart_stop_at_height_for_network(Network::Mainnet, TINY_CHECKPOINT_TEST_HEIGHT)?; - restart_stop_at_height_for_network(Network::Testnet, TINY_CHECKPOINT_TEST_HEIGHT)?; + // TODO: disabled because testnet is not currently reliable + // restart_stop_at_height_for_network(Network::Testnet, TINY_CHECKPOINT_TEST_HEIGHT)?; Ok(()) } diff --git a/zebrad/tests/common/launch.rs b/zebrad/tests/common/launch.rs index 97bdc014f33..fad4315d393 100644 --- a/zebrad/tests/common/launch.rs +++ b/zebrad/tests/common/launch.rs @@ -49,9 +49,9 @@ pub const LIGHTWALLETD_UPDATE_TIP_DELAY: Duration = Duration::from_secs(20 * 60) /// The amount of time we wait for lightwalletd to do a full sync to the tip. /// -/// `lightwalletd` takes about half an hour to fully sync, +/// `lightwalletd` takes about an hour to fully sync, /// and Zebra needs time to activate its mempool. -pub const LIGHTWALLETD_FULL_SYNC_TIP_DELAY: Duration = Duration::from_secs(45 * 60); +pub const LIGHTWALLETD_FULL_SYNC_TIP_DELAY: Duration = Duration::from_secs(90 * 60); /// Extension trait for methods on `tempfile::TempDir` for using it as a test /// directory for `zebrad`. From 64ac9e4cc2d8c57f8aceb15befa3ee85980750dc Mon Sep 17 00:00:00 2001 From: Marek Date: Mon, 13 Jun 2022 23:26:58 +0200 Subject: [PATCH 6/9] Change the order of instructions for creating a release (#4595) --- .github/PULL_REQUEST_TEMPLATE/release-checklist.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md index 092cf7705a8..3d137f43e8d 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release-checklist.md +++ b/.github/PULL_REQUEST_TEMPLATE/release-checklist.md @@ -136,12 +136,12 @@ After you have the version increments and the updated changelog: - [ ] Once the PR has been merged, create a new release using the draft release as a base, by clicking the Edit icon in the [draft release](https://github.com/ZcashFoundation/zebra/releases). 
+- [ ] Set the tag name to the version tag, for example: `v1.0.0-alpha.0` +- [ ] Set the release to target the `main` branch - [ ] Set the release title to `Zebra ` followed by the version tag, for example: `Zebra 1.0.0-alpha.0` - [ ] Copy the final changelog of this release to the release description (starting just _after_ the title `## [Zebra ...`) -- [ ] Set the tag name to the version tag, for example: `v1.0.0-alpha.0` -- [ ] Set the release to target the `main` branch - [ ] Mark the release as 'pre-release' (until we are no longer alpha/beta) - [ ] Publish the release From 3ec6bd4c293ec7239515c20c7a7a45586d67f80f Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 13 Jun 2022 17:38:18 -0400 Subject: [PATCH 7/9] ci(lightwalletd): Zebra's Dockerfile needs `latest` lwd image (#4599) We need to add this condtion to lightwalletd docker build, to always create a `latest` tag when merging to the default branch: `main` --- .github/workflows/zcash-lightwalletd.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/zcash-lightwalletd.yml index ebd8ce1ffc6..e75e2d791fa 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ b/.github/workflows/zcash-lightwalletd.yml @@ -73,6 +73,7 @@ jobs: images: | ${{ env.GAR_BASE }}/${{ env.IMAGE_NAME }} # generate Docker tags based on the following events/attributes + # set latest tag for default branch tags: | type=schedule type=ref,event=branch @@ -81,6 +82,7 @@ jobs: type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} type=sha + type=raw,value=latest,enable={{is_default_branch}} - name: Set up QEMU id: qemu From 00f23e1d96e64b7133d5835cad5dfbf2e1d060b4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 23:26:52 +0000 Subject: [PATCH 8/9] build(deps): bump tokio-util from 0.7.2 to 0.7.3 (#4572) Bumps [tokio-util](https://github.com/tokio-rs/tokio) from 0.7.2 to 0.7.3. - [Release notes](https://github.com/tokio-rs/tokio/releases) - [Commits](https://github.com/tokio-rs/tokio/compare/tokio-util-0.7.2...tokio-util-0.7.3) --- updated-dependencies: - dependency-name: tokio-util dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- Cargo.lock | 14 +++++++------- zebra-network/Cargo.toml | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d48b5b0dc6a..743d8e2b94d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4747,9 +4747,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.18.2" +version = "1.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4903bf0427cf68dddd5aa6a93220756f8be0c34fcfa9f5e6191e103e15a31395" +checksum = "95eec79ea28c00a365f539f1961e9278fbcaf81c0ff6aaf0e93c181352446948" dependencies = [ "bytes", "libc", @@ -4850,9 +4850,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c" +checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" dependencies = [ "bytes", "futures-core", @@ -4895,7 +4895,7 @@ dependencies = [ "prost-derive", "tokio", "tokio-stream", - "tokio-util 0.7.2", + "tokio-util 0.7.3", "tower", "tower-layer", "tower-service", @@ -5318,7 +5318,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-util 0.7.2", + "tokio-util 0.7.3", "tower-layer", "tower-service", "tracing", @@ -6258,7 +6258,7 @@ dependencies = [ "thiserror", "tokio", "tokio-stream", - "tokio-util 0.7.2", + "tokio-util 0.7.3", "toml", "tor-rtcompat", "tower", diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index 2a2205886b8..ad815635ba4 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -29,7 +29,7 @@ thiserror = "1.0.31" futures = "0.3.21" tokio = { version = "1.18.2", features = ["net", "time", "tracing", "macros", "rt-multi-thread"] } tokio-stream = { version = "0.1.8", features = ["sync", "time"] } -tokio-util = { version = "0.7.2", features = ["codec"] } +tokio-util = { version = "0.7.3", features = ["codec"] } tower = { version = "0.4.12", features = ["retry", "discover", "load", "load-shed", "timeout", "util", "buffer"] } metrics = "0.18.1" From 2e50ccc8f3d067c5aa3c8924c3ccedabc1af477b Mon Sep 17 00:00:00 2001 From: Marek Date: Tue, 14 Jun 2022 03:22:16 +0200 Subject: [PATCH 9/9] fix(doc): Fix various doc warnings, part 2 (#4561) * Fix the syntax of links in comments * Fix a mistake in the docs Co-authored-by: Alfredo Garcia * Remove unnecessary angle brackets from a link * Revert the changes for links that serve as references * Revert "Revert the changes for links that serve as references" This reverts commit 8b091aa9fab453e7d3559a5d474e0879183b9bfb. * Remove `<` `>` from links that serve as references This reverts commit 046ef25620ae1a2140760ae7ea379deecb4b583c. 
* Don't use `<` `>` in normal comments * Don't use `<` `>` for normal comments * Revert changes for comments starting with `//` * Fix some warnings produced by `cargo doc` * Fix some rustdoc warnings * Fix some warnings * Refactor some changes * Fix some rustdoc warnings * Fix some rustdoc warnings * Resolve various TODOs Co-authored-by: teor Co-authored-by: Alfredo Garcia Co-authored-by: teor Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- zebra-chain/src/sapling/spend.rs | 2 +- zebra-consensus/src/transaction.rs | 13 ++-- zebra-network/src/constants.rs | 19 +++--- zebra-network/src/isolated.rs | 6 +- zebra-network/src/lib.rs | 13 ++-- zebra-network/src/meta_addr/arbitrary.rs | 31 ++++++--- zebra-network/src/peer/client.rs | 4 +- zebra-network/src/peer/error.rs | 22 +++--- zebra-network/src/peer/handshake.rs | 2 +- zebra-node-services/src/mempool.rs | 10 +-- zebra-state/src/arbitrary.rs | 4 +- zebra-state/src/response.rs | 21 ++++-- zebra-state/src/service.rs | 29 +++++--- zebra-state/src/service/chain_tip.rs | 25 ++++--- zebra-state/src/service/check/difficulty.rs | 8 +-- zebra-state/src/service/check/nullifier.rs | 29 +++++--- zebra-state/src/service/finalized_state.rs | 11 +-- .../src/service/finalized_state/disk_db.rs | 3 +- .../service/finalized_state/disk_format.rs | 11 ++- .../finalized_state/disk_format/block.rs | 4 +- .../disk_format/transparent.rs | 55 +++++++++------ .../src/service/finalized_state/zebra_db.rs | 2 +- .../finalized_state/zebra_db/transparent.rs | 2 +- .../src/service/non_finalized_state/chain.rs | 68 +++++++++++-------- .../non_finalized_state/chain/index.rs | 31 ++++----- zebra-state/src/service/pending_utxos.rs | 10 +-- zebra-state/src/service/read.rs | 16 +++-- zebra-state/src/service/watch_receiver.rs | 7 +- zebra-test/src/command.rs | 12 ++-- zebra-test/src/network_addr.rs | 3 +- zebra-test/src/prelude.rs | 2 +- 31 files changed, 280 insertions(+), 195 deletions(-) diff --git a/zebra-chain/src/sapling/spend.rs b/zebra-chain/src/sapling/spend.rs index 7a9c0f78d8b..5b634e9aeb6 100644 --- a/zebra-chain/src/sapling/spend.rs +++ b/zebra-chain/src/sapling/spend.rs @@ -182,7 +182,7 @@ impl ZcashDeserialize for Spend { /// /// This rule is also implemented in /// `zebra_state::service::check::anchors` and - /// [`crate::transaction::serialize`]. + /// `crate::transaction::serialize`. /// /// The "anchor encoding for v4 transactions" is implemented here. fn zcash_deserialize(mut reader: R) -> Result { diff --git a/zebra-consensus/src/transaction.rs b/zebra-consensus/src/transaction.rs index b31be57608f..c13b9e66516 100644 --- a/zebra-consensus/src/transaction.rs +++ b/zebra-consensus/src/transaction.rs @@ -124,14 +124,17 @@ pub enum Response { Block { /// The witnessed transaction ID for this transaction. /// - /// [`Block`] responses can be uniquely identified by [`UnminedTxId::mined_id`], - /// because the block's authorizing data root will be checked during contextual validation. + /// [`Response::Block`] responses can be uniquely identified by + /// [`UnminedTxId::mined_id`], because the block's authorizing data root + /// will be checked during contextual validation. tx_id: UnminedTxId, /// The miner fee for this transaction. + /// /// `None` for coinbase transactions. /// - /// Consensus rule: + /// # Consensus + /// /// > The remaining value in the transparent transaction value pool /// > of a coinbase transaction is destroyed. 
/// @@ -151,8 +154,8 @@ pub enum Response { /// Mempool transactions always have a transaction fee, /// because coinbase transactions are rejected from the mempool. /// - /// [`Mempool`] responses are uniquely identified by the [`UnminedTxId`] - /// variant for their transaction version. + /// [`Response::Mempool`] responses are uniquely identified by the + /// [`UnminedTxId`] variant for their transaction version. transaction: VerifiedUnminedTx, }, } diff --git a/zebra-network/src/constants.rs b/zebra-network/src/constants.rs index 22253dce4d7..3167dc5d3cb 100644 --- a/zebra-network/src/constants.rs +++ b/zebra-network/src/constants.rs @@ -53,7 +53,7 @@ use zebra_chain::{ /// /// Since the inbound peer limit is higher than the outbound peer limit, /// Zebra can be connected to a majority of peers -/// that it has *not* chosen from its [`AddressBook`]. +/// that it has *not* chosen from its [`crate::AddressBook`]. /// /// Inbound peer connections are initiated by the remote peer, /// so inbound peer selection is not controlled by the local node. @@ -149,25 +149,28 @@ pub const MAX_RECENT_PEER_AGE: Duration32 = Duration32::from_days(3); /// Using a prime number makes sure that heartbeats don't synchronise with crawls. pub const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(59); -/// The minimum time between successive calls to [`CandidateSet::next()`][Self::next]. +/// The minimum time between successive calls to +/// [`CandidateSet::next`][crate::peer_set::CandidateSet::next]. /// /// ## Security /// /// Zebra resists distributed denial of service attacks by making sure that new peer connections -/// are initiated at least `MIN_PEER_CONNECTION_INTERVAL` apart. +/// are initiated at least [`MIN_PEER_CONNECTION_INTERVAL`] apart. pub const MIN_PEER_CONNECTION_INTERVAL: Duration = Duration::from_millis(25); -/// The minimum time between successive calls to [`CandidateSet::update()`][Self::update]. +/// The minimum time between successive calls to +/// [`CandidateSet::update`][crate::peer_set::CandidateSet::update]. /// /// Using a prime number makes sure that peer address crawls don't synchronise with other crawls. /// /// ## Security /// /// Zebra resists distributed denial of service attacks by making sure that requests for more -/// peer addresses are sent at least `MIN_PEER_GET_ADDR_INTERVAL` apart. +/// peer addresses are sent at least [`MIN_PEER_GET_ADDR_INTERVAL`] apart. pub const MIN_PEER_GET_ADDR_INTERVAL: Duration = Duration::from_secs(31); -/// The combined timeout for all the requests in [`CandidateSet::update()`][Self::update]. +/// The combined timeout for all the requests in +/// [`CandidateSet::update`][crate::peer_set::CandidateSet::update]. /// /// `zcashd` doesn't respond to most `getaddr` requests, /// so this timeout needs to be short. @@ -329,8 +332,8 @@ mod tests { use super::*; /// This assures that the `Duration` value we are computing for - /// MIN_PEER_RECONNECTION_DELAY actually matches the other const values it - /// relies on. + /// [`MIN_PEER_RECONNECTION_DELAY`] actually matches the other const values + /// it relies on. #[test] fn ensure_live_peer_duration_value_matches_others() { zebra_test::init(); diff --git a/zebra-network/src/isolated.rs b/zebra-network/src/isolated.rs index 0d4130bbae9..42240281e8e 100644 --- a/zebra-network/src/isolated.rs +++ b/zebra-network/src/isolated.rs @@ -26,7 +26,7 @@ mod tests; /// Creates a Zcash peer connection using the provided data stream. /// This connection is completely isolated from all other node state. 
/// -/// The connection pool returned by [`init`](zebra_network::init) +/// The connection pool returned by [`init`](crate::init) /// should be used for all requests that /// don't require isolated state or use of an existing TCP connection. However, /// this low-level API is useful for custom network crawlers or Tor connections. @@ -44,7 +44,7 @@ mod tests; /// - `network`: the Zcash [`Network`] used for this connection. /// /// - `data_stream`: an existing data stream. This can be a non-anonymised TCP connection, -/// or a Tor client [`DataStream`]. +/// or a Tor client `arti_client::DataStream`. /// /// - `user_agent`: a valid BIP14 user-agent, e.g., the empty string. pub fn connect_isolated( @@ -124,7 +124,7 @@ where /// Transactions sent over this connection can be linked to the sending and receiving IP address /// by passive internet observers. /// -/// Prefer [`connect_isolated_run_tor`](tor::connect_isolated_run_tor) if available. +/// Prefer [`connect_isolated_tor`](tor::connect_isolated_tor) if available. pub fn connect_isolated_tcp_direct( network: Network, addr: SocketAddr, diff --git a/zebra-network/src/lib.rs b/zebra-network/src/lib.rs index de0dcb68901..a8309fa4042 100644 --- a/zebra-network/src/lib.rs +++ b/zebra-network/src/lib.rs @@ -16,11 +16,11 @@ //! [`tower::Service`] representing "the network", which load-balances //! outbound [`Request`]s over available peers. //! -//! Unlike the underlying legacy network protocol, Zebra's [`PeerSet`] -//! [`tower::Service`] guarantees that each `Request` future will resolve to -//! the correct `Response`, rather than an unrelated `Response` message. +//! Unlike the underlying legacy network protocol, Zebra's `PeerSet` +//! [`tower::Service`] guarantees that each `Request` future will resolve to the +//! correct `Response`, rather than an unrelated `Response` message. //! -//! Each peer connection is handled by a distinct [`Connection`] task. +//! Each peer connection is handled by a distinct [`peer::Connection`] task. //! The Zcash network protocol is bidirectional, so Zebra interprets incoming //! Zcash messages as either: //! - [`Response`]s to previously sent outbound [`Request`]s, or @@ -84,7 +84,7 @@ //! //! ### Connection Pool //! -//! [`PeerSet`] Network Service: +//! `PeerSet` Network Service: //! * provides an interface for other services and tasks running within this node //! to make requests to remote peers ("the rest of the network") //! * accepts [`Request`]s from the local node @@ -102,7 +102,8 @@ //! Peer Inventory Service: //! * tracks gossiped `inv` advertisements for each peer //! * tracks missing inventory for each peer -//! * used by the [`PeerSet`] to route block and transaction requests to peers that have the requested data +//! * used by the `PeerSet` to route block and transaction requests +//! to peers that have the requested data //! //! ### Peer Discovery //! diff --git a/zebra-network/src/meta_addr/arbitrary.rs b/zebra-network/src/meta_addr/arbitrary.rs index 2090441d24e..5ed654b5697 100644 --- a/zebra-network/src/meta_addr/arbitrary.rs +++ b/zebra-network/src/meta_addr/arbitrary.rs @@ -10,21 +10,28 @@ use super::{MetaAddr, MetaAddrChange, PeerServices}; /// The largest number of random changes we want to apply to a [`MetaAddr`]. /// -/// This should be at least twice the number of [`PeerAddrState`]s, so the tests -/// can cover multiple transitions through every state. 
+/// This should be at least twice the number of [`PeerAddrState`][1]s, so the +/// tests can cover multiple transitions through every state. +/// +/// [1]: super::PeerAddrState #[allow(dead_code)] pub const MAX_ADDR_CHANGE: usize = 15; -/// The largest number of random addresses we want to add to an [`AddressBook`]. +/// The largest number of random addresses we want to add to an [`AddressBook`][2]. +/// +/// This should be at least the number of [`PeerAddrState`][1]s, so the tests +/// can cover interactions between addresses in different states. /// -/// This should be at least the number of [`PeerAddrState`]s, so the tests can -/// cover interactions between addresses in different states. +/// [1]: super::PeerAddrState +/// [2]: crate::AddressBook #[allow(dead_code)] pub const MAX_META_ADDR: usize = 8; impl MetaAddr { /// Create a strategy that generates [`MetaAddr`]s in the - /// [`PeerAddrState::NeverAttemptedGossiped`] state. + /// [`NeverAttemptedGossiped`][1] state. + /// + /// [1]: super::PeerAddrState::NeverAttemptedGossiped pub fn gossiped_strategy() -> BoxedStrategy { ( canonical_socket_addr_strategy(), @@ -38,7 +45,9 @@ impl MetaAddr { } /// Create a strategy that generates [`MetaAddr`]s in the - /// [`PeerAddrState::NeverAttemptedAlternate`] state. + /// [`NeverAttemptedAlternate`][1] state. + /// + /// [1]: super::PeerAddrState::NeverAttemptedAlternate pub fn alternate_strategy() -> BoxedStrategy { (canonical_socket_addr_strategy(), any::()) .prop_map(|(socket_addr, untrusted_services)| { @@ -84,9 +93,11 @@ impl MetaAddrChange { /// Create a strategy that generates [`MetaAddrChange`]s which are ready for /// outbound connections. /// - /// Currently, all generated changes are the [`NewAlternate`] variant. - /// TODO: Generate all [`MetaAddrChange`] variants, and give them ready fields. - /// (After PR #2276 merges.) + /// Currently, all generated changes are the [`NewAlternate`][1] variant. + /// TODO: Generate all [`MetaAddrChange`] variants, and give them ready + /// fields. (After PR #2276 merges.) + /// + /// [1]: super::NewAlternate pub fn ready_outbound_strategy() -> BoxedStrategy { canonical_socket_addr_strategy() .prop_filter_map("failed MetaAddr::is_valid_for_outbound", |addr| { diff --git a/zebra-network/src/peer/client.rs b/zebra-network/src/peer/client.rs index 3d9f9dd1a96..5f56e6c1b3a 100644 --- a/zebra-network/src/peer/client.rs +++ b/zebra-network/src/peer/client.rs @@ -76,8 +76,8 @@ pub(crate) struct ClientRequest { /// The actual network request for the peer. pub request: Request, - /// The response [`Message`] channel, included because `peer::Client::call` returns a - /// future that may be moved around before it resolves. + /// The response `Message` channel, included because `peer::Client::call` + /// returns a future that may be moved around before it resolves. pub tx: oneshot::Sender>, /// Used to register missing inventory in responses on `tx`, diff --git a/zebra-network/src/peer/error.rs b/zebra-network/src/peer/error.rs index eb42cde4bb1..b96a7f2db39 100644 --- a/zebra-network/src/peer/error.rs +++ b/zebra-network/src/peer/error.rs @@ -94,28 +94,30 @@ pub enum PeerError { /// or peers can download and verify the missing data. /// /// If the peer has some of the data, the request returns an [`Ok`] response, - /// with any `notfound` data is marked as [`Missing`][m]. + /// with any `notfound` data is marked as [`Missing`][1]. 
/// - /// [m] crate::protocol::external::InventoryResponse::Missing + /// [1]: crate::protocol::internal::InventoryResponse::Missing #[error("Remote peer could not find any of the items: {0:?}")] NotFoundResponse(Vec), /// We requested data, but all our ready peers are marked as recently - /// [`Missing`](InventoryResponse::Missing) that data in our local inventory registry. + /// [`Missing`][1] that data in our local inventory registry. /// /// This is a temporary error. /// - /// Peers with the inventory can finish their requests and become ready, - /// or other peers can download and verify the missing data. + /// Peers with the inventory can finish their requests and become ready, or + /// other peers can download and verify the missing data. /// /// # Correctness /// - /// This error is produced using Zebra's local inventory registry, - /// without contacting any peers. + /// This error is produced using Zebra's local inventory registry, without + /// contacting any peers. /// - /// Client responses containing this error must not be used to update the inventory registry. - /// This makes sure that we eventually expire our local cache of missing inventory, - /// and send requests to peers again. + /// Client responses containing this error must not be used to update the + /// inventory registry. This makes sure that we eventually expire our local + /// cache of missing inventory, and send requests to peers again. + /// + /// [1]: crate::protocol::internal::InventoryResponse::Missing #[error("All ready peers are registered as recently missing these items: {0:?}")] NotFoundRegistry(Vec), } diff --git a/zebra-network/src/peer/handshake.rs b/zebra-network/src/peer/handshake.rs index 0c4f967f784..2010a89db58 100644 --- a/zebra-network/src/peer/handshake.rs +++ b/zebra-network/src/peer/handshake.rs @@ -1,4 +1,4 @@ -//! Initial [`Handshake`]s with Zebra peers over a [`PeerTransport`]. +//! Initial [`Handshake`]s with Zebra peers over a `PeerTransport`. use std::{ cmp::min, diff --git a/zebra-node-services/src/mempool.rs b/zebra-node-services/src/mempool.rs index d4b6b104239..320032da251 100644 --- a/zebra-node-services/src/mempool.rs +++ b/zebra-node-services/src/mempool.rs @@ -32,7 +32,7 @@ pub enum Request { /// Query matching transactions in the mempool, /// using a unique set of [`struct@Hash`]s. Pre-V5 transactions are matched /// directly; V5 transaction are matched just by the Hash, disregarding - /// the [`AuthDigest`]. + /// the [`AuthDigest`](zebra_chain::transaction::AuthDigest). TransactionsByMinedId(HashSet), /// Query matching cached rejected transaction IDs in the mempool, @@ -59,10 +59,10 @@ pub enum Request { /// /// This request is required to avoid hangs in the mempool. /// - /// The queue checker task can't call `poll_ready` directly on the [`Mempool`] service, - /// because the mempool service is wrapped in a `Buffer`. - /// Calling [`Buffer::poll_ready`] reserves a buffer slot, which can cause hangs when - /// too many slots are reserved but unused: + /// The queue checker task can't call `poll_ready` directly on the mempool + /// service, because the service is wrapped in a `Buffer`. 
Calling + /// `Buffer::poll_ready` reserves a buffer slot, which can cause hangs + /// when too many slots are reserved but unused: /// CheckForVerifiedTransactions, } diff --git a/zebra-state/src/arbitrary.rs b/zebra-state/src/arbitrary.rs index bd322e0200a..2d0c95d3c1a 100644 --- a/zebra-state/src/arbitrary.rs +++ b/zebra-state/src/arbitrary.rs @@ -100,8 +100,8 @@ impl PreparedBlock { } impl ContextuallyValidBlock { - /// Create a block that's ready for non-finalized `Chain` contextual validation, - /// using a [`PreparedBlock`] and fake zero-valued spent UTXOs. + /// Create a block that's ready for non-finalized `Chain` contextual + /// validation, using a [`PreparedBlock`] and fake zero-valued spent UTXOs. /// /// Only for use in tests. pub fn test_with_zero_spent_utxos(block: impl Into) -> Self { diff --git a/zebra-state/src/response.rs b/zebra-state/src/response.rs index a2607108226..7c1eae40b9b 100644 --- a/zebra-state/src/response.rs +++ b/zebra-state/src/response.rs @@ -18,7 +18,10 @@ use crate::Request; use crate::{service::read::AddressUtxos, TransactionLocation}; #[derive(Clone, Debug, PartialEq, Eq)] -/// A response to a [`StateService`] [`Request`]. +/// A response to a [`StateService`][1] [`Request`][2]. +/// +/// [1]: crate::service::StateService +/// [2]: crate::Request pub enum Response { /// Response to [`Request::CommitBlock`] indicating that a block was /// successfully committed to the state. @@ -50,7 +53,8 @@ pub enum Response { } #[derive(Clone, Debug, PartialEq, Eq)] -/// A response to a read-only [`ReadStateService`](crate::ReadStateService)'s +/// A response to a read-only +/// [`ReadStateService`](crate::service::ReadStateService)'s /// [`ReadRequest`](crate::ReadRequest). pub enum ReadResponse { /// Response to [`ReadRequest::Block`](crate::ReadRequest::Block) with the @@ -72,13 +76,18 @@ pub enum ReadResponse { /// specified Orchard note commitment tree. OrchardTree(Option>), - /// Response to [`ReadRequest::AddressBalance`] with the total balance of the addresses. + /// Response to + /// [`ReadRequest::AddressBalance`](crate::ReadRequest::AddressBalance) with + /// the total balance of the addresses. AddressBalance(Amount), - /// Response to [`ReadRequest::TransactionIdsByAddresses`] with the obtained transaction ids, - /// in the order they appear in blocks. + /// Response to + /// [`ReadRequest::TransactionIdsByAddresses`](crate::ReadRequest::TransactionIdsByAddresses) + /// with the obtained transaction ids, in the order they appear in blocks. AddressesTransactionIds(BTreeMap), - /// Response to [`ReadRequest::UtxosByAddresses`] with found utxos and transaction data. + /// Response to + /// [`ReadRequest::UtxosByAddresses`](crate::ReadRequest::UtxosByAddresses) + /// with found utxos and transaction data. Utxos(AddressUtxos), } diff --git a/zebra-state/src/service.rs b/zebra-state/src/service.rs index 9545dacfb28..a8f321cf41d 100644 --- a/zebra-state/src/service.rs +++ b/zebra-state/src/service.rs @@ -2,14 +2,18 @@ //! //! Zebra provides cached state access via two main services: //! - [`StateService`]: a read-write service that waits for queued blocks. -//! - [`ReadStateService`]: a read-only service that answers from the most recent committed block. +//! - [`ReadStateService`]: a read-only service that answers from the most +//! recent committed block. //! //! Most users should prefer [`ReadStateService`], unless they need to wait for -//! verified blocks to be committed. (For example, the syncer and mempool tasks.) +//! verified blocks to be committed. 
(For example, the syncer and mempool +//! tasks.) //! //! Zebra also provides access to the best chain tip via: -//! - [`LatestChainTip`]: a read-only channel that contains the latest committed tip. -//! - [`ChainTipChange`]: a read-only channel that can asynchronously await chain tip changes. +//! - [`LatestChainTip`]: a read-only channel that contains the latest committed +//! tip. +//! - [`ChainTipChange`]: a read-only channel that can asynchronously await +//! chain tip changes. use std::{ convert, @@ -95,7 +99,7 @@ pub type QueuedFinalized = ( /// to delay the next ObtainTips until all queued blocks have been committed. /// /// But most state users can ignore any queued blocks, and get faster read responses -/// using the [`ReadOnlyStateService`]. +/// using the [`ReadStateService`]. #[derive(Debug)] pub(crate) struct StateService { /// The finalized chain state, including its on-disk database. @@ -317,11 +321,14 @@ impl StateService { rsp_rx } - /// Update the [`LatestChainTip`], [`ChainTipChange`], and [`LatestChain`] channels - /// with the latest non-finalized [`ChainTipBlock`] and [`Chain`]. + /// Update the [`LatestChainTip`], [`ChainTipChange`], and `best_chain_sender` + /// channels with the latest non-finalized [`ChainTipBlock`] and + /// [`Chain`][1]. /// - /// Returns the latest non-finalized chain tip height, - /// or `None` if the non-finalized state is empty. + /// Returns the latest non-finalized chain tip height, or `None` if the + /// non-finalized state is empty. + /// + /// [1]: non_finalized_state::Chain #[instrument(level = "debug", skip(self))] fn update_latest_chain_channels(&mut self) -> Option { let best_chain = self.mem.best_chain(); @@ -502,8 +509,8 @@ impl StateService { .or_else(|| self.disk.db().height(hash)) } - /// Return the [`Utxo`] pointed to by `outpoint`, if it exists in any chain, - /// or in any pending block. + /// Return the [`transparent::Utxo`] pointed to by `outpoint`, if it exists + /// in any chain, or in any pending block. /// /// Some of the returned UTXOs may be invalid, because: /// - they are not in the best chain, or diff --git a/zebra-state/src/service/chain_tip.rs b/zebra-state/src/service/chain_tip.rs index bddde612daa..b7e82343d49 100644 --- a/zebra-state/src/service/chain_tip.rs +++ b/zebra-state/src/service/chain_tip.rs @@ -372,8 +372,8 @@ impl ChainTip for LatestChainTip { /// Awaits changes and resets of the state's best chain tip, /// returning the latest [`TipAction`] once the state is updated. /// -/// Each cloned instance separately tracks the last block data it provided. -/// If the best chain fork has changed since the last [`tip_change`] on that instance, +/// Each cloned instance separately tracks the last block data it provided. If +/// the best chain fork has changed since the last tip change on that instance, /// it returns a [`Reset`]. /// /// The chain tip data is based on: @@ -411,16 +411,19 @@ pub enum TipAction { /// The chain tip was reset to a block with `height` and `hash`. /// /// Resets can happen for different reasons: - /// * a newly created or cloned [`ChainTipChange`], which is behind the current tip, - /// * extending the chain with a network upgrade activation block, - /// * switching to a different best [`Chain`], also known as a rollback, and - /// * receiving multiple blocks since the previous change. 
+ /// - a newly created or cloned [`ChainTipChange`], which is behind the + /// current tip, + /// - extending the chain with a network upgrade activation block, + /// - switching to a different best [`Chain`][1], also known as a rollback, and + /// - receiving multiple blocks since the previous change. /// - /// To keep the code and tests simple, Zebra performs the same reset actions, - /// regardless of the reset reason. + /// To keep the code and tests simple, Zebra performs the same reset + /// actions, regardless of the reset reason. /// - /// `Reset`s do not have the transaction hashes from the tip block, - /// because all transactions should be cleared by a reset. + /// `Reset`s do not have the transaction hashes from the tip block, because + /// all transactions should be cleared by a reset. + /// + /// [1]: super::non_finalized_state::Chain Reset { /// The block height of the tip, after the chain reset. height: block::Height, @@ -470,7 +473,7 @@ impl ChainTipChange { /// - `Some(`[`TipAction`]`)` if there has been a change since the last time the method was called. /// - `None` if there has been no change. /// - /// See [`wait_for_tip_change`] for details. + /// See [`Self::wait_for_tip_change`] for details. #[instrument( skip(self), fields( diff --git a/zebra-state/src/service/check/difficulty.rs b/zebra-state/src/service/check/difficulty.rs index 0944add36d0..a7973b45bc1 100644 --- a/zebra-state/src/service/check/difficulty.rs +++ b/zebra-state/src/service/check/difficulty.rs @@ -107,13 +107,13 @@ impl AdjustedDifficulty { ) } - /// Initialise and return a new `AdjustedDifficulty` using a + /// Initialise and return a new [`AdjustedDifficulty`] using a /// `candidate_header`, `previous_block_height`, `network`, and a `context`. /// /// Designed for use when validating block headers, where the full block has not /// been downloaded yet. /// - /// See [`new_from_block()`] for detailed information about the `context`. + /// See [`Self::new_from_block`] for detailed information about the `context`. /// /// # Panics /// @@ -194,7 +194,7 @@ impl AdjustedDifficulty { /// `candidate_height`, `network`, and the relevant `difficulty_threshold`s and /// `time`s. /// - /// See [`expected_difficulty_threshold()`] for details. + /// See [`Self::expected_difficulty_threshold`] for details. /// /// Implements `ThresholdBits` from the Zcash specification. (Which excludes the /// Testnet minimum difficulty adjustment.) @@ -293,7 +293,7 @@ impl AdjustedDifficulty { /// /// Implements `ActualTimespan` from the Zcash specification. /// - /// See [`median_timespan_bounded()`] for details. + /// See [`Self::median_timespan_bounded`] for details. fn median_timespan(&self) -> Duration { let newer_median = self.median_time_past(); diff --git a/zebra-state/src/service/check/nullifier.rs b/zebra-state/src/service/check/nullifier.rs index ac2adfc1e80..4012d7ff967 100644 --- a/zebra-state/src/service/check/nullifier.rs +++ b/zebra-state/src/service/check/nullifier.rs @@ -10,7 +10,8 @@ use crate::{ }; /// Reject double-spends of nullifers: -/// - one from this [`PreparedBlock`], and the other already committed to the [`FinalizedState`]. +/// - one from this [`PreparedBlock`], and the other already committed to the +/// [`FinalizedState`](super::super::FinalizedState). /// /// (Duplicate non-finalized nullifiers are rejected during the chain update, /// see [`add_to_non_finalized_chain_unique`] for details.) 
@@ -50,12 +51,12 @@ pub(crate) fn no_duplicates_in_finalized_chain( } /// Reject double-spends of nullifers: -/// - both within the same [`JoinSplit`] (sprout only), -/// - from different [`JoinSplit`]s, [`sapling::Spend`]s or [`Action`]s -/// in this [`Transaction`]'s shielded data, or +/// - both within the same `JoinSplit` (sprout only), +/// - from different `JoinSplit`s, [`sapling::Spend`][2]s or +/// [`orchard::Action`][3]s in this [`Transaction`][1]'s shielded data, or /// - one from this shielded data, and another from: -/// - a previous transaction in this [`Block`], or -/// - a previous block in this non-finalized [`Chain`]. +/// - a previous transaction in this [`Block`][4], or +/// - a previous block in this non-finalized [`Chain`][5]. /// /// (Duplicate finalized nullifiers are rejected during service contextual validation, /// see [`no_duplicates_in_finalized_chain`] for details.) @@ -74,6 +75,12 @@ pub(crate) fn no_duplicates_in_finalized_chain( /// different pools have nullifiers with same bit pattern, they won't be /// considered the same when determining uniqueness. This is enforced by the /// callers of this function. +/// +/// [1]: zebra_chain::transaction::Transaction +/// [2]: zebra_chain::sapling::Spend +/// [3]: zebra_chain::orchard::Action +/// [4]: zebra_chain::block::Block +/// [5]: super::super::Chain #[tracing::instrument(skip(chain_nullifiers, shielded_data_nullifiers))] pub(crate) fn add_to_non_finalized_chain_unique<'block, NullifierT>( chain_nullifiers: &mut HashSet, @@ -94,8 +101,8 @@ where Ok(()) } -/// Remove nullifiers that were previously added to this non-finalized [`Chain`] -/// by this shielded data. +/// Remove nullifiers that were previously added to this non-finalized +/// [`Chain`][1] by this shielded data. /// /// "A note can change from being unspent to spent as a node’s view /// of the best valid block chain is extended by new transactions. @@ -114,8 +121,10 @@ where /// Panics if any nullifier is missing from the chain when we try to remove it. /// /// Blocks with duplicate nullifiers are rejected by -/// [`add_to_non_finalized_chain_unique`], so this shielded data should -/// be the only shielded data that added this nullifier to this [`Chain`]. +/// [`add_to_non_finalized_chain_unique`], so this shielded data should be the +/// only shielded data that added this nullifier to this [`Chain`][1]. +/// +/// [1]: super::super::Chain #[tracing::instrument(skip(chain_nullifiers, shielded_data_nullifiers))] pub(crate) fn remove_from_non_finalized_chain<'block, NullifierT>( chain_nullifiers: &mut HashSet, diff --git a/zebra-state/src/service/finalized_state.rs b/zebra-state/src/service/finalized_state.rs index f1a8af827bd..ea8cbe325fe 100644 --- a/zebra-state/src/service/finalized_state.rs +++ b/zebra-state/src/service/finalized_state.rs @@ -3,7 +3,8 @@ //! Zebra's database is implemented in 4 layers: //! - [`FinalizedState`]: queues, validates, and commits blocks, using... //! - [`ZebraDb`]: reads and writes [`zebra_chain`] types to the database, using... -//! - [`DiskDb`]: reads and writes format-specific types to the database, using... +//! - [`DiskDb`](disk_db::DiskDb): reads and writes format-specific types +//! to the database, using... //! - [`disk_format`]: converts types to raw database bytes. //! //! These layers allow us to split [`zebra_chain`] types for efficient database storage. 
@@ -136,7 +137,7 @@ impl FinalizedState { /// /// Returns the highest finalized tip block committed from the queue, /// or `None` if no blocks were committed in this call. - /// (Use [`tip_block`] to get the finalized tip, regardless of when it was committed.) + /// (Use `tip_block` to get the finalized tip, regardless of when it was committed.) pub fn queue_and_commit_finalized( &mut self, queued: QueuedFinalized, @@ -182,9 +183,9 @@ impl FinalizedState { /// Commit a finalized block to the state. /// /// It's the caller's responsibility to ensure that blocks are committed in - /// order. This function is called by [`queue`], which ensures order. - /// It is intentionally not exposed as part of the public API of the - /// [`FinalizedState`]. + /// order. This function is called by [`Self::queue_and_commit_finalized`], + /// which ensures order. It is intentionally not exposed as part of the + /// public API of the [`FinalizedState`]. fn commit_finalized(&mut self, queued_block: QueuedFinalized) -> Result { let (finalized, rsp_tx) = queued_block; let result = self.commit_finalized_direct(finalized.clone(), "CommitFinalized request"); diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 81fa1c00961..5bee586fa90 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -1,7 +1,8 @@ //! Provides low-level access to RocksDB using some database-specific types. //! //! This module makes sure that: -//! - all disk writes happen inside a RocksDB transaction ([`WriteBatch`]), and +//! - all disk writes happen inside a RocksDB transaction +//! ([`rocksdb::WriteBatch`]), and //! - format-specific invariants are maintained. //! //! # Correctness diff --git a/zebra-state/src/service/finalized_state/disk_format.rs b/zebra-state/src/service/finalized_state/disk_format.rs index b7261e00847..8782b323775 100644 --- a/zebra-state/src/service/finalized_state/disk_format.rs +++ b/zebra-state/src/service/finalized_state/disk_format.rs @@ -27,12 +27,15 @@ pub trait IntoDisk { /// Converts the current type into serialized raw bytes. /// - /// Used to convert keys to bytes in [`ReadDisk`], - /// and keys and values to bytes in [`WriteDisk`]. + /// Used to convert keys to bytes in [`ReadDisk`][1], + /// and keys and values to bytes in [`WriteDisk`][2]. /// /// # Panics /// /// - if the input data doesn't serialize correctly + /// + /// [1]: super::disk_db::ReadDisk + /// [2]: super::disk_db::WriteDisk fn as_bytes(&self) -> Self::Bytes; } @@ -40,11 +43,13 @@ pub trait IntoDisk { pub trait FromDisk: Sized { /// Converts raw disk bytes back into the deserialized type. /// - /// Used to convert keys and values from bytes in [`ReadDisk`]. + /// Used to convert keys and values from bytes in [`ReadDisk`][1]. /// /// # Panics /// /// - if the input data doesn't deserialize correctly + /// + /// [1]: super::disk_db::ReadDisk fn from_bytes(bytes: impl AsRef<[u8]>) -> Self; } diff --git a/zebra-state/src/service/finalized_state/disk_format/block.rs b/zebra-state/src/service/finalized_state/disk_format/block.rs index 71312ab6719..fafa63d1add 100644 --- a/zebra-state/src/service/finalized_state/disk_format/block.rs +++ b/zebra-state/src/service/finalized_state/disk_format/block.rs @@ -22,8 +22,8 @@ use serde::{Deserialize, Serialize}; /// The maximum value of an on-disk serialized [`Height`]. 
/// -/// This allows us to store [`OutputLocation`]s in 8 bytes, -/// which makes database searches more efficient. +/// This allows us to store [`OutputLocation`](crate::OutputLocation)s in +/// 8 bytes, which makes database searches more efficient. /// /// # Consensus /// diff --git a/zebra-state/src/service/finalized_state/disk_format/transparent.rs b/zebra-state/src/service/finalized_state/disk_format/transparent.rs index e695c7a8ec9..0f48213380d 100644 --- a/zebra-state/src/service/finalized_state/disk_format/transparent.rs +++ b/zebra-state/src/service/finalized_state/disk_format/transparent.rs @@ -128,7 +128,7 @@ impl OutputLocation { } } - /// Creates an output location from an [`Outpoint`], + /// Creates an output location from an [`transparent::OutPoint`], /// and the [`TransactionLocation`] of its transaction. /// /// The [`TransactionLocation`] is provided separately, @@ -304,12 +304,16 @@ impl AddressUnspentOutput { } } - /// Create an [`AddressUnspentOutput`] which starts iteration for the supplied address. - /// Used to look up the first output with [`ReadDisk::zs_next_key_value_from`]. + /// Create an [`AddressUnspentOutput`] which starts iteration for the + /// supplied address. Used to look up the first output with + /// [`ReadDisk::zs_next_key_value_from`][1]. /// - /// The unspent output location is before all unspent output locations in the index. - /// It is always invalid, due to the genesis consensus rules. But this is not an issue - /// since [`ReadDisk::zs_next_key_value_from`] will fetch the next existing (valid) value. + /// The unspent output location is before all unspent output locations in + /// the index. It is always invalid, due to the genesis consensus rules. But + /// this is not an issue since [`ReadDisk::zs_next_key_value_from`][1] will + /// fetch the next existing (valid) value. + /// + /// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from pub fn address_iterator_start(address_location: AddressLocation) -> AddressUnspentOutput { // Iterating from the lowest possible output location gets us the first output. let zero_output_location = OutputLocation::from_usize(Height(0), 0, 0); @@ -320,11 +324,15 @@ impl AddressUnspentOutput { } } - /// Update the unspent output location to the next possible output for the supplied address. - /// Used to look up the next output with [`ReadDisk::zs_next_key_value_from`]. + /// Update the unspent output location to the next possible output for the + /// supplied address. Used to look up the next output with + /// [`ReadDisk::zs_next_key_value_from`][1]. + /// + /// The updated unspent output location may be invalid, which is not an + /// issue since [`ReadDisk::zs_next_key_value_from`][1] will fetch the next + /// existing (valid) value. /// - /// The updated unspent output location may be invalid, which is not an issue - /// since [`ReadDisk::zs_next_key_value_from`] will fetch the next existing (valid) value. + /// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from pub fn address_iterator_next(&mut self) { // Iterating from the next possible output location gets us the next output, // even if it is in a later block or transaction. @@ -394,14 +402,19 @@ impl AddressTransaction { } } - /// Create an [`AddressTransaction`] which starts iteration for the supplied address. - /// Starts at the first UTXO, or at the `query_start` height, whichever is greater. + /// Create an [`AddressTransaction`] which starts iteration for the supplied + /// address. 
Starts at the first UTXO, or at the `query_start` height, + /// whichever is greater. /// - /// Used to look up the first transaction with [`ReadDisk::zs_next_key_value_from`]. + /// Used to look up the first transaction with + /// [`ReadDisk::zs_next_key_value_from`][1]. /// - /// The transaction location might be invalid, if it is based on the `query_start` height. - /// But this is not an issue, since [`ReadDisk::zs_next_key_value_from`] - /// will fetch the next existing (valid) value. + /// The transaction location might be invalid, if it is based on the + /// `query_start` height. But this is not an issue, since + /// [`ReadDisk::zs_next_key_value_from`][1] will fetch the next existing + /// (valid) value. + /// + /// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from pub fn address_iterator_start( address_location: AddressLocation, query_start: Height, @@ -421,11 +434,15 @@ impl AddressTransaction { } } - /// Update the transaction location to the next possible transaction for the supplied address. - /// Used to look up the next output with [`ReadDisk::zs_next_key_value_from`]. + /// Update the transaction location to the next possible transaction for the + /// supplied address. Used to look up the next output with + /// [`ReadDisk::zs_next_key_value_from`][1]. /// /// The updated transaction location may be invalid, which is not an issue - /// since [`ReadDisk::zs_next_key_value_from`] will fetch the next existing (valid) value. + /// since [`ReadDisk::zs_next_key_value_from`][1] will fetch the next + /// existing (valid) value. + /// + /// [1]: super::super::disk_db::ReadDisk::zs_next_key_value_from pub fn address_iterator_next(&mut self) { // Iterating from the next possible output location gets us the next output, // even if it is in a later block or transaction. diff --git a/zebra-state/src/service/finalized_state/zebra_db.rs b/zebra-state/src/service/finalized_state/zebra_db.rs index 7736582ddcb..60bb11389c2 100644 --- a/zebra-state/src/service/finalized_state/zebra_db.rs +++ b/zebra-state/src/service/finalized_state/zebra_db.rs @@ -72,7 +72,7 @@ impl ZebraDb { /// /// # Logs an Error /// - /// If Zebra is storing block heights that are close to [`MAX_ON_DISK_BLOCK_HEIGHT`]. + /// If Zebra is storing block heights that are close to [`MAX_ON_DISK_HEIGHT`]. fn check_max_on_disk_tip_height(&self) { if let Some((tip_height, tip_hash)) = self.tip() { if tip_height.0 > MAX_ON_DISK_HEIGHT.0 / 2 { diff --git a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs index 30fb11e2a9d..756189e2aae 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/transparent.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/transparent.rs @@ -1,5 +1,5 @@ //! Provides high-level access to database: -//! - unspent [`transparent::Outputs`]s (UTXOs), and +//! - unspent [`transparent::Output`]s (UTXOs), and //! - transparent address indexes. //! //! This module makes sure that: diff --git a/zebra-state/src/service/non_finalized_state/chain.rs b/zebra-state/src/service/non_finalized_state/chain.rs index 1ea6995a445..eef54f7990e 100644 --- a/zebra-state/src/service/non_finalized_state/chain.rs +++ b/zebra-state/src/service/non_finalized_state/chain.rs @@ -37,7 +37,7 @@ pub mod index; #[derive(Debug, Clone)] pub struct Chain { - // The function `eq_internal_state` must be updated every time a field is added to `Chain`. 
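The `address_iterator_start` / `address_iterator_next` docs above describe a seek-then-advance loop: seek to the first key at or after a possibly-invalid start key, then bump the start key past the entry just returned. A minimal sketch, with a `BTreeMap` and `u64` keys standing in for the RocksDB index and `OutputLocation` (all names here are illustrative):

use std::collections::BTreeMap;

// `range(start..).next()` models zs_next_key_value_from: the start key may
// not exist in the index, we only want the next entry at or after it.
fn next_entry_from(index: &BTreeMap<u64, &'static str>, start: u64) -> Option<(u64, &'static str)> {
    index.range(start..).next().map(|(k, v)| (*k, *v))
}

fn main() {
    let mut index = BTreeMap::new();
    index.insert(5, "utxo a");
    index.insert(9, "utxo b");

    // address_iterator_start: begin from the lowest possible location,
    // which is always invalid, and let the seek find the first real entry.
    let mut cursor = 0;
    while let Some((location, utxo)) = next_entry_from(&index, cursor) {
        println!("{location}: {utxo}");
        // address_iterator_next: advance to the next *possible* location;
        // it may not exist either, and the next seek just skips the gap.
        cursor = location + 1;
    }
}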
+ // The function `eq_internal_state` must be updated every time a field is added to [`Chain`]. /// The configured network for this chain. network: Network, @@ -50,35 +50,35 @@ pub struct Chain { /// An index of [`TransactionLocation`]s for each transaction hash in `blocks`. pub tx_by_hash: HashMap, - /// The [`Utxo`]s created by `blocks`. + /// The [`transparent::Utxo`]s created by `blocks`. /// /// Note that these UTXOs may not be unspent. /// Outputs can be spent by later transactions or blocks in the chain. // // TODO: replace OutPoint with OutputLocation? pub(crate) created_utxos: HashMap, - /// The [`OutPoint`]s spent by `blocks`, + /// The [`transparent::OutPoint`]s spent by `blocks`, /// including those created by earlier transactions or blocks in the chain. pub(crate) spent_utxos: HashSet, - /// The Sprout note commitment tree of the tip of this `Chain`, + /// The Sprout note commitment tree of the tip of this [`Chain`], /// including all finalized notes, and the non-finalized notes in this chain. pub(super) sprout_note_commitment_tree: sprout::tree::NoteCommitmentTree, /// The Sprout note commitment tree for each anchor. /// This is required for interstitial states. pub(crate) sprout_trees_by_anchor: HashMap, - /// The Sapling note commitment tree of the tip of this `Chain`, + /// The Sapling note commitment tree of the tip of this [`Chain`], /// including all finalized notes, and the non-finalized notes in this chain. pub(super) sapling_note_commitment_tree: sapling::tree::NoteCommitmentTree, /// The Sapling note commitment tree for each height. pub(crate) sapling_trees_by_height: BTreeMap, - /// The Orchard note commitment tree of the tip of this `Chain`, + /// The Orchard note commitment tree of the tip of this [`Chain`], /// including all finalized notes, and the non-finalized notes in this chain. pub(super) orchard_note_commitment_tree: orchard::tree::NoteCommitmentTree, /// The Orchard note commitment tree for each height. pub(crate) orchard_trees_by_height: BTreeMap, - /// The ZIP-221 history tree of the tip of this `Chain`, + /// The ZIP-221 history tree of the tip of this [`Chain`], /// including all finalized blocks, and the non-finalized `blocks` in this chain. pub(crate) history_tree: HistoryTree, @@ -112,7 +112,7 @@ pub struct Chain { /// because they are common to all non-finalized chains. pub(super) partial_cumulative_work: PartialCumulativeWork, - /// The chain value pool balances of the tip of this `Chain`, + /// The chain value pool balances of the tip of this [`Chain`], /// including the block value pool changes from all finalized blocks, /// and the non-finalized blocks in this chain. /// @@ -222,7 +222,7 @@ impl Chain { /// If the block is invalid, drops this chain, and returns an error. /// /// Note: a [`ContextuallyValidBlock`] isn't actually contextually valid until - /// [`update_chain_state_with`] returns success. + /// [`Self::update_chain_tip_with`] returns success. #[instrument(level = "debug", skip(self, block), fields(block = %block.block))] pub fn push(mut self, block: ContextuallyValidBlock) -> Result { // update cumulative data members @@ -291,7 +291,7 @@ impl Chain { // Rebuild the note commitment trees, starting from the finalized tip tree. // TODO: change to a more efficient approach by removing nodes - // from the tree of the original chain (in `pop_tip()`). + // from the tree of the original chain (in [`Self::pop_tip`]). 
// See https://github.com/ZcashFoundation/zebra/issues/2378 for block in forked.blocks.values() { for transaction in block.block.transactions.iter() { @@ -674,30 +674,31 @@ impl Chain { /// The revert position being performed on a chain. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] enum RevertPosition { - /// The chain root is being reverted via [`pop_root`], - /// when a block is finalized. + /// The chain root is being reverted via [`Chain::pop_root`], when a block + /// is finalized. Root, - /// The chain tip is being reverted via [`pop_tip`], + /// The chain tip is being reverted via [`Chain::pop_tip`], /// when a chain is forked. Tip, } -/// Helper trait to organize inverse operations done on the `Chain` type. +/// Helper trait to organize inverse operations done on the [`Chain`] type. /// /// Used to overload update and revert methods, based on the type of the argument, /// and the position of the removed block in the chain. /// -/// This trait was motivated by the length of the `push`, `pop_root`, and `pop_tip` functions, -/// and fear that it would be easy to introduce bugs when updating them, -/// unless the code was reorganized to keep related operations adjacent to each other. +/// This trait was motivated by the length of the `push`, [`Chain::pop_root`], +/// and [`Chain::pop_tip`] functions, and fear that it would be easy to +/// introduce bugs when updating them, unless the code was reorganized to keep +/// related operations adjacent to each other. trait UpdateWith { /// When `T` is added to the chain tip, - /// update `Chain` cumulative data members to add data that are derived from `T`. + /// update [`Chain`] cumulative data members to add data that are derived from `T`. fn update_chain_tip_with(&mut self, _: &T) -> Result<(), ValidateContextError>; /// When `T` is removed from `position` in the chain, - /// revert `Chain` cumulative data members to remove data that are derived from `T`. + /// revert [`Chain`] cumulative data members to remove data that are derived from `T`. fn revert_chain_with(&mut self, _: &T, position: RevertPosition); } @@ -1268,7 +1269,7 @@ where if let Some(sapling_shielded_data) = sapling_shielded_data { // Note commitments are not removed from the tree here because we // don't support that operation yet. Instead, we recreate the tree - // from the finalized tip in NonFinalizedState. + // from the finalized tip in `NonFinalizedState`. check::nullifier::remove_from_non_finalized_chain( &mut self.sapling_nullifiers, @@ -1348,9 +1349,9 @@ impl UpdateWith> for Chain { /// When forking from the tip, subtract the block's chain value pool change. /// /// When finalizing the root, leave the chain value pool balances unchanged. - /// [`chain_value_pools`] tracks the chain value pools for all finalized blocks, - /// and the non-finalized blocks in this chain. - /// So finalizing the root doesn't change the set of blocks it tracks. + /// [`Self::chain_value_pools`] tracks the chain value pools for all + /// finalized blocks, and the non-finalized blocks in this chain. So + /// finalizing the root doesn't change the set of blocks it tracks. /// /// # Panics /// @@ -1373,13 +1374,15 @@ impl UpdateWith> for Chain { } impl Ord for Chain { - /// Chain order for the [`NonFinalizedState`]'s `chain_set`. + /// Chain order for the [`NonFinalizedState`][1]'s `chain_set`. + /// /// Chains with higher cumulative Proof of Work are [`Ordering::Greater`], /// breaking ties using the tip block hash. 
/// - /// Despite the consensus rules, Zebra uses the tip block hash as a tie-breaker. - /// Zebra blocks are downloaded in parallel, so download timestamps may not be unique. - /// (And Zebra currently doesn't track download times, because [`Block`]s are immutable.) + /// Despite the consensus rules, Zebra uses the tip block hash as a + /// tie-breaker. Zebra blocks are downloaded in parallel, so download + /// timestamps may not be unique. (And Zebra currently doesn't track + /// download times, because [`Block`](block::Block)s are immutable.) /// /// This departure from the consensus rules may delay network convergence, /// for as long as the greater hash belongs to the later mined block. @@ -1414,10 +1417,13 @@ impl Ord for Chain { /// /// If two chains compare equal. /// - /// This panic enforces the `NonFinalizedState.chain_set` unique chain invariant. + /// This panic enforces the [`NonFinalizedState::chain_set`][2] unique chain invariant. /// /// If the chain set contains duplicate chains, the non-finalized state might /// handle new blocks or block finalization incorrectly. + /// + /// [1]: super::NonFinalizedState + /// [2]: super::NonFinalizedState::chain_set fn cmp(&self, other: &Self) -> Ordering { if self.partial_cumulative_work != other.partial_cumulative_work { self.partial_cumulative_work @@ -1454,14 +1460,16 @@ impl PartialOrd for Chain { } impl PartialEq for Chain { - /// Chain equality for the [`NonFinalizedState`]'s `chain_set`, - /// using proof of work, then the tip block hash as a tie-breaker. + /// Chain equality for [`NonFinalizedState::chain_set`][1], using proof of + /// work, then the tip block hash as a tie-breaker. /// /// # Panics /// /// If two chains compare equal. /// /// See [`Chain::cmp`] for details. + /// + /// [1]: super::NonFinalizedState::chain_set fn eq(&self, other: &Self) -> bool { self.partial_cmp(other) == Some(Ordering::Equal) } diff --git a/zebra-state/src/service/non_finalized_state/chain/index.rs b/zebra-state/src/service/non_finalized_state/chain/index.rs index d2a6323017e..d1f5c2d543b 100644 --- a/zebra-state/src/service/non_finalized_state/chain/index.rs +++ b/zebra-state/src/service/non_finalized_state/chain/index.rs @@ -20,16 +20,14 @@ use super::{RevertPosition, UpdateWith}; #[derive(Clone, Debug, Eq, PartialEq)] pub struct TransparentTransfers { /// The partial chain balance for a transparent address. - /// - /// TODO: - /// - to avoid [`ReadStateService`] response inconsistencies when a block has just been finalized, - /// revert UTXO receives and spends that are at a height less than or equal to the finalized tip. balance: Amount, /// The partial list of transactions that spent or received UTXOs to a transparent address. /// - /// Since transactions can only be added to this set, it does not need special handling - /// for [`ReadStateService`] response inconsistencies. + /// Since transactions can only be added to this set, it does not need + /// special handling for + /// [`ReadStateService`](crate::service::ReadStateService) response + /// inconsistencies. /// /// The `getaddresstxids` RPC needs these transaction IDs to be sorted in chain order. tx_ids: MultiSet, @@ -39,11 +37,7 @@ pub struct TransparentTransfers { /// The `getaddressutxos` RPC doesn't need these transaction IDs to be sorted in chain order, /// but it might in future. So Zebra does it anyway. 
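The `tx_ids` multiset above is unordered; chain order for the `getaddresstxids` RPC is recovered by sorting each hash on its `TransactionLocation`, as the `tx_ids()` hunk further down describes. A small sketch of that sort, with hypothetical stand-in names and toy data:

use std::collections::HashMap;

fn main() {
    // hash -> (height, index in block), standing in for Chain::tx_by_hash.
    let tx_by_hash: HashMap<&str, (u32, usize)> =
        HashMap::from([("tx b", (100, 7)), ("tx a", (100, 0)), ("tx c", (200, 3))]);

    // The per-address index only stores the hashes (an unordered multiset);
    // chain order is recovered by sorting on the looked-up location.
    let mut ids = vec!["tx c", "tx a", "tx b"];
    // The lookup is expected to succeed for every indexed hash, mirroring
    // the documented panic when chain_tx_by_hash is missing one.
    ids.sort_by_key(|id| *tx_by_hash.get(id).expect("hash missing from tx_by_hash"));
    assert_eq!(ids, ["tx a", "tx b", "tx c"]);
}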
/// - /// TODO: - /// - to avoid [`ReadStateService`] response inconsistencies when a block has just been finalized, - /// combine the created UTXOs, combine the spent UTXOs, and then remove spent from created - /// - /// Optional: + /// Optional TODOs: /// - store `Utxo`s in the chain, and just store the created locations for this address /// - if we add an OutputLocation to UTXO, remove this OutputLocation, /// and use the inner OutputLocation to sort Utxos in chain order @@ -210,16 +204,21 @@ impl TransparentTransfers { self.balance } - /// Returns the [`transaction::Hash`]es of the transactions that sent or received - /// transparent transfers to this address, in this partial chain, filtered by `query_height_range`. + /// Returns the [`transaction::Hash`]es of the transactions that sent or + /// received transparent transfers to this address, in this partial chain, + /// filtered by `query_height_range`. /// /// The transactions are returned in chain order. /// - /// `chain_tx_by_hash` should be the `tx_by_hash` field from the [`Chain`] containing this index. + /// `chain_tx_by_hash` should be the `tx_by_hash` field from the + /// [`Chain`][1] containing this index. /// /// # Panics /// - /// If `chain_tx_by_hash` is missing some transaction hashes from this index. + /// If `chain_tx_by_hash` is missing some transaction hashes from this + /// index. + /// + /// [1]: super::super::Chain pub fn tx_ids( &self, chain_tx_by_hash: &HashMap, @@ -270,7 +269,7 @@ impl Default for TransparentTransfers { } } -/// Returns the transaction location for an [`OrderedUtxo`]. +/// Returns the transaction location for an [`transparent::OrderedUtxo`]. pub fn transaction_location(ordered_utxo: &transparent::OrderedUtxo) -> TransactionLocation { TransactionLocation::from_usize(ordered_utxo.utxo.height, ordered_utxo.tx_index_in_block) } diff --git a/zebra-state/src/service/pending_utxos.rs b/zebra-state/src/service/pending_utxos.rs index 76f09018510..b2566ff1139 100644 --- a/zebra-state/src/service/pending_utxos.rs +++ b/zebra-state/src/service/pending_utxos.rs @@ -35,8 +35,9 @@ impl PendingUtxos { } } - /// Notify all requests waiting for the [`Utxo`] pointed to by the given - /// [`transparent::OutPoint`] that the [`Utxo`] has arrived. + /// Notify all requests waiting for the [`transparent::Utxo`] pointed to by + /// the given [`transparent::OutPoint`] that the [`transparent::Utxo`] has + /// arrived. pub fn respond(&mut self, outpoint: &transparent::OutPoint, utxo: transparent::Utxo) { if let Some(sender) = self.0.remove(outpoint) { // Adding the outpoint as a field lets us cross-reference @@ -46,7 +47,8 @@ impl PendingUtxos { } } - /// Check the list of pending UTXO requests against the supplied [`OrderedUtxo`] index. + /// Check the list of pending UTXO requests against the supplied + /// [`transparent::OrderedUtxo`] index. pub fn check_against_ordered( &mut self, ordered_utxos: &HashMap, @@ -56,7 +58,7 @@ impl PendingUtxos { } } - /// Check the list of pending UTXO requests against the supplied [`Utxo`] index. + /// Check the list of pending UTXO requests against the supplied [`transparent::Utxo`] index. pub fn check_against(&mut self, utxos: &HashMap) { for (outpoint, utxo) in utxos.iter() { self.respond(outpoint, utxo.clone()) diff --git a/zebra-state/src/service/read.rs b/zebra-state/src/service/read.rs index c0fc5c06077..06d182d0002 100644 --- a/zebra-state/src/service/read.rs +++ b/zebra-state/src/service/read.rs @@ -1,12 +1,14 @@ //! Shared state reading code. //! -//! 
Used by [`StateService`](crate::StateService) and -//! [`ReadStateService`](crate::ReadStateService) to read from the best -//! [`Chain`] in the -//! [`NonFinalizedState`](crate::service::non_finalized_state::NonFinalizedState), -//! and the database in the -//! [`FinalizedState`](crate::service::finalized_state::FinalizedState). - +//! Used by [`StateService`][1] and [`ReadStateService`][2] to read from the +//! best [`Chain`][5] in the [`NonFinalizedState`][3], and the database in the +//! [`FinalizedState`][4]. +//! +//! [1]: super::StateService +//! [2]: super::ReadStateService +//! [3]: super::non_finalized_state::NonFinalizedState +//! [4]: super::finalized_state::FinalizedState +//! [5]: super::Chain use std::{ collections::{BTreeMap, BTreeSet, HashSet}, ops::RangeInclusive, diff --git a/zebra-state/src/service/watch_receiver.rs b/zebra-state/src/service/watch_receiver.rs index 2500c425db3..a3cb4826477 100644 --- a/zebra-state/src/service/watch_receiver.rs +++ b/zebra-state/src/service/watch_receiver.rs @@ -46,9 +46,10 @@ where /// /// # Performance /// - /// A single read lock is acquired to clone `T`, and then released after the clone. - /// To make this clone efficient, large or expensive `T` can be wrapped in an [`Arc`]. - /// (Or individual fields can be wrapped in an `Arc`.) + /// A single read lock is acquired to clone `T`, and then released after the + /// clone. To make this clone efficient, large or expensive `T` can be + /// wrapped in an [`std::sync::Arc`]. (Or individual fields can be wrapped + /// in an [`std::sync::Arc`].) /// /// # Correctness /// diff --git a/zebra-test/src/command.rs b/zebra-test/src/command.rs index 2ffef044e0b..93ff57daa43 100644 --- a/zebra-test/src/command.rs +++ b/zebra-test/src/command.rs @@ -41,7 +41,7 @@ pub fn test_cmd(command_path: &str, tempdir: &Path) -> Result { // TODO: split these extensions into their own module -/// Wrappers for `Command` methods to integrate with [`zebra_test`]. +/// Wrappers for `Command` methods to integrate with [`zebra_test`](crate). pub trait CommandExt { /// wrapper for `status` fn on `Command` that constructs informative error /// reports @@ -637,7 +637,7 @@ impl TestChild { /// if a line matches. /// /// Kills the child on error, or after the configured timeout has elapsed. - /// See `expect_line_matching` for details. + /// See [`Self::expect_line_matching_regex_set`] for details. #[instrument(skip(self))] pub fn expect_stdout_line_matches(&mut self, success_regex: R) -> Result<&mut Self> where @@ -663,7 +663,7 @@ impl TestChild { /// if a line matches. /// /// Kills the child on error, or after the configured timeout has elapsed. - /// See `expect_line_matching` for details. + /// See [`Self::expect_line_matching_regex_set`] for details. #[instrument(skip(self))] pub fn expect_stderr_line_matches(&mut self, success_regex: R) -> Result<&mut Self> where @@ -687,8 +687,8 @@ impl TestChild { /// Checks each line in `lines` against a regex set, and returns Ok if a line matches. /// - /// [`TestChild::expect_line_matching`] wrapper for strings, [`Regex`]es, - /// and [`RegexSet`]s. + /// [`Self::expect_line_matching_regexes`] wrapper for strings, + /// [`Regex`](regex::Regex)es, and [`RegexSet`]s. pub fn expect_line_matching_regex_set( &mut self, lines: &mut L, @@ -708,7 +708,7 @@ impl TestChild { /// Checks each line in `lines` against a regex set, and returns Ok if a line matches. /// - /// [`TestChild::expect_line_matching`] wrapper for regular expression iterators. 
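The `WatchReceiver` performance note above is the usual clone-an-`Arc`-under-the-lock pattern. A minimal sketch with `std::sync` types standing in for the watch channel internals (names are illustrative, not zebra-state's API):

use std::sync::{Arc, RwLock};

#[derive(Clone)]
struct BigState {
    blocks: Vec<[u8; 32]>, // imagine this is large
}

// The read lock is held only for the duration of a cheap Arc clone
// (a reference-count bump), not a deep copy of BigState.
fn cloned_watch_value(shared: &RwLock<Arc<BigState>>) -> Arc<BigState> {
    shared.read().unwrap().clone()
}

fn main() {
    let shared = RwLock::new(Arc::new(BigState { blocks: vec![[0u8; 32]; 1_000] }));
    let snapshot = cloned_watch_value(&shared);
    assert_eq!(snapshot.blocks.len(), 1_000);
}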
+ /// [`Self::expect_line_matching_regexes`] wrapper for regular expression iterators. pub fn expect_line_matching_regex_iter( &mut self, lines: &mut L, diff --git a/zebra-test/src/network_addr.rs b/zebra-test/src/network_addr.rs index b95d6ff51cd..dbf54ab45e6 100644 --- a/zebra-test/src/network_addr.rs +++ b/zebra-test/src/network_addr.rs @@ -2,7 +2,8 @@ //! * addr (v1): [addr Bitcoin Reference](https://developer.bitcoin.org/reference/p2p_networking.html#addr) //! * addrv2: [ZIP-155](https://zips.z.cash/zip-0155#specification) //! -//! These formats are deserialized into the [`zebra_network::Message::Addr`] variant. +//! These formats are deserialized into the +//! `zebra_network::protocol::external::Message::Addr` variant. use hex::FromHex; use lazy_static::lazy_static; diff --git a/zebra-test/src/prelude.rs b/zebra-test/src/prelude.rs index 0c50438fd4f..e6cf987315e 100644 --- a/zebra-test/src/prelude.rs +++ b/zebra-test/src/prelude.rs @@ -1,4 +1,4 @@ -//! Common [`zebra_test`] types, traits, and functions. +//! Common [`zebra_test`](crate) types, traits, and functions. pub use crate::command::{test_cmd, CommandExt, TestChild}; pub use std::process::Stdio;
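The `expect_*_line_matches` helpers above all funnel into the regex-set check on individual output lines. A stripped-down sketch of that check, assuming the `regex` crate as a dependency (the real methods also enforce timeouts and kill the child process on failure, as documented above):

use regex::RegexSet;

// Returns the first line that matches any pattern in the set, or an error
// if the lines run out. TestChild layers timeouts, child-process handling,
// and richer error reports on top of this basic check.
fn expect_line_matching_regex_set(
    lines: impl IntoIterator<Item = String>,
    set: &RegexSet,
) -> Result<String, String> {
    for line in lines {
        if set.is_match(&line) {
            return Ok(line);
        }
    }
    Err("no line matched the regex set".to_string())
}

fn main() {
    let set = RegexSet::new([r"synced", r"verified checkpoint"]).unwrap();
    let lines = vec![
        "starting zebrad".to_string(),
        "verified checkpoint 42".to_string(),
    ];
    let matched = expect_line_matching_regex_set(lines, &set).unwrap();
    assert_eq!(matched, "verified checkpoint 42");
}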