diff --git a/.config/nextest.toml b/.config/nextest.toml index a4a1e566..81536abc 100644 --- a/.config/nextest.toml +++ b/.config/nextest.toml @@ -6,6 +6,8 @@ threads-required = 1 [profile.ci] retries = { backoff = "exponential", count = 3, delay = "30s", jitter = true, max-delay = "300s" } failure-output = "immediate-final" +leak-timeout = "800ms" +slow-timeout = { period = "120s", terminate-after = 2 } fail-fast = false [test-groups] diff --git a/.envrc b/.envrc index 8ea2cc72..6e5c116c 100644 --- a/.envrc +++ b/.envrc @@ -1,5 +1,5 @@ use_flake -export RUST_LOG=homestar=debug,homestar_runtime=debug,libp2p=info,libp2p_gossipsub::behaviour=debug,tarpc=info,tower_http=debug,moka=debug +export RUST_LOG=homestar=debug,homestar_runtime=debug,libp2p=info,libp2p_gossipsub::behaviour=debug,tarpc=info,tower_http=debug,jsonrpsee_server=debug,moka=debug export RUST_BACKTRACE=full export RUSTFLAGS="--cfg tokio_unstable" diff --git a/.github/workflows/builds.yml b/.github/workflows/builds.yml index dd87191d..e22ad4e9 100644 --- a/.github/workflows/builds.yml +++ b/.github/workflows/builds.yml @@ -15,7 +15,7 @@ on: # for debugging # pull_request: - # branches: [ '**' ] + # branches: ['**'] concurrency: group: ${{ github.workflow }}-${{ github.ref }} @@ -29,14 +29,19 @@ jobs: include: - target: aarch64-unknown-linux-gnu - target: aarch64-unknown-linux-musl + npm: linux-arm64 - target: aarch64-apple-darwin os: macos-latest + npm: darwin-arm64 - target: x86_64-unknown-linux-gnu - target: x86_64-unknown-linux-musl + npm: linux-x64 - target: x86_64-apple-darwin os: macos-latest + npm: darwin-x64 - target: x86_64-pc-windows-msvc os: windows-latest + npm: windows-x64 - target: x86_64-unknown-freebsd permissions: @@ -94,25 +99,91 @@ jobs: include: LICENSE,README.md token: ${{ secrets.GITHUB_TOKEN }} - build-packages: + npm-publish: + needs: binary-builds runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - target: aarch64-unknown-linux-musl + os: linux + arch: arm64 + - target: x86_64-unknown-linux-musl + os: linux + arch: x64 + - target: aarch64-apple-darwin + os: darwin + arch: arm64 + - target: x86_64-apple-darwin + os: darwin + arch: x64 + - target: x86_64-pc-windows-msvc + os: windows + arch: x64 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-node@v4 + with: + node-version: lts/* + registry-url: 'https://registry.npmjs.org' + - name: Install cargo get + run: cargo install cargo-get + - name: Prepare os/arch packages + shell: bash + env: + node_os: ${{ matrix.os }} + node_arch: ${{ matrix.arch }} + node_pkg: homestar-${{ matrix.os }}-${{ matrix.arch }} + run: | + export node_version=$(cargo get workspace.package.version) + echo "node_pkg=${node_pkg}" >> "$GITHUB_ENV" + cd homestar-runtime/npm + mkdir -p "${node_pkg}/bin" + envsubst < package.json.tmpl > "${node_pkg}/package.json" + - name: Download build artifacts + uses: actions/download-artifact@v3 + with: + name: ${{ matrix.target }} + path: 'homestar-runtime/npm/${{ env.node_pkg }}/bin' + - name: Publish production + if: github.event_name == 'release' && github.event.action == 'published' + run: | + cd "homestar-runtime/npm/${{ env.node_pkg }}" + npm publish --access=public + env: + NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} + - name: Publish RC + if: github.event_name == 'workflow_dispatch' + run: | + cd "homestar-runtime/npm/${{ env.node_pkg }}" + npm version $(cargo get package.version)-rc.$(date +%s) --git-tag-version false + npm publish --access public --tag rc + env: + NODE_AUTH_TOKEN: ${{secrets.NPM_TOKEN}} 
- env: - LINUX_TARGET: x86_64-unknown-linux-musl - + build-packages: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - target: x86_64-unknown-linux-gnu + - target: x86_64-unknown-linux-musl steps: - name: Checkout uses: actions/checkout@v4 - name: Install musl-tools run: sudo apt update && sudo apt install -y musl-dev musl-tools + if: matrix.target == 'x86_64-unknown-linux-musl' - name: Install Rust toolchain id: toolchain uses: dtolnay/rust-toolchain@master with: toolchain: stable - targets: ${{ env.LINUX_TARGET }} + targets: ${{ matrix.target }} - name: Override rust-toolchain.toml run: rustup override set ${{steps.toolchain.outputs.name}} @@ -131,18 +202,18 @@ jobs: uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - shared-key: check-${{ env.LINUX_TARGET }}-ubuntu-latest + shared-key: check-${{ matrix.target }}-ubuntu-latest - name: Create .deb - run: cargo deb -p homestar-runtime --target ${{ env.LINUX_TARGET }} --output homestar.deb + run: cargo deb -p homestar-runtime --target ${{ matrix.target }} --variant ${{ matrix.target }} --output homestar.deb - name: Create .rpm - run: cargo generate-rpm -p homestar-runtime --target ${{ env.LINUX_TARGET }} --output homestar.rpm + run: cargo generate-rpm -p homestar-runtime --target ${{ matrix.target }} --variant ${{ matrix.target }} --output homestar.rpm - name: Upload Release Artifacts uses: actions/upload-artifact@v3 with: - name: ${{ env.LINUX_TARGET }} + name: ${{ matrix.target }} path: | *.deb *.rpm diff --git a/.github/workflows/tests_and_checks.yml b/.github/workflows/tests_and_checks.yml index f6788cca..a55a611c 100644 --- a/.github/workflows/tests_and_checks.yml +++ b/.github/workflows/tests_and_checks.yml @@ -189,7 +189,7 @@ jobs: - name: Run Tests (no-default-features) if: ${{ matrix.default-features == 'none' }} - run: cargo nextest run --workspace --profile ci --no-default-features --features "test-utils" + run: cargo nextest run --profile ci --no-default-features --features "test-utils" - name: Run Doc Tests if: ${{ matrix.default-features == 'all' }} @@ -199,7 +199,8 @@ jobs: needs: changes if: ${{ needs.changes.outputs.rust == 'true' }} env: - RUSTFLAGS: -Ctarget-feature=+crt-static + RUSTFLAGS: -Dwarnings -Ctarget-feature=+crt-static + CARGO_INCREMENTAL: 0 strategy: fail-fast: false matrix: @@ -241,12 +242,11 @@ jobs: - name: Run Tests (no-default-features) if: ${{ matrix.default-features == 'none' }} - run: cargo nextest run --workspace --profile ci --no-default-features --features "test-utils" + run: cargo nextest run --profile ci --no-default-features --features "test-utils" - name: Run Doc Tests if: ${{ matrix.default-features == 'all' }} run: cargo test --doc --workspace - continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }} run-docs: needs: changes diff --git a/.gitignore b/.gitignore index 0b74cf8d..2279e665 100644 --- a/.gitignore +++ b/.gitignore @@ -35,6 +35,7 @@ report.json homestar.err homestar.out homestar.pid +homestar.log* # locks homestar-wasm/Cargo.lock @@ -54,3 +55,6 @@ examples/**/tmp/* # nix build results /result + +# npm packages +homestar-runtime/npm/binaries \ No newline at end of file diff --git a/.ignore b/.ignore index 0ea79a35..feba533b 100644 --- a/.ignore +++ b/.ignore @@ -17,6 +17,8 @@ LICENSE .pre-commit-config.yaml **/fixtures +*.ipfs* +*.db ## examples examples/websocket-relay/relay-app diff --git a/Cargo.lock b/Cargo.lock index 7db5ee22..3c858554 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "ab_glyph" 
-version = "0.2.21" +version = "0.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5110f1c78cf582855d895ecd0746b653db010cec6d9f5575293f27934d980a39" +checksum = "80179d7dd5d7e8c285d67c4a1e652972a92de7475beddfb92028c76463b13225" dependencies = [ "ab_glyph_rasterizer", "owned_ttf_parser", @@ -79,20 +79,30 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if", "once_cell", "version_check", + "zerocopy", ] [[package]] name = "aho-corasick" -version = "1.0.5" +version = "0.7.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +dependencies = [ + "memchr", +] + +[[package]] +name = "aho-corasick" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c378d78423fdad8089616f827526ee33c19f2fddbd5de1629152c9593ba4783" +checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] @@ -146,15 +156,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" dependencies = [ "utf8parse", ] @@ -189,9 +199,9 @@ dependencies = [ [[package]] name = "arbitrary" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d098ff73c1ca148721f37baad5ea6a465a13f9573aba8641fbbbae8164a54e" +checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" [[package]] name = "arrayref" @@ -279,9 +289,9 @@ dependencies = [ "log", "parking", "polling", - "rustix 0.37.25", + "rustix 0.37.27", "slab", - "socket2 0.4.9", + "socket2 0.4.10", "waker-fn", ] @@ -308,7 +318,7 @@ checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -330,7 +340,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -341,7 +351,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -388,11 +398,9 @@ checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", - "base64 0.21.4", "bitflags 1.3.2", "bytes", "futures-util", - "headers", "http", "http-body", "hyper", @@ -404,10 +412,7 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sha1", "sync_wrapper", - "tokio", - "tokio-tungstenite", "tower", "tower-layer", "tower-service", @@ -468,9 +473,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" 
[[package]] name = "base64" -version = "0.21.4" +version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "base64ct" @@ -478,6 +483,15 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "beef" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" +dependencies = [ + "serde", +] + [[package]] name = "bimap" version = "0.6.3" @@ -522,9 +536,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "blake2" @@ -559,9 +573,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "199c42ab6972d92c9f8995f086273d25c42fc0f7b2a1fcefba465c1352d25ba5" +checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", "arrayvec", @@ -599,12 +613,12 @@ dependencies = [ [[package]] name = "bstr" -version = "1.6.2" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2f7349907b712260e64b0afe2f84692af14a454be26187d9df565c7f69266a" +checksum = "c79ad7fb2dd38f3dabd76b09c6a5a20c038fc0213ef1e9afd30eb777f120f019" dependencies = [ "memchr", - "regex-automata 0.3.8", + "regex-automata 0.4.3", "serde", ] @@ -625,9 +639,9 @@ dependencies = [ [[package]] name = "bytecount" -version = "0.6.3" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c" +checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" [[package]] name = "bytemuck" @@ -683,7 +697,7 @@ dependencies = [ "io-lifetimes 1.0.11", "ipnet", "maybe-owned", - "rustix 0.37.25", + "rustix 0.37.27", "windows-sys", "winx 0.35.1", ] @@ -707,7 +721,7 @@ dependencies = [ "cap-primitives", "io-extras", "io-lifetimes 1.0.11", - "rustix 0.37.25", + "rustix 0.37.27", ] [[package]] @@ -718,7 +732,7 @@ checksum = "e95002993b7baee6b66c8950470e59e5226a23b3af39fc59c47fe416dd39821a" dependencies = [ "cap-primitives", "once_cell", - "rustix 0.37.25", + "rustix 0.37.27", "winx 0.35.1", ] @@ -917,7 +931,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -956,9 +970,9 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" +checksum = "f057a694a54f12365049b0958a1685bb52d567f5593b355fbf685838e873d400" dependencies = [ "crossbeam-utils", ] @@ -1021,6 +1035,26 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +[[package]] +name = "const_format" +version 
= "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + [[package]] name = "constant_time_eq" version = "0.3.0" @@ -1063,37 +1097,37 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" dependencies = [ "libc", ] [[package]] name = "cranelift-bforest" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b9d1a9e776c27ad55d7792a380785d1fe8c2d7b099eed8dbd8f4af2b598192" +checksum = "751cbf89e513f283c0641eb7f95dc72fda5051dd95ca203d1dc45e26bc89dba8" dependencies = [ - "cranelift-entity 0.100.0", + "cranelift-entity 0.100.1", ] [[package]] name = "cranelift-codegen" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5528483314c2dd5da438576cd8a9d0b3cedad66fb8a4727f90cd319a81950038" +checksum = "210730edc05121e915201cc36595e1f00062094669fa07ac362340e3627b3dc5" dependencies = [ "bumpalo", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", "cranelift-control", - "cranelift-entity 0.100.0", + "cranelift-entity 0.100.1", "cranelift-isle", "gimli 0.28.0", - "hashbrown 0.14.1", + "hashbrown 0.14.2", "log", "regalloc2", "smallvec", @@ -1102,24 +1136,24 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f46a8318163f7682e35b8730ba93c1b586a2da8ce12a0ed545efc1218550f70" +checksum = "b5dc7fdf210c53db047f3eaf49b3a89efee0cc3d9a2ce0c0f0236933273d0c53" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d1239cfd50eecfaed468d46943f8650e32969591868ad50111613704da6c70" +checksum = "f46875cc87d963119d78fe5c19852757dc6eea3cb9622c0df69c26b242cd44b4" [[package]] name = "cranelift-control" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc530560c8f16cc1d4dd7ea000c56f519c60d1a914977abe849ce555c35a61d" +checksum = "375dca8f58d8a801a85e11730c1529c5c4a9c3593dfb12118391ac437b037155" dependencies = [ "arbitrary", ] @@ -1135,9 +1169,9 @@ dependencies = [ [[package]] name = "cranelift-entity" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f333fa641a9ad2bff0b107767dcb972c18c2bfab7969805a1d7e42449ccb0408" +checksum = "cc619b86fe3c72f43fc417c9fd67a04ec0c98296e5940922d9fd9e6eedf72521" dependencies = [ "serde", "serde_derive", @@ -1145,9 +1179,9 @@ dependencies = [ [[package]] name = "cranelift-frontend" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"06abf6563015a80f03f8bc4df307d0a81363f4eb73108df3a34f6e66fb6d5307" +checksum = "7eb607fd19ae264da18f9f2532e7302b826f7fbf77bf88365fc075f2e3419436" dependencies = [ "cranelift-codegen", "log", @@ -1157,15 +1191,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eb29d0edc8a5c029ed0f7ca77501f272738e3c410020b4a00f42ffe8ad2a8aa" +checksum = "9fe806a6470dddfdf79e878af6a96afb1235a09fe3e21f9e0c2f18d402820432" [[package]] name = "cranelift-native" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006056a7fa920870bad06bf8e1b3033d70cbb7ee625b035efa9d90882a931868" +checksum = "fac7f1722660b10af1f7229c0048f716bfd8bd344549b0e06e3eb6417ec3fe5b" dependencies = [ "cranelift-codegen", "libc", @@ -1174,18 +1208,18 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.100.0" +version = "0.100.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b3d08c05f82903a1f6a04d89c4b9ecb47a4035710f89a39a21a147a80214672" +checksum = "b1b65810be56b619c3c55debade92798d999f34bf0670370c578afab5d905f06" dependencies = [ "cranelift-codegen", - "cranelift-entity 0.100.0", + "cranelift-entity 0.100.1", "cranelift-frontend", "itertools 0.10.5", "log", "smallvec", "wasmparser 0.112.0", - "wasmtime-types 13.0.0", + "wasmtime-types 13.0.1", ] [[package]] @@ -1354,13 +1388,13 @@ dependencies = [ [[package]] name = "curve25519-dalek-derive" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -1405,7 +1439,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -1416,7 +1450,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -1426,7 +1460,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.1", + "hashbrown 0.14.2", "lock_api", "once_cell", "parking_lot_core", @@ -1494,10 +1528,11 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" dependencies = [ + "powerfmt", "serde", ] @@ -1516,14 +1551,14 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e054665eaf6d97d1e7125512bb2d35d07c73ac86cc6920174cb42d1ab697a554" +checksum = "ef8337737574f55a468005a83499da720f20c65586241ffea339db9ecdfd2b44" dependencies = [ "diesel_table_macro_syntax", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -1543,7 +1578,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" dependencies = [ - "syn 2.0.33", + "syn 2.0.39", 
] [[package]] @@ -1621,7 +1656,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -1648,6 +1683,12 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" +[[package]] +name = "dyn-clone" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" + [[package]] name = "ecolor" version = "0.23.0" @@ -1656,9 +1697,9 @@ checksum = "cfdf4e52dbbb615cfd30cf5a5265335c217b5fd8d669593cea74a517d9c605af" [[package]] name = "ed25519" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f6d271ca33075c88028be6f04d502853d63a5ece419d269c15315d4fc1cf1d" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ "pkcs8", "signature", @@ -1722,6 +1763,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "endian-type" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" + [[package]] name = "enum-as-inner" version = "0.5.1" @@ -1743,7 +1790,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -1759,15 +1806,15 @@ dependencies = [ [[package]] name = "enum-ordinalize" -version = "3.1.13" +version = "3.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4f76552f53cefc9a7f64987c3701b99d982f7690606fd67de1d09712fbf52f1" +checksum = "1bf1fa3f06bbff1ea5b1a9c7b14aa992a39657db60a2759457328d7e058f49ee" dependencies = [ "num-bigint", "num-traits", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -1791,24 +1838,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] -name = "errno" -version = "0.3.3" +name = "erased-serde" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" +checksum = "6c138974f9d5e7fe373eb04df7cae98833802ae4b11c24ac7039a21d5af4b26c" dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys", + "serde", ] [[package]] -name = "errno-dragonfly" -version = "0.1.2" +name = "errno" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e" dependencies = [ - "cc", "libc", + "windows-sys", ] [[package]] @@ -1828,12 +1873,12 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "exr" -version = "1.7.0" +version = "1.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1e481eb11a482815d3e9d618db8c42a93207134662873809335a92327440c18" +checksum = "832a761f35ab3e6664babfbdc6cef35a4860e816ec3916dcfd0882954e98a8a8" dependencies = [ "bit_field", - "flume 0.10.14", + "flume", "half 2.2.1", "lebe", "miniz_oxide", @@ -1865,9 +1910,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "faststr" @@ -1886,48 +1931,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b0377f1edc77dbd1118507bc7a66e4ab64d2b90c66f90726dc801e73a8c68f9" dependencies = [ "cfg-if", - "rustix 0.38.13", + "rustix 0.38.21", "windows-sys", ] [[package]] name = "fdeflate" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d329bdeac514ee06249dabc27877490f17f5d371ec693360768b838e19f3ae10" +checksum = "64d6dafc854908ff5da46ff3f8f473c6984119a2876a383a860246dd7841a868" dependencies = [ "simd-adler32", ] [[package]] name = "fiat-crypto" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0870c84016d4b481be5c9f323c24f65e31e901ae618f0e80f4308fb00de1d2d" +checksum = "a481586acf778f1b1455424c343f71124b048ffa5f4fc3f8f6ae9dc432dcb3c7" [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = [ "crc32fast", "miniz_oxide", ] -[[package]] -name = "flume" -version = "0.10.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" -dependencies = [ - "futures-core", - "futures-sink", - "nanorand", - "pin-project", - "spin 0.9.8", -] - [[package]] name = "flume" version = "0.11.0" @@ -1961,7 +1993,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d167b646a876ba8fda6b50ac645cfd96242553cbaf0ca4fccaa39afcbf0801f" dependencies = [ "io-lifetimes 1.0.11", - "rustix 0.38.13", + "rustix 0.38.21", "windows-sys", ] @@ -1977,9 +2009,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", @@ -2002,9 +2034,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", @@ -2012,15 +2044,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies = [ "futures-core", "futures-task", @@ -2030,9 +2062,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = 
"0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-lite" @@ -2051,13 +2083,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -2072,15 +2104,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-ticker" @@ -2098,12 +2130,16 @@ name = "futures-timer" version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +dependencies = [ + "gloo-timers", + "send_wrapper", +] [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures-channel", "futures-core", @@ -2132,7 +2168,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27d12c0aed7f1e24276a241aadc4cb8ea9f83000f34bc062b7cc2d51e3b0fabd" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "debugid", "fxhash", "serde", @@ -2161,9 +2197,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if", "js-sys", @@ -2210,6 +2246,52 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gloo-net" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ac9e8288ae2c632fa9f8657ac70bfe38a1530f345282d7ba66a1f70b72b7dc4" +dependencies = [ + "futures-channel", + "futures-core", + "futures-sink", + "gloo-utils", + "http", + "js-sys", + "pin-project", + "serde", + "serde_json", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "gloo-utils" 
+version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5555354113b18c547c1d3a98fbf7fb32a9ff4f6fa112ce823a21641a0ba3aa" +dependencies = [ + "js-sys", + "serde", + "serde_json", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "h2" version = "0.3.21" @@ -2261,9 +2343,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.1" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfda62a12f55daeae5015f81b0baea145391cb4520f86c248fc615d72640d12" +checksum = "f93e7192158dbcda357bdec5fb5788eebf8bbac027f3f33e719d29135ae84156" dependencies = [ "ahash", "allocator-api2", @@ -2288,7 +2370,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "bytes", "headers-core", "http", @@ -2317,9 +2399,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hex" @@ -2414,6 +2496,7 @@ dependencies = [ name = "homestar-functions-test" version = "0.1.0" dependencies = [ + "base64 0.21.5", "image", "wit-bindgen", ] @@ -2426,12 +2509,12 @@ dependencies = [ "assert_cmd", "async-trait", "atomic_refcell", - "axum", "byte-unit", "chrono", "clap", "config", "console-subscriber", + "const_format", "criterion", "crossbeam", "daemonize", @@ -2440,9 +2523,10 @@ dependencies = [ "diesel", "diesel_migrations", "dotenvy", + "dyn-clone", "enum-assoc", "faststr", - "flume 0.11.0", + "flume", "fnv", "futures", "headers", @@ -2456,11 +2540,14 @@ dependencies = [ "ipfs-api", "ipfs-api-backend-hyper", "itertools 0.11.0", + "jsonrpsee", "libipld", "libp2p", "libsqlite3-sys", + "maplit", "metrics", "metrics-exporter-prometheus", + "metrics-util", "miette", "moka", "names", @@ -2472,6 +2559,7 @@ dependencies = [ "puffin", "puffin_egui", "rand", + "regex", "reqwest", "retry", "rm_rf", @@ -2491,13 +2579,18 @@ dependencies = [ "thiserror", "tokio", "tokio-serde", + "tokio-stream", + "tokio-test", "tokio-tungstenite", "tokio-util", + "tower", + "tower-http", "tracing", "tracing-appender", "tracing-logfmt", "tracing-subscriber", "tryhard", + "typetag", "url", "wait-timeout", "wnfs-common", @@ -2509,7 +2602,7 @@ version = "0.0.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -2534,11 +2627,11 @@ dependencies = [ "tracing", "wasi-common", "wasmparser 0.115.0", - "wasmtime 13.0.0", - "wasmtime-component-util 13.0.0", + "wasmtime 13.0.1", + "wasmtime-component-util 13.0.1", "wasmtime-wasi", "wat", - "wit-component 0.14.2", + "wit-component 0.14.7", ] [[package]] @@ -2574,6 +2667,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" + [[package]] name = "http-serde" version = "1.1.3" @@ -2619,7 +2718,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -2639,6 +2738,22 @@ dependencies = [ "hyper", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http", + "hyper", + "log", + "rustls", + "rustls-native-certs", + "tokio", + "tokio-rustls", +] + [[package]] name = "hyper-timeout" version = "0.4.1" @@ -2653,16 +2768,16 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows 0.48.0", + "windows-core", ] [[package]] @@ -2733,7 +2848,7 @@ dependencies = [ "rtnetlink", "system-configuration", "tokio", - "windows 0.51.1", + "windows", ] [[package]] @@ -2789,7 +2904,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", - "hashbrown 0.14.1", + "hashbrown 0.14.2", "serde", ] @@ -2805,6 +2920,12 @@ dependencies = [ "web-sys", ] +[[package]] +name = "inventory" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0508c56cfe9bfd5dfeb0c22ab9a6abfda2f27bdca422132e494266351ed8d83c" + [[package]] name = "io-extras" version = "0.17.4" @@ -2838,7 +2959,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.4", + "socket2 0.5.5", "widestring", "windows-sys", "winreg 0.50.0", @@ -2898,9 +3019,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" @@ -2909,7 +3030,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi", - "rustix 0.38.13", + "rustix 0.38.21", "windows-sys", ] @@ -2945,9 +3066,9 @@ checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "ittapi" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41e0d0b7b3b53d92a7e8b80ede3400112a6b8b4c98d1f5b8b16bb787c780582c" +checksum = "25a5c0b993601cad796222ea076565c5d9f337d35592f8622c753724f06d7271" dependencies = [ "anyhow", "ittapi-sys", @@ -2956,18 +3077,18 @@ dependencies = [ [[package]] name = "ittapi-sys" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f8763c96e54e6d6a0dccc2990d8b5e33e3313aaeae6185921a3f4c1614a77c" +checksum = "cb7b5e473765060536a660eed127f758cf1a810c73e49063264959c60d1727d9" dependencies = [ "cc", ] [[package]] name = "jobserver" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" dependencies = [ "libc", ] @@ -2980,9 +3101,9 @@ 
checksum = "bc0000e42512c92e31c2252315bda326620a4e034105e900c98ec492fa077b3e" [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "54c0c35952f67de54bb584e9fd912b3023117cbafc0a77d8f3dee1fb5f572fe8" dependencies = [ "wasm-bindgen", ] @@ -2993,6 +3114,152 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" +[[package]] +name = "jsonrpsee" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "affdc52f7596ccb2d7645231fc6163bb314630c989b64998f3699a28b4d5d4dc" +dependencies = [ + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-http-client", + "jsonrpsee-server", + "jsonrpsee-types", + "jsonrpsee-wasm-client", + "jsonrpsee-ws-client", + "tokio", +] + +[[package]] +name = "jsonrpsee-client-transport" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b005c793122d03217da09af68ba9383363caa950b90d3436106df8cabce935" +dependencies = [ + "futures-channel", + "futures-util", + "gloo-net", + "http", + "jsonrpsee-core", + "pin-project", + "rustls-native-certs", + "soketto", + "thiserror", + "tokio", + "tokio-rustls", + "tokio-util", + "tracing", + "url", + "webpki-roots", +] + +[[package]] +name = "jsonrpsee-core" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da2327ba8df2fdbd5e897e2b5ed25ce7f299d345b9736b6828814c3dbd1fd47b" +dependencies = [ + "anyhow", + "async-lock", + "async-trait", + "beef", + "futures-timer", + "futures-util", + "hyper", + "jsonrpsee-types", + "parking_lot", + "rand", + "rustc-hash", + "serde", + "serde_json", + "soketto", + "thiserror", + "tokio", + "tracing", + "wasm-bindgen-futures", +] + +[[package]] +name = "jsonrpsee-http-client" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f80c17f62c7653ce767e3d7288b793dfec920f97067ceb189ebdd3570f2bc20" +dependencies = [ + "async-trait", + "hyper", + "hyper-rustls", + "jsonrpsee-core", + "jsonrpsee-types", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tracing", + "url", +] + +[[package]] +name = "jsonrpsee-server" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82c39a00449c9ef3f50b84fc00fc4acba20ef8f559f07902244abf4c15c5ab9c" +dependencies = [ + "futures-util", + "http", + "hyper", + "jsonrpsee-core", + "jsonrpsee-types", + "route-recognizer", + "serde", + "serde_json", + "soketto", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util", + "tower", + "tracing", +] + +[[package]] +name = "jsonrpsee-types" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be0be325642e850ed0bdff426674d2e66b2b7117c9be23a7caef68a2902b7d9" +dependencies = [ + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "jsonrpsee-wasm-client" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c7cbb3447cf14fd4d2f407c3cc96e6c9634d5440aa1fbed868a31f3c02b27f0" +dependencies = [ + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", +] + +[[package]] +name = "jsonrpsee-ws-client" +version = "0.20.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bca9cb3933ccae417eb6b08c3448eb1cb46e39834e5b503e395e5e5bd08546c0" +dependencies = [ + "http", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", + "url", +] + [[package]] name = "keccak" version = "0.1.4" @@ -3022,9 +3289,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.149" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libipld" @@ -3118,9 +3385,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libp2p" @@ -3204,6 +3471,7 @@ dependencies = [ "quick-protobuf", "rand", "rw-stream-sink", + "serde", "smallvec", "thiserror", "unsigned-varint", @@ -3233,7 +3501,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1f9624e2a843b655f1c1b8262b8d5de6f309413fca4d66f01bb0662429f84dc" dependencies = [ "asynchronous-codec", - "base64 0.21.4", + "base64 0.21.5", "byteorder", "bytes", "either", @@ -3252,6 +3520,7 @@ dependencies = [ "quick-protobuf-codec", "rand", "regex", + "serde", "sha2 0.10.8", "smallvec", "unsigned-varint", @@ -3296,6 +3565,7 @@ dependencies = [ "multihash 0.19.1", "quick-protobuf", "rand", + "serde", "sha2 0.10.8", "thiserror", "zeroize", @@ -3322,6 +3592,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "rand", + "serde", "sha2 0.10.8", "smallvec", "thiserror", @@ -3345,7 +3616,7 @@ dependencies = [ "log", "rand", "smallvec", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", "trust-dns-proto 0.22.0", "void", @@ -3410,9 +3681,9 @@ dependencies = [ "parking_lot", "quinn", "rand", - "ring", + "ring 0.16.20", "rustls", - "socket2 0.5.4", + "socket2 0.5.5", "thiserror", "tokio", ] @@ -3494,7 +3765,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -3510,7 +3781,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "socket2 0.5.4", + "socket2 0.5.5", "tokio", ] @@ -3525,7 +3796,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "rcgen", - "ring", + "ring 0.16.20", "rustls", "rustls-webpki", "thiserror", @@ -3562,6 +3833,17 @@ dependencies = [ "yamux", ] +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.4.1", + "libc", + "redox_syscall", +] + [[package]] name = "libsecp256k1" version = "0.7.1" @@ -3635,15 +3917,15 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.7" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ "autocfg", "scopeguard", @@ -3661,7 +3943,7 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efa59af2ddfad1854ae27d75009d538d0998b4b2fd47083e743ac1a10e46c60" dependencies = [ - "hashbrown 0.14.1", + "hashbrown 0.14.2", ] [[package]] @@ -3697,6 +3979,12 @@ dependencies = [ "libc", ] +[[package]] +name = "maplit" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" + [[package]] name = "match_cfg" version = "0.1.0" @@ -3720,9 +4008,9 @@ checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matchit" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "maybe-owned" @@ -3738,17 +4026,17 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.6.3" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memfd" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffc89ccdc6e10d6907450f753537ebc5c5d3460d2e4e62ea74bd571db62c0f9e" +checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" dependencies = [ - "rustix 0.37.25", + "rustix 0.38.21", ] [[package]] @@ -3786,7 +4074,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "hyper", "indexmap 1.9.3", "ipnet", @@ -3805,7 +4093,7 @@ checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -3814,12 +4102,16 @@ version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "111cb375987443c3de8d503580b536f77dc8416d32db62d9456db5d93bd7ac47" dependencies = [ + "aho-corasick 0.7.20", "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.13.2", + "indexmap 1.9.3", "metrics", "num_cpus", + "ordered-float", "quanta", + "radix_trie", "sketches-ddsketch", ] @@ -3852,7 +4144,7 @@ checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -3910,9 +4202,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "wasi", @@ -4041,6 +4333,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ "core2", + "serde", "unsigned-varint", ] @@ -4081,15 
+4374,6 @@ dependencies = [ "rand", ] -[[package]] -name = "nanorand" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" -dependencies = [ - "getrandom", -] - [[package]] name = "natord" version = "1.0.9" @@ -4162,6 +4446,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "nibble_vec" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" +dependencies = [ + "smallvec", +] + [[package]] name = "nix" version = "0.24.3" @@ -4179,7 +4472,7 @@ version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "cfg-if", "libc", ] @@ -4252,9 +4545,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", "libm", @@ -4289,7 +4582,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "crc32fast", - "hashbrown 0.14.1", + "hashbrown 0.14.2", "indexmap 2.1.0", "memchr", ] @@ -4321,6 +4614,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + [[package]] name = "opentelemetry" version = "0.18.0" @@ -4364,11 +4663,20 @@ dependencies = [ "thiserror", ] +[[package]] +name = "ordered-float" +version = "3.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" +dependencies = [ + "num-traits", +] + [[package]] name = "owned_ttf_parser" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "706de7e2214113d63a8238d1910463cfce781129a6f263d13fdb09ff64355ba4" +checksum = "d4586edfe4c648c71797a74c84bacb32b52b212eff5dfe2bb9f2c599844023e7" dependencies = [ "ttf-parser", ] @@ -4392,9 +4700,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" @@ -4408,13 +4716,13 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.3.5", + "redox_syscall", "smallvec", "windows-targets", ] @@ -4472,7 +4780,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -4505,9 +4813,9 @@ checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" [[package]] name = "platforms" -version = "3.1.2" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4503fa043bf02cee09a9582e9554b4c6403b2ef55e4612e96561d294419429f8" +checksum = "14e6ab3f592e6fb464fc9712d8d6e6912de6473954635fd76a589d832cffcbb0" [[package]] name = "plotters" @@ -4591,9 +4899,15 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.4.3" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bccab0e7fd7cc19f820a1c8c91720af652d0c88dc9664dd72aef2614f04af3b" + +[[package]] +name = "powerfmt" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31114a898e107c51bb1609ffaf55a0e011cf6a4d7f1170d0015a165082c0338b" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" @@ -4671,14 +4985,14 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" dependencies = [ "unicode-ident", ] @@ -4703,7 +5017,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -4726,7 +5040,7 @@ checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.0", + "bitflags 2.4.1", "lazy_static", "num-traits", "rand", @@ -4758,7 +5072,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -4897,13 +5211,13 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.10.4" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13f81c9a9d574310b8351f8666f5a93ac3b0069c45c28ad52c10291389a7cf9" +checksum = "141bf7dfde2fbc246bfd3fe12f2455aa24b0fbd9af535d8c86c7bd1381ff2b1a" dependencies = [ "bytes", "rand", - "ring", + "ring 0.16.20", "rustc-hash", "rustls", "slab", @@ -4920,7 +5234,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.4", + "socket2 0.5.5", "tracing", "windows-sys", ] @@ -4945,6 +5259,16 @@ dependencies = [ "scheduled-thread-pool", ] +[[package]] +name = "radix_trie" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" +dependencies = [ + "endian-type", + "nibble_vec", +] + [[package]] name = "rand" version = "0.8.5" @@ -4995,9 +5319,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", @@ -5005,14 +5329,12 @@ dependencies = [ [[package]] name = "rayon-core" -version = 
"1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] @@ -5022,45 +5344,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbe84efe2f38dea12e9bfc1f65377fdf03e53a18cb3b995faedf7934c7e785b" dependencies = [ "pem", - "ring", + "ring 0.16.20", "time", "yasna", ] [[package]] name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.3.5" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ "getrandom", - "redox_syscall 0.2.16", + "libredox", "thiserror", ] [[package]] name = "regalloc2" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b4dcbd3a2ae7fb94b5813fa0e957c6ab51bf5d0a8ee1b69e0c2d0f1e6eb8485" +checksum = "ad156d539c879b7a24a363a2016d77961786e71f48f2e2fc8302a92abd2429a6" dependencies = [ "hashbrown 0.13.2", "log", @@ -5075,7 +5388,7 @@ version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ - "aho-corasick", + "aho-corasick 1.1.2", "memchr", "regex-automata 0.4.3", "regex-syntax 0.8.2", @@ -5090,19 +5403,13 @@ dependencies = [ "regex-syntax 0.6.29", ] -[[package]] -name = "regex-automata" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" - [[package]] name = "regex-automata" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ - "aho-corasick", + "aho-corasick 1.1.2", "memchr", "regex-syntax 0.8.2", ] @@ -5131,7 +5438,7 @@ version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "bytes", "encoding_rs", "futures-core", @@ -5189,11 +5496,25 @@ dependencies = [ "libc", "once_cell", "spin 0.5.2", - "untrusted", + "untrusted 0.7.1", "web-sys", "winapi", ] +[[package]] +name = "ring" +version = "0.17.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb0205304757e5d899b9c2e448b867ffd03ae7f988002e47cd24954391394d0b" +dependencies = [ + "cc", + "getrandom", + "libc", + "spin 0.9.8", + "untrusted 0.9.0", + "windows-sys", +] + [[package]] name = "rm_rf" version = "0.6.2" @@ -5225,6 +5546,12 @@ dependencies = [ 
"serde", ] +[[package]] +name = "route-recognizer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" + [[package]] name = "rtnetlink" version = "0.10.1" @@ -5282,9 +5609,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.37.25" +version = "0.37.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" dependencies = [ "bitflags 1.3.2", "errno", @@ -5298,37 +5625,58 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.13" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" +checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "errno", "libc", - "linux-raw-sys 0.4.7", + "linux-raw-sys 0.4.11", "windows-sys", ] [[package]] name = "rustls" -version = "0.21.7" +version = "0.21.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "446e14c5cda4f3f30fe71863c34ec70f5ac79d6087097ad0bb433e1be5edf04c" dependencies = [ "log", - "ring", + "ring 0.17.5", "rustls-webpki", "sct", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +dependencies = [ + "base64 0.21.5", +] + [[package]] name = "rustls-webpki" -version = "0.101.5" +version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring", - "untrusted", + "ring 0.17.5", + "untrusted 0.9.0", ] [[package]] @@ -5375,6 +5723,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schannel" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +dependencies = [ + "windows-sys", +] + [[package]] name = "scheduled-thread-pool" version = "0.2.7" @@ -5392,12 +5749,12 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring", - "untrusted", + "ring 0.17.5", + "untrusted 0.9.0", ] [[package]] @@ -5411,6 +5768,29 @@ dependencies = [ "zeroize", ] +[[package]] +name = "security-framework" +version = "2.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +dependencies = [ + 
"bitflags 1.3.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "semver" version = "1.0.20" @@ -5420,11 +5800,17 @@ dependencies = [ "serde", ] +[[package]] +name = "send_wrapper" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" + [[package]] name = "serde" -version = "1.0.190" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91d3c334ca1ee894a2c6f6ad698fe8c435b76d504b13d436f0685d648d6d96f7" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -5449,13 +5835,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.190" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c5609f394e5c2bd7fc51efda478004ea80ef42fee983d5c67a65e34f32c0e3" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -5483,9 +5869,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" dependencies = [ "serde", ] @@ -5504,11 +5890,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" +checksum = "64cd236ccc1b7a29e7e2739f27c0b2dd199804abc4290e32f59f3b68d6405c23" dependencies = [ - "base64 0.21.4", + "base64 0.21.5", "chrono", "hex", "indexmap 1.9.3", @@ -5521,14 +5907,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e6be15c453eb305019bfa438b1593c731f36a289a7853f7707ee29e870b3b3c" +checksum = "93634eb5f75a2323b16de4748022ac4297f9e76b6dced2be287a099f41b5e788" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -5552,14 +5938,27 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", +] + +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", ] [[package]] name = "sha1" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -5602,9 +6001,9 @@ dependencies = [ [[package]] name = "sharded-slab" -version = 
"0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] @@ -5686,9 +6085,9 @@ checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a" [[package]] name = "smawk" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f67ad224767faa3c7d8b6d91985b78e70a1324408abcb1cfcc2be4c06bc06043" +checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" [[package]] name = "snafu" @@ -5723,7 +6122,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek", "rand_core", - "ring", + "ring 0.16.20", "rustc_version", "sha2 0.10.8", "subtle", @@ -5731,9 +6130,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -5741,14 +6140,30 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys", ] +[[package]] +name = "soketto" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +dependencies = [ + "base64 0.13.1", + "bytes", + "futures", + "http", + "httparse", + "log", + "rand", + "sha-1", +] + [[package]] name = "spdx" version = "0.10.2" @@ -5857,15 +6272,15 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.25.2" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8d03b598d3d0fff69bf533ee3ef19b8eeb342729596df84bcc7e1f96ec4059" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck", "proc-macro2", "quote", "rustversion", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -5876,9 +6291,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "supports-color" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4950e7174bffabe99455511c39707310e7e9b440364a2fcb1cc21521be57b354" +checksum = "d6398cde53adc3c4557306a96ce67b302968513830a77a95b2b17305d9719a89" dependencies = [ "is-terminal", "is_ci", @@ -5915,9 +6330,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.33" +version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9caece70c63bfba29ec2fed841a09851b14a235c60010fa4de58089b6c025668" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", @@ -5996,12 +6411,12 @@ version = "0.25.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10081a99cbecbc363d381b9503563785f0b02735fccbb0d4c1a2cb3d39f7e7fe" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "cap-fs-ext", "cap-std", "fd-lock", "io-lifetimes 2.0.2", - "rustix 0.38.13", + "rustix 0.38.21", 
"windows-sys", "winx 0.36.2", ] @@ -6038,9 +6453,9 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" [[package]] name = "target-lexicon" -version = "0.12.11" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0e916b1148c8e263850e1ebcbd046f333e0683c724876bb0da63ea4373dc8a" +checksum = "14c39fd04924ca3a864207c66fc2cd7d22d7c016007f9ce846cbb9326331930a" [[package]] name = "tarpc" @@ -6079,14 +6494,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.8.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", - "fastrand 2.0.0", - "redox_syscall 0.3.5", - "rustix 0.38.13", + "fastrand 2.0.1", + "redox_syscall", + "rustix 0.38.21", "windows-sys", ] @@ -6134,7 +6549,7 @@ checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -6149,12 +6564,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "426f806f4089c493dcac0d24c29c01e2c38baf8e30f1b716ee37e83d200b18fe" +checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ "deranged", "itoa", + "powerfmt", "serde", "time-core", "time-macros", @@ -6213,7 +6629,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "tracing", "windows-sys", @@ -6237,7 +6653,17 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", +] + +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls", + "tokio", ] [[package]] @@ -6264,6 +6690,20 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util", +] + +[[package]] +name = "tokio-test" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89b3cbabd3ae862100094ae433e1def582cf86451b4e9bf83aa7ac1d8a7d719" +dependencies = [ + "async-stream", + "bytes", + "futures-core", + "tokio", + "tokio-stream", ] [[package]] @@ -6280,12 +6720,13 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", "pin-project-lite", "slab", @@ -6316,9 +6757,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -6345,7 +6786,7 @@ dependencies = [ "async-stream", "async-trait", "axum", - "base64 0.21.4", + "base64 0.21.5", "bytes", "h2", "http", @@ -6383,6 +6824,25 @@ 
dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +dependencies = [ + "bitflags 2.4.1", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -6427,7 +6887,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -6506,7 +6966,7 @@ dependencies = [ "lazy_static", "rand", "smallvec", - "socket2 0.4.9", + "socket2 0.4.10", "thiserror", "tinyvec", "tokio", @@ -6579,9 +7039,9 @@ dependencies = [ [[package]] name = "ttf-parser" -version = "0.19.2" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49d64318d8311fc2668e48b63969f4343e0a85c4a109aa8460d6672e364b8bd1" +checksum = "17f77d76d837a7830fe1d4f12b7b4ba4192c1888001c7164257e4bc6d21d96b4" [[package]] name = "tungstenite" @@ -6615,9 +7075,33 @@ dependencies = [ [[package]] name = "typenum" -version = "1.16.0" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "typetag" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80960fd143d4c96275c0e60b08f14b81fbb468e79bc0ef8fbda69fb0afafae43" +dependencies = [ + "erased-serde", + "inventory", + "once_cell", + "serde", + "typetag-impl", +] + +[[package]] +name = "typetag-impl" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +checksum = "bfc13d450dc4a695200da3074dacf43d449b968baee95e341920e47f61a3b40f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] [[package]] name = "ucan" @@ -6628,7 +7112,7 @@ dependencies = [ "anyhow", "async-recursion", "async-trait", - "base64 0.21.4", + "base64 0.21.5", "bs58", "cid 0.10.1", "futures", @@ -6708,9 +7192,9 @@ checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode-xid" @@ -6744,6 +7228,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + [[package]] name = "url" version = "2.4.1" @@ -6845,9 +7335,9 @@ dependencies = [ [[package]] name = "waker-fn" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "walkdir" @@ -6891,7 +7381,7 @@ dependencies = [ "io-lifetimes 1.0.11", 
"is-terminal", "once_cell", - "rustix 0.37.25", + "rustix 0.37.27", "system-interface", "tracing", "wasi-common", @@ -6910,7 +7400,7 @@ dependencies = [ "cap-std", "io-extras", "log", - "rustix 0.37.25", + "rustix 0.37.27", "thiserror", "tracing", "wasmtime 11.0.2", @@ -6928,7 +7418,7 @@ dependencies = [ "cap-std", "io-extras", "io-lifetimes 1.0.11", - "rustix 0.37.25", + "rustix 0.37.27", "tokio", "wasi-cap-std-sync", "wasi-common", @@ -6937,9 +7427,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "7daec296f25a1bae309c0cd5c29c4b260e510e6d813c286b19eaadf409d40fce" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -6947,24 +7437,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "e397f4664c0e4e428e8313a469aaa58310d302159845980fd23b0f22a847f217" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "9afec9963e3d0994cac82455b2b3502b81a7f40f9a0d32181f7528d9f4b43e02" dependencies = [ "cfg-if", "js-sys", @@ -6974,9 +7464,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "5961017b3b08ad5f3fe39f1e79877f8ee7c23c5e5fd5eb80de95abc41f1f16b2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6984,22 +7474,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "c5353b8dab669f5e10f5bd76df26a9360c748f054f862ff5f3f8aae0c7fb3907" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "0d046c5d029ba91a1ed14da14dca44b68bf2f124cfbaf741c54151fdb3e0750b" [[package]] name = "wasm-encoder" @@ -7012,27 +7502,27 @@ dependencies = [ [[package]] name = "wasm-encoder" -version = "0.33.1" +version = "0.33.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b39de0723a53d3c8f54bed106cfbc0d06b3e4d945c5c5022115a61e3b29183ae" +checksum = "34180c89672b3e4825c3a8db4b61a674f1447afd5fe2445b2d22c3d8b6ea086c" dependencies = [ "leb128", ] [[package]] name = "wasm-encoder" -version = "0.35.0" +version = "0.36.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca90ba1b5b0a70d3d49473c5579951f3bddc78d47b59256d2f9d4922b150aca" +checksum = "822b645bf4f2446b949776ffca47e2af60b167209ffb70814ef8779d299cd421" dependencies = [ "leb128", ] [[package]] 
name = "wasm-metadata" -version = "0.10.9" +version = "0.10.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14abc161bfda5b519aa229758b68f2a52b45a12b993808665c857d1a9a00223c" +checksum = "2167ce53b2faa16a92c6cafd4942cff16c9a4fa0c5a5a0a41131ee4e49fc055f" dependencies = [ "anyhow", "indexmap 2.1.0", @@ -7040,8 +7530,8 @@ dependencies = [ "serde_derive", "serde_json", "spdx", - "wasm-encoder 0.35.0", - "wasmparser 0.115.0", + "wasm-encoder 0.36.2", + "wasmparser 0.116.1", ] [[package]] @@ -7084,14 +7574,24 @@ dependencies = [ "semver", ] +[[package]] +name = "wasmparser" +version = "0.116.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a58e28b80dd8340cb07b8242ae654756161f6fc8d0038123d679b7b99964fa50" +dependencies = [ + "indexmap 2.1.0", + "semver", +] + [[package]] name = "wasmprinter" -version = "0.2.64" +version = "0.2.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ddf5892036cd4b780d505eff1194a0cbc10ed896097656fdcea3744b5e7c2f" +checksum = "9aff4df0cdf1906ec040e97d78c3fc8fd26d3f8d70adaac81f07f80957b63b54" dependencies = [ "anyhow", - "wasmparser 0.112.0", + "wasmparser 0.116.1", ] [[package]] @@ -7127,9 +7627,9 @@ dependencies = [ [[package]] name = "wasmtime" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16ed7db409c1acf60d33128b2a38bee25aaf38c4bd955ab98a5b623c8294593c" +checksum = "b0263693caa1486bd4d26a5f18511948a706c9290689386b81b851ce088063ce" dependencies = [ "anyhow", "async-trait", @@ -7153,13 +7653,13 @@ dependencies = [ "wasm-encoder 0.32.0", "wasmparser 0.112.0", "wasmtime-cache", - "wasmtime-component-macro 13.0.0", - "wasmtime-component-util 13.0.0", + "wasmtime-component-macro 13.0.1", + "wasmtime-component-util 13.0.1", "wasmtime-cranelift", - "wasmtime-environ 13.0.0", - "wasmtime-fiber 13.0.0", - "wasmtime-jit 13.0.0", - "wasmtime-runtime 13.0.0", + "wasmtime-environ 13.0.1", + "wasmtime-fiber 13.0.1", + "wasmtime-jit 13.0.1", + "wasmtime-runtime 13.0.1", "wasmtime-winch", "wat", "windows-sys", @@ -7176,25 +7676,25 @@ dependencies = [ [[package]] name = "wasmtime-asm-macros" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53af0f8f6271bd687fe5632c8fe0a0f061d0aa1b99a0cd4e1df8e4cbeb809d2f" +checksum = "4711e5969236ecfbe70c807804ff9ffb5206c1dbb5c55c5e8200d9f7e8e76adf" dependencies = [ "cfg-if", ] [[package]] name = "wasmtime-cache" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41376a7c094335ee08abe6a4eff79a32510cc805a249eff1b5e7adf0a42e7cdf" +checksum = "5b79f9f79188e5a26b6911b79d3171c06699d9a17ae07f6a265c51635b8d80c2" dependencies = [ "anyhow", - "base64 0.21.4", + "base64 0.21.5", "bincode", "directories-next", "log", - "rustix 0.38.13", + "rustix 0.38.21", "serde", "serde_derive", "sha2 0.10.8", @@ -7220,17 +7720,17 @@ dependencies = [ [[package]] name = "wasmtime-component-macro" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74ab5b291f2dad56f1e6929cc61fb7cac68845766ca77c3838b5d05d82c33976" +checksum = "ed724d0f41c21bcf8754651a59d0423c530069ddca4cf3822768489ad313a812" dependencies = [ "anyhow", "proc-macro2", "quote", - "syn 2.0.33", - "wasmtime-component-util 13.0.0", - "wasmtime-wit-bindgen 13.0.0", - "wit-parser 0.11.1", + "syn 2.0.39", + "wasmtime-component-util 13.0.1", + 
"wasmtime-wit-bindgen 13.0.1", + "wit-parser 0.11.3", ] [[package]] @@ -7241,21 +7741,21 @@ checksum = "31bd6b1c6d8ece2aa852bf5dad0ea91be63e81c7571d7bcf24238b05405adb70" [[package]] name = "wasmtime-component-util" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21436177bf19f6b60dc0b83ad5872e849892a4a90c3572785e1a28c0e2e1132c" +checksum = "7e7d69464b94bd312a27d93d0b482cd74bedf01f030199ef0740d6300ebca1d3" [[package]] name = "wasmtime-cranelift" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "920e42058862d1f7a3dd3fca73cb495a20d7506e3ada4bbc0a9780cd636da7ca" +checksum = "4e63f53c61ba05eb815f905c1738ad82c95333dd42ef5a8cc2aa3d7dfb2b08d7" dependencies = [ "anyhow", "cfg-if", "cranelift-codegen", "cranelift-control", - "cranelift-entity 0.100.0", + "cranelift-entity 0.100.1", "cranelift-frontend", "cranelift-native", "cranelift-wasm", @@ -7266,15 +7766,15 @@ dependencies = [ "thiserror", "wasmparser 0.112.0", "wasmtime-cranelift-shared", - "wasmtime-environ 13.0.0", + "wasmtime-environ 13.0.1", "wasmtime-versioned-export-macros", ] [[package]] name = "wasmtime-cranelift-shared" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "516d63bbe18219e64a9705cf3a2c865afe1fb711454ea03091dc85a1d708194d" +checksum = "4f6b197d68612f7dc3a17aa9f9587533715ecb8b4755609ce9baf7fb92b74ddc" dependencies = [ "anyhow", "cranelift-codegen", @@ -7283,7 +7783,7 @@ dependencies = [ "gimli 0.28.0", "object 0.32.1", "target-lexicon", - "wasmtime-environ 13.0.0", + "wasmtime-environ 13.0.1", ] [[package]] @@ -7307,12 +7807,12 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59cef239d663885f1427f8b8f4fde7be6075249c282580d94b480f11953ca194" +checksum = "18e2558c8b04fd27764d8601d46b8dc39555b79720a41e626bce210a80758932" dependencies = [ "anyhow", - "cranelift-entity 0.100.0", + "cranelift-entity 0.100.1", "gimli 0.28.0", "indexmap 2.1.0", "log", @@ -7324,8 +7824,8 @@ dependencies = [ "wasm-encoder 0.32.0", "wasmparser 0.112.0", "wasmprinter", - "wasmtime-component-util 13.0.0", - "wasmtime-types 13.0.0", + "wasmtime-component-util 13.0.1", + "wasmtime-types 13.0.1", ] [[package]] @@ -7336,21 +7836,21 @@ checksum = "e0e22d42113a1181fee3477f96639fd88c757b303f7083e866691f47a06065c5" dependencies = [ "cc", "cfg-if", - "rustix 0.37.25", + "rustix 0.37.27", "wasmtime-asm-macros 11.0.2", "windows-sys", ] [[package]] name = "wasmtime-fiber" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ef118b557df6193cd82cfb45ab57cd12388fedfe2bb76f090b2d77c96c1b56e" +checksum = "a615a2cf64a49c0dc659c7d850c6cd377b975e0abfdcf0888b282d274a82e730" dependencies = [ "cc", "cfg-if", - "rustix 0.38.13", - "wasmtime-asm-macros 13.0.0", + "rustix 0.38.21", + "wasmtime-asm-macros 13.0.1", "wasmtime-versioned-export-macros", "windows-sys", ] @@ -7370,7 +7870,7 @@ dependencies = [ "log", "object 0.30.4", "rustc-demangle", - "rustix 0.37.25", + "rustix 0.37.27", "serde", "target-lexicon", "wasmtime-environ 11.0.2", @@ -7381,9 +7881,9 @@ dependencies = [ [[package]] name = "wasmtime-jit" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8089d5909b8f923aad57702ebaacb7b662aa9e43a3f71e83e025c5379a1205f" 
+checksum = "cd775514b8034b85b0323bfdc60abb1c28d27dbf6e22aad083ed57dac95cf72e" dependencies = [ "addr2line 0.21.0", "anyhow", @@ -7395,14 +7895,14 @@ dependencies = [ "log", "object 0.32.1", "rustc-demangle", - "rustix 0.38.13", + "rustix 0.38.21", "serde", "serde_derive", "target-lexicon", - "wasmtime-environ 13.0.0", - "wasmtime-jit-debug 13.0.0", - "wasmtime-jit-icache-coherence 13.0.0", - "wasmtime-runtime 13.0.0", + "wasmtime-environ 13.0.1", + "wasmtime-jit-debug 13.0.1", + "wasmtime-jit-icache-coherence 13.0.1", + "wasmtime-runtime 13.0.1", "windows-sys", ] @@ -7417,13 +7917,13 @@ dependencies = [ [[package]] name = "wasmtime-jit-debug" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b13924aedf6799ad66edb25500a20e3226629978b30a958c55285352bad130a" +checksum = "c054e27c6ce2a6191edabe89e646da013044dd5369e1d203c89f977f9bd32937" dependencies = [ "object 0.32.1", "once_cell", - "rustix 0.38.13", + "rustix 0.38.21", "wasmtime-versioned-export-macros", ] @@ -7440,9 +7940,9 @@ dependencies = [ [[package]] name = "wasmtime-jit-icache-coherence" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ff5f3707a5e3797deeeeac6ac26b2e1dd32dbc06693c0ab52e8ac4d18ec706" +checksum = "7f323977cddf4a262d1b856366b665c5b4d01793c57b79fb42505b9fd9e61e5b" dependencies = [ "cfg-if", "libc", @@ -7466,7 +7966,7 @@ dependencies = [ "memoffset 0.8.0", "paste", "rand", - "rustix 0.37.25", + "rustix 0.37.27", "sptr", "wasmtime-asm-macros 11.0.2", "wasmtime-environ 11.0.2", @@ -7477,9 +7977,9 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11ab4ce04ac05342edfa7f42895f2a5d8b16ee914330869acb865cd1facf265f" +checksum = "29e26461bba043f73cb4183f4ce0d606c0eaac112475867b11e5ea36fe1cac8e" dependencies = [ "anyhow", "cc", @@ -7493,13 +7993,13 @@ dependencies = [ "memoffset 0.9.0", "paste", "rand", - "rustix 0.38.13", + "rustix 0.38.21", "sptr", "wasm-encoder 0.32.0", - "wasmtime-asm-macros 13.0.0", - "wasmtime-environ 13.0.0", - "wasmtime-fiber 13.0.0", - "wasmtime-jit-debug 13.0.0", + "wasmtime-asm-macros 13.0.1", + "wasmtime-environ 13.0.1", + "wasmtime-fiber 13.0.1", + "wasmtime-jit-debug 13.0.1", "wasmtime-versioned-export-macros", "wasmtime-wmemcheck", "windows-sys", @@ -7519,11 +8019,11 @@ dependencies = [ [[package]] name = "wasmtime-types" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecf61e21d5bd95e1ad7fa42b7bdabe21220682d6a6046d376edca29760849222" +checksum = "6fd7e9b29fee64eea5058cb5e7cb3480b52c2f1312d431d16ea8617ceebeb421" dependencies = [ - "cranelift-entity 0.100.0", + "cranelift-entity 0.100.1", "serde", "serde_derive", "thiserror", @@ -7532,13 +8032,13 @@ dependencies = [ [[package]] name = "wasmtime-versioned-export-macros" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe877472cbdd6d96b4ecdc112af764e3b9d58c2e4175a87828f892ab94c60643" +checksum = "6362c557c36d8ad4aaab735f14ed9e4f78d6b40ec85a02a88fd859af87682e52" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -7550,7 +8050,7 @@ dependencies = [ "anyhow", "io-extras", "libc", - "rustix 0.37.25", + "rustix 0.37.27", "wasi-common", "wasi-tokio", "wasmtime 11.0.2", @@ -7560,9 +8060,9 @@ dependencies = [ [[package]] name 
= "wasmtime-winch" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bc5a770003807c55f2187a0092dea01722b0e24151e35816bd5091538bb8e88" +checksum = "aa5fc7212424c04c01a20bfa66c4c518e8749dde6546f5e05815dcacbec80723" dependencies = [ "anyhow", "cranelift-codegen", @@ -7571,7 +8071,7 @@ dependencies = [ "target-lexicon", "wasmparser 0.112.0", "wasmtime-cranelift-shared", - "wasmtime-environ 13.0.0", + "wasmtime-environ 13.0.1", "winch-codegen", ] @@ -7588,21 +8088,21 @@ dependencies = [ [[package]] name = "wasmtime-wit-bindgen" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62003d48822f89cc393e93643366ddbee1766779c0874353b8ba2ede4679fbf9" +checksum = "dcc03bd58f77a68dc6a0b2ba2f8e64b1f902b50389d21bbcc690ef2f3bb87198" dependencies = [ "anyhow", "heck", "indexmap 2.1.0", - "wit-parser 0.11.1", + "wit-parser 0.11.3", ] [[package]] name = "wasmtime-wmemcheck" -version = "13.0.0" +version = "13.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5412bb464066d64c3398c96e6974348f90fa2a55110ad7da3f9295438cd4de84" +checksum = "1e485bf54eba675ca615f8f55788d3a8cd44e7bd09b8b4011edc22c2c41d859e" [[package]] name = "wast" @@ -7615,30 +8115,30 @@ dependencies = [ [[package]] name = "wast" -version = "66.0.2" +version = "67.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93cb43b0ac6dd156f2c375735ccfd72b012a7c0a6e6d09503499b8d3cb6e6072" +checksum = "a974d82fac092b5227c1663e16514e7a85f32014e22e6fdcb08b71aec9d3fb1e" dependencies = [ "leb128", "memchr", "unicode-width", - "wasm-encoder 0.35.0", + "wasm-encoder 0.36.2", ] [[package]] name = "wat" -version = "1.0.77" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e367582095d2903caeeea9acbb140e1db9c7677001efa4347c3687fd34fe7072" +checksum = "adb220934f92f8551144c0003d1bc57a060674c99139f45ed623fbbf6d9262e7" dependencies = [ - "wast 66.0.2", + "wast 67.0.1", ] [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "5db499c5f66323272151db0e666cd34f78617522fb0c1604d31a27c50c206a85" dependencies = [ "js-sys", "wasm-bindgen", @@ -7646,14 +8146,20 @@ dependencies = [ [[package]] name = "web-time" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8208e3fdbc243c8fd30805721869242a7f6de3e2e9f3b057652ab36e52ae1e87" +checksum = "57099a701fb3a8043f993e8228dc24229c7b942e2b009a1b962e54489ba1d3bf" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14247bb57be4f377dfb94c72830b8ce8fc6beac03cf4bf7b9732eadd414123fc" + [[package]] name = "websocket-relay" version = "0.1.0" @@ -7732,9 +8238,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] @@ -7747,9 +8253,9 @@ checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "winch-codegen" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50647204d600a2a112eefac0645ba6653809a15bd362c7e4e6a049a5bdff0de9" +checksum = "b9b01ca6722f7421c9cdbe4c9b62342ce864d0a9e8736d56dac717a86b1a65ae" dependencies = [ "anyhow", "cranelift-codegen", @@ -7758,16 +8264,7 @@ dependencies = [ "smallvec", "target-lexicon", "wasmparser 0.112.0", - "wasmtime-environ 13.0.0", -] - -[[package]] -name = "windows" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" -dependencies = [ - "windows-targets", + "wasmtime-environ 13.0.1", ] [[package]] @@ -7857,9 +8354,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "winnow" -version = "0.5.15" +version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = "829846f3e3db426d4cee4510841b71a8e58aa2a76b1132579487ae430ccd9c7b" dependencies = [ "memchr", ] @@ -7900,94 +8397,94 @@ version = "0.36.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "357bb8e2932df531f83b052264b050b81ba0df90ee5a59b2d1d3949f344f81e5" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "windows-sys", ] [[package]] name = "wit-bindgen" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d92ce0ca6b6074059413a9581a637550c3a740581c854f9847ec293c8aed71" +checksum = "38726c54a5d7c03cac28a2a8de1006cfe40397ddf6def3f836189033a413bc08" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.4.1", "wit-bindgen-rust-macro", ] [[package]] name = "wit-bindgen-core" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "565b945ae074886071eccf9cdaf8ccd7b959c2b0d624095bea5fe62003e8b3e0" +checksum = "c8bf1fddccaff31a1ad57432d8bfb7027a7e552969b6c68d6d8820dcf5c2371f" dependencies = [ "anyhow", - "wit-component 0.16.0", - "wit-parser 0.12.1", + "wit-component 0.17.0", + "wit-parser 0.12.2", ] [[package]] name = "wit-bindgen-rust" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5695ff4e41873ed9ce56d2787e6b5772bdad9e70e2c1d2d160621d1762257f4f" +checksum = "0e7200e565124801e01b7b5ddafc559e1da1b2e1bed5364d669cd1d96fb88722" dependencies = [ "anyhow", "heck", "wasm-metadata", "wit-bindgen-core", - "wit-component 0.16.0", + "wit-component 0.17.0", ] [[package]] name = "wit-bindgen-rust-macro" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91835ea4231da1fe7971679d505ba14be7826e192b6357f08465866ef482e08" +checksum = "4ae33920ad8119fe72cf59eb00f127c0b256a236b9de029a1a10397b1f38bdbd" dependencies = [ "anyhow", "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", "wit-bindgen-core", "wit-bindgen-rust", - "wit-component 0.16.0", + "wit-component 0.17.0", ] [[package]] name = "wit-component" -version = "0.14.2" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af872ef43ecb73cc49c7bd2dd19ef9117168e183c78cf70000dca0e14b6a5473" +checksum = "66981fe851118de3b6b7a92f51ce8a86b919569c37becbeca8df9bd30141da25" dependencies = [ "anyhow", - "bitflags 
2.4.0", + "bitflags 2.4.1", "indexmap 2.1.0", "log", "serde", "serde_json", - "wasm-encoder 0.33.1", + "wasm-encoder 0.33.2", "wasm-metadata", "wasmparser 0.113.3", - "wit-parser 0.11.1", + "wit-parser 0.11.3", ] [[package]] name = "wit-component" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87488b57a08e2cbbd076b325acbe7f8666965af174d69d5929cd373bd54547f" +checksum = "480cc1a078b305c1b8510f7c455c76cbd008ee49935f3a6c5fd5e937d8d95b1e" dependencies = [ "anyhow", - "bitflags 2.4.0", + "bitflags 2.4.1", "indexmap 2.1.0", "log", "serde", "serde_derive", "serde_json", - "wasm-encoder 0.35.0", + "wasm-encoder 0.36.2", "wasm-metadata", - "wasmparser 0.115.0", - "wit-parser 0.12.1", + "wasmparser 0.116.1", + "wit-parser 0.12.2", ] [[package]] @@ -8008,9 +8505,9 @@ dependencies = [ [[package]] name = "wit-parser" -version = "0.11.1" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dcd022610436a1873e60bfdd9b407763f2404adf7d1cb57912c7ae4059e57a5" +checksum = "a39edca9abb16309def3843af73b58d47d243fe33a9ceee572446bcc57556b9a" dependencies = [ "anyhow", "id-arena", @@ -8026,9 +8523,9 @@ dependencies = [ [[package]] name = "wit-parser" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ace9943d89bbf3dbbc71b966da0e7302057b311f36a4ac3d65ddfef17b52cf" +checksum = "43771ee863a16ec4ecf9da0fc65c3bbd4a1235c8e3da5f094b562894843dfa76" dependencies = [ "anyhow", "id-arena", @@ -8157,6 +8654,26 @@ dependencies = [ "time", ] +[[package]] +name = "zerocopy" +version = "0.7.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cd369a67c0edfef15010f980c3cbe45d7f651deac2cd67ce097cd801de16557" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2f140bda219a26ccc0cdb03dba58af72590c53b22642577d88a927bc5c87d6b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.39", +] + [[package]] name = "zeroize" version = "1.6.0" @@ -8174,7 +8691,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.33", + "syn 2.0.39", ] [[package]] @@ -8198,12 +8715,11 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" +version = "2.0.9+zstd.1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" dependencies = [ "cc", - "libc", "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index d22f0536..0035ea79 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,6 +76,7 @@ lto = true debug-assertions = false [profile.release.package.homestar-runtime] +# Will slow-down compile, but improve perf on generated code. codegen-units = 1 debug-assertions = false @@ -87,6 +88,7 @@ debug-assertions = false # Example: `cargo build -p homestar-functions-test --target wasm32-unknown-unknown --profile release-wasm-fn` [profile.release-wasm-fn] inherits = "release" +# Will slow-down compile, but improve perf on generated code. codegen-units = 1 # Tell `rustc` to optimize for small code size. 
opt-level = "z" # 'z' to optimize "aggressively" for size diff --git a/Cross.toml b/Cross.toml index 688d1ac7..78ef8d29 100644 --- a/Cross.toml +++ b/Cross.toml @@ -6,10 +6,18 @@ passthrough = [ "RUSTFLAGS", ] +# When running `cross` with nix, do this within `nix-shell -p gcc rustup`. +# +# Then, run +# +# `cross build -p homestar-runtime --target x86_64-unknown-linux-musl` +# or +# `cross build -p homestar-runtime --target aarch64-unknown-linux-musl` + [target.x86_64-unknown-linux-musl] image = "burntsushi/cross:x86_64-unknown-linux-musl" -[target.aarch64-unknown-linux-musl] +[target.aarch64-unknown-linux-gnu] image = "burntsushi/cross:aarch64-unknown-linux-gnu" [target.x86_64-apple-darwin] diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index 694ae5de..a92f110e 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -2,6 +2,7 @@ ## Outline +- [Building and Running the Project](#building-and-running-the-project) - [Testing the Project](#testing-the-project) - [Running the Runtime on Docker](#running-the-runtime-on-docker) - [Nix](#nix) @@ -10,18 +11,51 @@ - [Recommended Development Flow](#recommended-development-flow) - [Conventional Commits](#conventional-commits) +## Building and Running the Project + +- Building `homestar`: + + For the fastest compile-times and prettiest logs while developing `homestar`, + build with: + + ``` console + cargo build --no-default-features --features dev + ``` + + This removes underlying `wasmtime` `zstd` compression while also turning on + ANSI color-coded logs. If you build with default features, `zstd` compression + and other `wasmtime` defaults will be included in the build. + +- Running the `homestar` server/runtime: + + ``` console + cargo run --no-default-features --features dev -- start + ``` + +- Running with [`tokio-console`][tokio-console] for diagnosis and debugging: + + ``` console + cargo run --no-default-features --features dev,console -- start + ``` + + Then, in another window: + + ```console + tokio-console --retain-for 60sec + ``` + ## Testing the Project - Running the tests: -We recommend using [cargo nextest][cargo-nextest], which is installed by default -in our [Nix flake](#nix) or can be [installed separately][cargo-nextest-install]. + We recommend using [cargo nextest][cargo-nextest], which is installed by default + in our [Nix flake](#nix) or can be [installed separately][cargo-nextest-install]. ```console cargo nextest run --all-features --no-capture ``` -The above command translates to this using the default `cargo test`: + This command translates to the default `cargo test` command: ```console cargo test --all-features -- --nocapture @@ -101,7 +135,7 @@ hooks. Please run this before every commit and/or push. 
* **`nx-test`**, which translates to `cargo nextest run --workspace && cargo test --workspace --doc` * **`x-test`** for testing continuously as files change, translating to - `cargo watch -c -s "cargo nextest run --workspace --nocapture && cargo test --doc"` + `cargo watch -c -s "cargo nextest run --workspace --no-capture && cargo test --doc"` * **`x-`** for running a variety of `cargo watch` execution stages * **`nx-test-`**, which is just like `nx-test`, but adds `all` or `0` @@ -139,4 +173,5 @@ a type of `fix`, `feat`, `docs`, `ci`, `refactor`, etc..., structured like so: [nix]:https://nixos.org/download.html [nix-flake]: https://nixos.wiki/wiki/Flakes [pre-commit]: https://pre-commit.com/ +[tokio-console]: https://github.com/tokio-rs/console [wit-bindgen]: https://github.com/bytecodealliance/wit-bindgen diff --git a/README.md b/README.md index 4567d7eb..0a0fd360 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ Homestar logo -

homestar

+

Homestar

@@ -50,6 +50,7 @@ ## Outline - [Quickstart](#quickstart) +- [Packages](#packages) - [Running Examples](#running-examples) - [Workspace](#workspace) - [Contributing](#contributing) @@ -60,18 +61,18 @@ ## Quickstart -If you're looking to help develop `homestar`, please dive right into our +If you're looking to help develop `Homestar`, please dive right into our [development](./DEVELOPMENT.md) guide. -Otherwise, the easiest way to get started and see `homestar` in action is to +Otherwise, the easiest way to get started and see `Homestar` in action is to follow-along and run our image-processing [websocket relay](./examples/websocket-relay) example, which integrates -`homestar` with a browser application to run a +`Homestar` with a browser application to run a statically-configured workflow. The associated `README.md` walks through what to install (i.e. `rust`, `node/npm`, `ipfs`), what commands to run, and embeds a video demonstrating its usage. -Throughout the `homestar` ecosystem and documentation, we'll draw a distinction +Throughout the `Homestar` ecosystem and documentation, we'll draw a distinction between the [host runtime][host-runtime] and the support for different [guest languages and bindings][guest]. @@ -80,26 +81,35 @@ components (currently focused on authoring in Rust), please jump into our [`homestar-functions`](./homestar-functions) directory and check out our examples there. +## Packages + +Each `Homestar` release will also build packages for distribution across +different platforms. + +- [homebrew][homebrew]: `brew install fission-codes/fission/homestar` + This includes `ipfs` in the install by default. +- [npm](https://www.npmjs.com/package/homestar-runtime): `npm install homestar-runtime -g` Wraps the `homestar-runtime` binary in a node script. + ## Running Examples All [examples](./examples) contain instructions for running them, including what to install and how to run them. Please clone this repo, and get started! -Each example showcases something specific and interesting about `homestar` +Each example showcases something specific and interesting about `Homestar` as a system. Our current list includes: - [websocket relay](./examples/websocket-relay/README.md) - An example (browser-based) application that connects to the `homestar-runtime` over a - websocket connection in order to run a couple static Wasm-based, image + WebSocket connection in order to run a couple static Wasm-based, image processing workflows that chain inputs and outputs. ## Workspace This repository is comprised of a few library packages and a library/binary that -represents the `homestar` runtime. We recommend diving into each package's own +represents the `Homestar` runtime. We recommend diving into each package's own `README.md` for more information when available. ### Core Crates @@ -135,8 +145,8 @@ represents the `homestar` runtime. We recommend diving into each package's own - [examples/*](./examples) - `examples` contains examples and demos showcasing `homestar` packages - and the `homestar runtime`. Each example is set up as its own crate, + `examples` contains examples and demos showcasing `Homestar` packages + and the `Homestar` runtime. Each example is set up as its own crate, demonstrating the necessary dependencies and setup(s). 
## Contributing @@ -167,7 +177,6 @@ We would be happy to try to answer your question or try opening a new issue on G - [IPVM - IPFS and WASM][ipfs-thing-ipvm] by Brooklyn Zelenka - [Breaking Down the Interplanetary Virtual Machine][blog-1] - [Ucan Invocation Spec][ucan-invocation] -- [Wasm/Wit Demo - Februrary 2023][demo-1] by Zeeshan Lakhani ## License @@ -187,6 +196,7 @@ conditions. [demo-1]: https://www.loom.com/share/3204037368fe426ba3b4c952b0691c5c [foundations-for-openworld-compute]: https://youtu.be/dRz5mau6fsY [guest]: https://github.com/bytecodealliance/wit-bindgen#supported-guest-languages +[homebrew]: https://brew.sh/ [host-runtime]: https://github.com/bytecodealliance/wit-bindgen#host-runtimes-for-components [ipfs-thing-ipvm]: https://www.youtube.com/watch?v=rzJWk1nlYvs [ipld]: https://ipld.io/ diff --git a/docker/Dockerfile b/docker/Dockerfile index 53097d50..cbb504dc 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -91,8 +91,8 @@ ARG rpc_port=3030 ARG ws_port=1337 ENV DATABASE_URL=${database_url} \ - HOMESTAR__NODE__NETWORK__RPC_HOST=${rpc_host} \ - HOMESTAR__NODE__NETWORK__RPC_PORT=${rpc_port} \ + HOMESTAR__NODE__NETWORK__RPC__HOST=${rpc_host} \ + HOMESTAR__NODE__NETWORK__RPC__PORT=${rpc_port} \ HOMESTAR__NODE__NETWORK__WS_PORT=${ws_port} EXPOSE ${rpc_port} ${ws_port} diff --git a/examples/README.md b/examples/README.md index b0766833..a99ab3fc 100644 --- a/examples/README.md +++ b/examples/README.md @@ -5,7 +5,7 @@ and the `homestar runtime`. Each example is set up as its own crate, demonstrating the necessary dependencies and setup(s). * [websocket relay](./websocket-relay) - An example (browser-based) application - that connects to the `homestar-runtime` over a websocket connection in order + that connects to the `homestar-runtime` over a WebSocket connection in order to run a couple static Wasm-based, image processing workflows that chain inputs and outputs using [inlined promises][pipelines]. diff --git a/examples/websocket-relay/README.md b/examples/websocket-relay/README.md index a6ac7bd5..c2b08bcd 100644 --- a/examples/websocket-relay/README.md +++ b/examples/websocket-relay/README.md @@ -9,13 +9,15 @@ processing workflows that chain inputs and outputs using This application demonstrates: + * workflows built using the + [@fission-codes/homestar][@fission-codes/homestar] client library * websocket notifications of [UCAN Invocation Receipts][spec-receipts] sent - between a web client and a `homestar` runner + between a web client and a `homestar` runner using [@fission-codes/homestar][@fission-codes/homestar] * instantaneous replay of previously run, cached executions * fetching content (the original static image) over [IPFS][ipfs] through a local blockstore * the [WIT][wit] + [IPLD][ipld] interpreter for - [Wasm(time)][wasmtime] embedded execution within a `homestar` runner. + [Wasm(time)][wasmtime] embedded execution within a `homestar` runner ## Install @@ -33,9 +35,8 @@ To get started, please install: ## Usage -1. Run `cargo run -- start -c config/settings.toml` to start the runtime and - an IPFS daemon as a background process. This runtime includes - ANSI-coded logging by default. +1. Run `cargo run -- start` to start the runtime and an IPFS daemon as a + background process. This runtime includes ANSI-coded logging by default. 2. In a separate terminal window, run `npm install --prefix relay-app` to install dependencies and `npm run --prefix relay-app dev` to start the @@ -63,13 +64,19 @@ if they've been previously run. 
On macOS, for example, a simple homebrew install would install everything you need: `brew install rust npm ipfs` -We have packaged homestar binaries via brew, so `brew install fission-codes/fission/homestar` will install everything you need, including `ipfs`. You will still need npm to run this example. From this folder, you can run the example via `homestar start --config ./config/settings.toml --db homestar.db`. +We have packaged homestar binaries using brew, so +`brew install fission-codes/fission/homestar` will install everything you need, +including `ipfs`. You will still need `npm` to run this example. From this folder, you can then run the example like this: + +``` +homestar start --db homestar.db +``` Running `homestar` via `cargo run` requires a minimum Rust version of `1.70.0`. If you've got an older install of rust, update it with `rustup update`. -You do not have to start-up Kubo (IPFS) on your own. The example will do this +You do not have to start Kubo (IPFS) on your own. The example will do this for you, and use `examples/websocket-relay/tmp/.ipfs` as a local blockstore. Feel free to discard it when you don't need it. @@ -77,6 +84,7 @@ If you're already running an IPFS instance however, e.g. [IPFS Desktop][ipfs-des the example will check for an already running instance and not start a new, local one. +[@fission-codes/homestar]: https://www.npmjs.com/package/@fission-codes/homestar [install-ipfs]: https://docs.ipfs.tech/install/ [install-npm]: https://docs.npmjs.com/downloading-and-installing-node-js-and-npm [install-rust]: https://www.rust-lang.org/tools/install diff --git a/examples/websocket-relay/config/settings.toml b/examples/websocket-relay/config/settings.toml deleted file mode 100644 index 93baf21c..00000000 --- a/examples/websocket-relay/config/settings.toml +++ /dev/null @@ -1,5 +0,0 @@ -[node] - -[node.network.keypair_config] -# generate key using a seed -random_seed = { key_type = "secp256k1", seed = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" } diff --git a/examples/websocket-relay/example_test.wasm b/examples/websocket-relay/example_test.wasm index 8ac772fd..15306f11 100755 Binary files a/examples/websocket-relay/example_test.wasm and b/examples/websocket-relay/example_test.wasm differ diff --git a/examples/websocket-relay/relay-app/.env b/examples/websocket-relay/relay-app/.env index 36d1eb1f..8a5a254d 100644 --- a/examples/websocket-relay/relay-app/.env +++ b/examples/websocket-relay/relay-app/.env @@ -1,4 +1,3 @@ VITE_WEBSOCKET_ENDPOINT="ws://localhost:1337" VITE_PING_INTERVAL=8000 VITE_MAX_PING_RETRIES=3 -VITE_EMULATION_MODE=false diff --git a/examples/websocket-relay/relay-app/package-lock.json b/examples/websocket-relay/relay-app/package-lock.json index cacfd5dd..5d1333fa 100644 --- a/examples/websocket-relay/relay-app/package-lock.json +++ b/examples/websocket-relay/relay-app/package-lock.json @@ -8,8 +8,10 @@ "name": "websocket-relay", "version": "0.0.1", "dependencies": { + "@fission-codes/homestar": "^1.0.0", "@zerodevx/svelte-json-view": "^1.0.3", "clipboard-copy": "^4.0.1", + "iso-base": "^2.0.1", "svelvet": "8.1.0" }, "devDependencies": { @@ -137,6 +139,75 @@ "node": ">=14" } }, + "node_modules/@fission-codes/homestar": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@fission-codes/homestar/-/homestar-1.0.0.tgz", + "integrity": "sha512-kOmnpAs2izF3B5tfOyPiCMhWCcUZlDHP/5VCW1LcHBmKD3e7TPffdmXeMnyJexxn+ssFxa9qYqsRGADCNhiUKA==", + "dependencies": { + "@ipld/dag-cbor": "^9.0.6", + "@ipld/dag-json": "^10.1.5",
"@multiformats/sha3": "^3.0.1", + "@ts-ast-parser/core": "^0.6.3", + "emittery": "^1.0.1", + "esbuild": "^0.19.8", + "get-tsconfig": "^4.7.2", + "iso-websocket": "^0.1.6", + "multiformats": "^12.1.3", + "object-path": "^0.11.8", + "zod": "^3.22.4" + } + }, + "node_modules/@fission-codes/homestar/node_modules/@esbuild/darwin-arm64": { + "version": "0.19.8", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.8.tgz", + "integrity": "sha512-RQw9DemMbIq35Bprbboyf8SmOr4UXsRVxJ97LgB55VKKeJOOdvsIPy0nFyF2l8U+h4PtBx/1kRf0BelOYCiQcw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@fission-codes/homestar/node_modules/esbuild": { + "version": "0.19.8", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.8.tgz", + "integrity": "sha512-l7iffQpT2OrZfH2rXIp7/FkmaeZM0vxbxN9KfiCwGYuZqzMg/JdvX26R31Zxn/Pxvsrg3Y9N6XTcnknqDyyv4w==", + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/android-arm": "0.19.8", + "@esbuild/android-arm64": "0.19.8", + "@esbuild/android-x64": "0.19.8", + "@esbuild/darwin-arm64": "0.19.8", + "@esbuild/darwin-x64": "0.19.8", + "@esbuild/freebsd-arm64": "0.19.8", + "@esbuild/freebsd-x64": "0.19.8", + "@esbuild/linux-arm": "0.19.8", + "@esbuild/linux-arm64": "0.19.8", + "@esbuild/linux-ia32": "0.19.8", + "@esbuild/linux-loong64": "0.19.8", + "@esbuild/linux-mips64el": "0.19.8", + "@esbuild/linux-ppc64": "0.19.8", + "@esbuild/linux-riscv64": "0.19.8", + "@esbuild/linux-s390x": "0.19.8", + "@esbuild/linux-x64": "0.19.8", + "@esbuild/netbsd-x64": "0.19.8", + "@esbuild/openbsd-x64": "0.19.8", + "@esbuild/sunos-x64": "0.19.8", + "@esbuild/win32-arm64": "0.19.8", + "@esbuild/win32-ia32": "0.19.8", + "@esbuild/win32-x64": "0.19.8" + } + }, "node_modules/@humanwhocodes/config-array": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.10.tgz", @@ -170,6 +241,32 @@ "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", "dev": true }, + "node_modules/@ipld/dag-cbor": { + "version": "9.0.6", + "resolved": "https://registry.npmjs.org/@ipld/dag-cbor/-/dag-cbor-9.0.6.tgz", + "integrity": "sha512-3kNab5xMppgWw6DVYx2BzmFq8t7I56AGWfp5kaU1fIPkwHVpBRglJJTYsGtbVluCi/s/q97HZM3bC+aDW4sxbQ==", + "dependencies": { + "cborg": "^4.0.0", + "multiformats": "^12.0.1" + }, + "engines": { + "node": ">=16.0.0", + "npm": ">=7.0.0" + } + }, + "node_modules/@ipld/dag-json": { + "version": "10.1.5", + "resolved": "https://registry.npmjs.org/@ipld/dag-json/-/dag-json-10.1.5.tgz", + "integrity": "sha512-AIIDRGPgIqVG2K1O42dPDzNOfP0YWV/suGApzpF+YWZLwkwdGVsxjmXcJ/+rwOhRGdjpuq/xQBKPCu1Ao6rdOQ==", + "dependencies": { + "cborg": "^4.0.0", + "multiformats": "^12.0.1" + }, + "engines": { + "node": ">=16.0.0", + "npm": ">=7.0.0" + } + }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.3.tgz", @@ -218,11 +315,19 @@ "@jridgewell/sourcemap-codec": "^1.4.14" } }, + "node_modules/@multiformats/sha3": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/@multiformats/sha3/-/sha3-3.0.1.tgz", + "integrity": "sha512-lXlD8gA//SNPv6QpTEKR00029G1bt9EG8WieCmAZefiV+ONYw3yYt9UhneC3DMCFFXLKCcw8obHzodnah7EGug==", + "dependencies": { + "js-sha3": "^0.9.1", + "multiformats": "^12.1.0" + } + }, 
"node_modules/@nodelib/fs.scandir": { "version": "2.1.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", - "dev": true, "dependencies": { "@nodelib/fs.stat": "2.0.5", "run-parallel": "^1.1.9" @@ -235,7 +340,6 @@ "version": "2.0.5", "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", - "dev": true, "engines": { "node": ">= 8" } @@ -244,7 +348,6 @@ "version": "1.2.8", "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", - "dev": true, "dependencies": { "@nodelib/fs.scandir": "2.1.5", "fastq": "^1.6.0" @@ -278,6 +381,17 @@ "integrity": "sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g==", "dev": true }, + "node_modules/@sindresorhus/merge-streams": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-1.0.0.tgz", + "integrity": "sha512-rUV5WyJrJLoloD4NDN1V1+LDMDWOa4OTsT4yYJwQNpTU6FWxkxHpL7eu4w+DmiH8x/EAM1otkPE1+LaspIbplw==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@sveltejs/adapter-auto": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/@sveltejs/adapter-auto/-/adapter-auto-2.1.0.tgz", @@ -370,6 +484,75 @@ "vite": "^4.0.0" } }, + "node_modules/@ts-ast-parser/comment": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/@ts-ast-parser/comment/-/comment-0.1.0.tgz", + "integrity": "sha512-SLN5vUMcu0FwhsW7ngVFhwCI9p5r5MGIRT1nGyVKlrXSXMHGcKnU9oulH5Gz9s6epRQooA2TJmU/4hW9z8zy9A==", + "dependencies": { + "tslib": "^2.6.1" + }, + "engines": { + "node": "^16.0.0 || ^18.0.0 || ^20.0.0" + } + }, + "node_modules/@ts-ast-parser/core": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/@ts-ast-parser/core/-/core-0.6.3.tgz", + "integrity": "sha512-tRBa6eoiAUx/fNIEkxbYLYJlGur1JvKJc/6hGjlILlw9QPo9kS/Ae/dBnSLBkJV8rhOvl33TE1YmQDsNe4PdLg==", + "dependencies": { + "@ts-ast-parser/comment": "0.1.0", + "globby": "^14.0.0", + "package-json-type": "^1.0.3", + "tslib": "^2.6.2" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0" + }, + "peerDependencies": { + "typescript": "^4.6.x || 5.0.x || 5.1.x || 5.2.x || 5.3.x" + } + }, + "node_modules/@ts-ast-parser/core/node_modules/globby": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-14.0.0.tgz", + "integrity": "sha512-/1WM/LNHRAOH9lZta77uGbq0dAEQM+XjNesWwhlERDVenqothRbnzTrL3/LrIoEPPjeUHC3vrS6TwoyxeHs7MQ==", + "dependencies": { + "@sindresorhus/merge-streams": "^1.0.0", + "fast-glob": "^3.3.2", + "ignore": "^5.2.4", + "path-type": "^5.0.0", + "slash": "^5.1.0", + "unicorn-magic": "^0.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@ts-ast-parser/core/node_modules/path-type": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-5.0.0.tgz", + "integrity": "sha512-5HviZNaZcfqP95rwpv+1HDgUamezbqdSYTyzjTvwtJSnIH+3vnbmWsItli8OFEndS984VT55M3jduxZbX351gg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/@ts-ast-parser/core/node_modules/slash": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-5.1.0.tgz", + "integrity": "sha512-ZA6oR3T/pEyuqwMgAKT0/hAv8oAXckzbkmR0UkUosQ+Mc4RxGoJkRmwHgHufaenlyAgE1Mxgpdcrf75y6XcnDg==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@types/chai": { "version": "4.3.5", "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.5.tgz", @@ -769,6 +952,19 @@ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", "dev": true }, + "node_modules/base-x": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/base-x/-/base-x-4.0.0.tgz", + "integrity": "sha512-FuwxlW4H5kh37X/oW59pwTzzTKRzfrrQwhmyspRM7swOEZcHtDZSCt45U6oKgtuFE+WYPblePMVIPR4RZrh/hw==" + }, + "node_modules/bigint-mod-arith": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/bigint-mod-arith/-/bigint-mod-arith-3.3.1.tgz", + "integrity": "sha512-pX/cYW3dCa87Jrzv6DAr8ivbbJRzEX5yGhdt8IutnX/PCIXfpx+mabWNK/M8qqh+zQ0J3thftUBHW0ByuUlG0w==", + "engines": { + "node": ">=10.4.0" + } + }, "node_modules/binary-extensions": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz", @@ -792,7 +988,6 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, "dependencies": { "fill-range": "^7.0.1" }, @@ -879,6 +1074,14 @@ } ] }, + "node_modules/cborg": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/cborg/-/cborg-4.0.5.tgz", + "integrity": "sha512-q8TAjprr8pn9Fp53rOIGp/UFDdFY6os2Nq62YogPSIzczJD9M6g2b6igxMkpCiZZKJ0kn/KzDLDvG+EqBIEeCg==", + "bin": { + "cborg": "lib/bin.js" + } + }, "node_modules/chai": { "version": "4.3.7", "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.7.tgz", @@ -1052,7 +1255,6 @@ "version": "4.3.4", "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz", "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==", - "dev": true, "dependencies": { "ms": "2.1.2" }, @@ -1149,6 +1351,17 @@ "integrity": "sha512-6s7NVJz+sATdYnIwhdshx/N/9O6rvMxmhVoDSDFdj6iA45gHR8EQje70+RYsF4GeB+k0IeNSBnP7yG9ZXJFr7A==", "dev": true }, + "node_modules/emittery": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/emittery/-/emittery-1.0.1.tgz", + "integrity": "sha512-2ID6FdrMD9KDLldGesP6317G78K7km/kMcwItRtVFva7I/cSEOIaLpewaUb+YLXVwdAp3Ctfxh/V5zIl1sj7dQ==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sindresorhus/emittery?sponsor=1" + } + }, "node_modules/es6-promise": { "version": "3.3.1", "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-3.3.1.tgz", @@ -1429,10 +1642,9 @@ "dev": true }, "node_modules/fast-glob": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.1.tgz", - "integrity": "sha512-kNFPyjhh5cKjrUltxs+wFx+ZkbRaxxmZ+X0ZU31SOsxCEtP9VPgtq2teZw1DebupL5GmDaNQ6yKMMVcM41iqDg==", - "dev": true, + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", "dependencies": { "@nodelib/fs.stat": "^2.0.2", "@nodelib/fs.walk": "^1.2.3", @@ -1448,7 +1660,6 @@ "version": 
"5.1.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", - "dev": true, "dependencies": { "is-glob": "^4.0.1" }, @@ -1472,7 +1683,6 @@ "version": "1.15.0", "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz", "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==", - "dev": true, "dependencies": { "reusify": "^1.0.4" } @@ -1493,7 +1703,6 @@ "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, "dependencies": { "to-regex-range": "^5.0.1" }, @@ -1584,6 +1793,17 @@ "node": "*" } }, + "node_modules/get-tsconfig": { + "version": "4.7.2", + "resolved": "https://registry.npmjs.org/get-tsconfig/-/get-tsconfig-4.7.2.tgz", + "integrity": "sha512-wuMsz4leaj5hbGgg4IvDU0bqJagpftG5l5cXIAvo8uZrqn0NJqwtfupTN00VnkQJPcIRrxYrm1Ue24btpCha2A==", + "dependencies": { + "resolve-pkg-maps": "^1.0.0" + }, + "funding": { + "url": "https://github.com/privatenumber/get-tsconfig?sponsor=1" + } + }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", @@ -1700,7 +1920,6 @@ "version": "5.2.4", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==", - "dev": true, "engines": { "node": ">= 4" } @@ -1784,7 +2003,6 @@ "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -1793,7 +2011,6 @@ "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", - "dev": true, "dependencies": { "is-extglob": "^2.1.1" }, @@ -1805,7 +2022,6 @@ "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, "engines": { "node": ">=0.12.0" } @@ -1825,6 +2041,25 @@ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", "dev": true }, + "node_modules/iso-base": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/iso-base/-/iso-base-2.0.1.tgz", + "integrity": "sha512-ip0nUW9oZP+LG6mslSgdJPt9NWZOq1qIOKpoUIdm8sq/87VwY8WZXw9ptCkhdJclY+r4feFbZuu9P/OFDqCvkQ==", + "dependencies": { + "base-x": "^4.0.0", + "bigint-mod-arith": "^3.3.1" + } + }, + "node_modules/iso-websocket": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/iso-websocket/-/iso-websocket-0.1.6.tgz", + "integrity": "sha512-PfkpWtpmu9p0VIPmkbgKkAry7+fjNYZF/HX7KsvFV0mmD2RLI4aeM7M3B+5qLRBmm6H+A5CTVd760Bn2a60KsA==", + "dependencies": { + "debug": "^4.3.4", + "retry": "^0.13.1", + "typescript-event-target": "^1.1.0" + } + }, "node_modules/jiti": { "version": "1.19.1", "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.19.1.tgz", @@ -1834,6 +2069,11 @@ "jiti": "bin/jiti.js" } }, + "node_modules/js-sha3": { + "version": "0.9.2", + "resolved": 
"https://registry.npmjs.org/js-sha3/-/js-sha3-0.9.2.tgz", + "integrity": "sha512-8kgvwd03wNGQG1GRvl3yy1Yt40sICAcIMsDU2ZLgoL0Z6z9rkRmf9Vd+bi/gYSzgAqMUGl/jiDKu0J8AWFd+BQ==" + }, "node_modules/js-yaml": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", @@ -1965,7 +2205,6 @@ "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", - "dev": true, "engines": { "node": ">= 8" } @@ -1974,7 +2213,6 @@ "version": "4.0.5", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", - "dev": true, "dependencies": { "braces": "^3.0.2", "picomatch": "^2.3.1" @@ -2046,8 +2284,16 @@ "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" + }, + "node_modules/multiformats": { + "version": "12.1.3", + "resolved": "https://registry.npmjs.org/multiformats/-/multiformats-12.1.3.tgz", + "integrity": "sha512-eajQ/ZH7qXZQR2AgtfpmSMizQzmyYVmCql7pdhldPuYQi4atACekbJaQplk6dWyIi10jCaFnd6pqvcEFXjbaJw==", + "engines": { + "node": ">=16.0.0", + "npm": ">=7.0.0" + } }, "node_modules/mz": { "version": "2.7.0", @@ -2132,6 +2378,14 @@ "node": ">= 6" } }, + "node_modules/object-path": { + "version": "0.11.8", + "resolved": "https://registry.npmjs.org/object-path/-/object-path-0.11.8.tgz", + "integrity": "sha512-YJjNZrlXJFM42wTBn6zgOJVar9KFJvzx6sTWDte8sWZF//cnjl0BxHNpfZx+ZffXX63A9q0b1zsFiBX4g4X5KA==", + "engines": { + "node": ">= 10.12.0" + } + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -2188,6 +2442,11 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/package-json-type": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/package-json-type/-/package-json-type-1.0.3.tgz", + "integrity": "sha512-Bey4gdRuOwDbS8Fj1qA3/pTq5r8pqiI5E3tjSqCdhaLSsyGG364VFzXLTIexN5AaNGe/vgdBzLfoKdr7EVg2KQ==" + }, "node_modules/parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -2261,7 +2520,6 @@ "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, "engines": { "node": ">=8.6" }, @@ -2477,7 +2735,6 @@ "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", - "dev": true, "funding": [ { "type": "github", @@ -2540,11 +2797,26 @@ "node": ">=4" } }, + "node_modules/resolve-pkg-maps": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz", + "integrity": "sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==", + "funding": { + "url": "https://github.com/privatenumber/resolve-pkg-maps?sponsor=1" + } + }, + "node_modules/retry": { + "version": "0.13.1", + "resolved": 
"https://registry.npmjs.org/retry/-/retry-0.13.1.tgz", + "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==", + "engines": { + "node": ">= 4" + } + }, "node_modules/reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", - "dev": true, "engines": { "iojs": ">=1.0.0", "node": ">=0.10.0" @@ -2585,7 +2857,6 @@ "version": "1.2.0", "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", - "dev": true, "funding": [ { "type": "github", @@ -3078,7 +3349,6 @@ "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, "dependencies": { "is-number": "^7.0.0" }, @@ -3102,10 +3372,9 @@ "dev": true }, "node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==", - "dev": true + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" }, "node_modules/tsutils": { "version": "3.21.0", @@ -3165,7 +3434,6 @@ "version": "5.1.6", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz", "integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==", - "dev": true, "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -3174,6 +3442,11 @@ "node": ">=14.17" } }, + "node_modules/typescript-event-target": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/typescript-event-target/-/typescript-event-target-1.1.0.tgz", + "integrity": "sha512-PMrzUVryhnUq2n8M7tjNHNRuIHlUqly5RfGltBTpPCdVpbytgALTRDegF/t6+mFmmtBVhOqEYlbjVNBxwabIug==" + }, "node_modules/undici": { "version": "5.26.5", "resolved": "https://registry.npmjs.org/undici/-/undici-5.26.5.tgz", @@ -3186,6 +3459,17 @@ "node": ">=14.0" } }, + "node_modules/unicorn-magic": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.1.0.tgz", + "integrity": "sha512-lRfVq8fE8gz6QMBuDM6a+LO3IAzTi05H6gCVaUpir2E1Rwpo4ZUog45KpNXKC/Mn3Yb9UDuHumeFTo9iV/D9FQ==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/update-browserslist-db": { "version": "1.0.11", "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.11.tgz", @@ -3402,6 +3686,14 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/zod": { + "version": "3.22.4", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.22.4.tgz", + "integrity": "sha512-iC+8Io04lddc+mVqQ9AZ7OQ2MrUKGN+oIQyq1vemgt46jwCwLfhq7/pwnBnNXXXZb8VTVLKwp9EDkx+ryxIWmg==", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } } } } diff --git a/examples/websocket-relay/relay-app/package.json b/examples/websocket-relay/relay-app/package.json index 3d191343..3d77056f 100644 --- a/examples/websocket-relay/relay-app/package.json +++ 
b/examples/websocket-relay/relay-app/package.json @@ -37,8 +37,10 @@ }, "type": "module", "dependencies": { + "@fission-codes/homestar": "^1.0.0", "@zerodevx/svelte-json-view": "^1.0.3", "clipboard-copy": "^4.0.1", + "iso-base": "^2.0.1", "svelvet": "8.1.0" } } diff --git a/examples/websocket-relay/relay-app/src/components/WorkflowDetail.svelte b/examples/websocket-relay/relay-app/src/components/WorkflowDetail.svelte index 4dad639e..ce1f1f51 100644 --- a/examples/websocket-relay/relay-app/src/components/WorkflowDetail.svelte +++ b/examples/websocket-relay/relay-app/src/components/WorkflowDetail.svelte @@ -3,7 +3,7 @@ import { slide } from "svelte/transition"; import { quartOut } from "svelte/easing"; - import { workflowOneJson, workflowTwoJson } from "$lib/workflow"; + import { workflowOnePromised, workflowTwoPromised } from "$lib/workflow";

Workflow Two
- + {#await workflowTwoPromised then workflowTwo} + + {/await}
diff --git a/examples/websocket-relay/relay-app/src/lib/channel.ts b/examples/websocket-relay/relay-app/src/lib/channel.ts deleted file mode 100644 index 857d8191..00000000 --- a/examples/websocket-relay/relay-app/src/lib/channel.ts +++ /dev/null @@ -1,103 +0,0 @@ -import { get as getStore } from "svelte/store"; - -import type { Maybe } from "$lib"; - -import { activeWorkflowStore, channelStore } from "../stores"; -import { fail, handleMessage } from "./workflow"; - -// TYPES - -export type Channel = { - close: () => void; - send: (data: ChannelData) => void; -}; - -export type ChannelOptions = { - handleMessage: (event: MessageEvent) => void; -}; - -export type ChannelData = string | ArrayBufferLike | Blob | ArrayBufferView; - -export async function connect() { - const channel = await createWssChannel( - import.meta.env.VITE_WEBSOCKET_ENDPOINT, - { handleMessage } - ); - - channelStore.set(channel); - - setInterval(() => { - channel.send("ping"); - - setTimeout(() => { - const activeWorkflow = getStore(activeWorkflowStore); - - if (!activeWorkflow) return; - - // Check failed ping count for lost connection - const failedPingCount = activeWorkflow.failedPingCount; - - if (failedPingCount >= import.meta.env.VITE_MAX_PING_RETRIES) { - // Fail the workflow - fail(activeWorkflow.id); - - // Remove channel. Connection will be re-established on next workflow run. - channelStore.set(null); - } else { - // Assume failure. We reset the count to zero in the message handler on pong. - activeWorkflowStore.update((store) => - store ? { ...store, failedPingCount: failedPingCount + 1 } : null - ); - } - }, import.meta.env.VITE_PING_INTERVAL); - }, import.meta.env.VITE_PING_INTERVAL); -} - -export const createWssChannel = async ( - endpoint: string, - options: ChannelOptions -): Promise => { - const { handleMessage } = options; - - const topic = `ipvm-homestar`; - console.log("Opening channel", topic); - - const socket: Maybe = new WebSocket(endpoint); - await waitForOpenConnection(socket); - socket.onmessage = handleMessage; - socket.onerror = (err) => { - console.log("socket error", err); - }; - - const send = publishOnWssChannel(socket); - const close = closeWssChannel(socket); - - return { - send, - close, - }; -}; - -const waitForOpenConnection = async (socket: WebSocket): Promise => { - return new Promise((resolve, reject) => { - socket.onopen = () => resolve(); - socket.onerror = () => reject("Websocket channel could not be opened"); - }); -}; - -export const closeWssChannel = (socket: Maybe): (() => void) => { - return function () { - if (socket) socket.close(1000); - }; -}; - -export const publishOnWssChannel = ( - socket: Maybe -): ((data: ChannelData) => void) => { - return function (data: ChannelData) { - const binary = - typeof data === "string" ? 
new TextEncoder().encode(data).buffer : data; - - socket?.send(binary); - }; -}; diff --git a/examples/websocket-relay/relay-app/src/lib/workflow.ts b/examples/websocket-relay/relay-app/src/lib/workflow.ts index c32fb005..f71e7448 100644 --- a/examples/websocket-relay/relay-app/src/lib/workflow.ts +++ b/examples/websocket-relay/relay-app/src/lib/workflow.ts @@ -1,16 +1,22 @@ +import { base64 } from "iso-base/rfc4648"; import { get as getStore } from "svelte/store"; +import type { MaybeResult } from "@fission-codes/homestar/codecs/types"; +import type { + Receipt as RawReceipt, + WorkflowNotification, + WorkflowNotificationError, +} from "@fission-codes/homestar"; +import * as WorkflowBuilder from "@fission-codes/homestar/workflow"; -import { base64CatStore, firstWorkflowToRunStore } from "../stores"; import type { Receipt, TaskOperation, TaskStatus, Meta } from "$lib/task"; import { activeWorkflowStore, - channelStore, + firstWorkflowToRunStore, + homestarStore, taskStore, workflowStore, } from "../stores"; -import { connect, type Channel } from "$lib/channel"; -import type { Maybe } from "$lib"; export type Workflow = { id: WorkflowId; @@ -28,15 +34,10 @@ export type WorkflowId = "one" | "two"; // RUN export async function run(workflowId: WorkflowId) { - let channel = getStore(channelStore); const firstWorkflowToRun = getStore(firstWorkflowToRunStore); + const homestar = getStore(homestarStore); const tasks = getStore(taskStore); - if (!channel) { - await connect(); - channel = getStore(channelStore); - } - // Reset workflow UI and state reset(workflowId); @@ -50,7 +51,7 @@ export async function run(workflowId: WorkflowId) { // Record the first workflow that ran if (!firstWorkflowToRun) { - firstWorkflowToRunStore.set(workflowId) + firstWorkflowToRunStore.set(workflowId); } // Set workflow status to working @@ -61,27 +62,56 @@ export async function run(workflowId: WorkflowId) { // Send run command to server if (workflowId === "one") { - channel?.send( - JSON.stringify({ - action: "run", - name: workflowId, - workflow: workflowOneJson, - }) - ); + const workflowOne = await workflowOnePromised; + homestar.runWorkflow(workflowOne, handleMessage); } else if (workflowId === "two") { - channel?.send( - JSON.stringify({ - action: "run", - name: workflowId, - workflow: workflowTwoJson, - }) - ); + const workflowTwo = await workflowTwoPromised; + homestar.runWorkflow(workflowTwo, handleMessage); } - if (import.meta.env.VITE_EMULATION_MODE === "true") { - // Emulate with an echo server - emulate(workflowId, channel); - } + checkHealth(); +} + +/** + * Check health and fail workflow when the Homestar node does not respond in time. + */ +function checkHealth() { + const homestar = getStore(homestarStore); + + let interval = setInterval(async () => { + const activeWorkflow = getStore(activeWorkflowStore); + + if (activeWorkflow) { + if (activeWorkflow.step === activeWorkflow.tasks.length - 1) { + // Workflow completed + clearInterval(interval); + } + + if ( + activeWorkflow.failedPingCount >= import.meta.env.VITE_MAX_PING_RETRIES + ) { + // Fail the workflow + fail(activeWorkflow.id); + clearInterval(interval); + } + + const health = (await homestar.health()).result; + if (health?.healthy) { + activeWorkflowStore.update((store) => + store ? { ...store, failedPingCount: 0 } : null + ); + } else { + activeWorkflowStore.update((store) => + store + ? 
{ ...store, failedPingCount: activeWorkflow.failedPingCount + 1 } + : null + ); + } + } else { + // No workflow active + clearInterval(interval); + } + }, import.meta.env.VITE_PING_INTERVAL); } /** @@ -132,90 +162,74 @@ export function fail(workflowId: WorkflowId) { // HANDLER -export async function handleMessage(event: MessageEvent) { - const data = await event.data.text(); - +export async function handleMessage( + data: MaybeResult +) { console.log("Received message from server: ", data); - // Reset ping count on echoed ping or pong from server - if (data === "ping" || data === "pong") { - activeWorkflowStore.update((store) => - store ? { ...store, failedPingCount: 0 } : null - ); - - return; + if (data.error) { + throw data.error; } - const message = JSON.parse(data); - if (message.receipt !== undefined && message.receipt.meta !== undefined) { - const activeWorkflow = getStore(activeWorkflowStore); + const activeWorkflow = getStore(activeWorkflowStore); - if (!activeWorkflow) { - console.error("Received a receipt but workflow was not initiated"); - return; - } + if (!activeWorkflow) { + console.error("Received a receipt but workflow was not initiated"); + return; + } - const taskId = activeWorkflow.step + 1; - const status = message.metadata.replayed ? "replayed" : "executed"; - const receipt = parseReceipt(message.receipt); + const taskId = activeWorkflow.step + 1; + const status = data.result.metadata.replayed ? "replayed" : "executed"; + const receipt = parseReceipt(data.result.receipt); - // Update task in UI - taskStore.update((store) => { - const updatedTasks = store[activeWorkflow.id].map((t) => - t.id === taskId - ? { + // Update task in UI + taskStore.update((store) => { + const updatedTasks = store[activeWorkflow.id].map((t) => + t.id === taskId + ? { ...t, status, message: getTaskMessage(status), receipt, } - : t - ); + : t + ); - return { ...store, [activeWorkflow.id]: updatedTasks }; - }); + return { ...store, [activeWorkflow.id]: updatedTasks }; + }); - // Log receipt - console.table(receipt); + // Log receipt + console.table(receipt); - if (activeWorkflow.step === activeWorkflow.tasks.length - 1) { - // Workflow is done. Reset workflow status to waiting. - workflowStore.update((workflows) => ({ - ...workflows, - [activeWorkflow.id]: { - ...workflows[activeWorkflow.id], - status: "waiting", - }, - })); + if (activeWorkflow.step === activeWorkflow.tasks.length - 1) { + // Workflow is done. Reset workflow status to waiting. + workflowStore.update((workflows) => ({ + ...workflows, + [activeWorkflow.id]: { + ...workflows[activeWorkflow.id], + status: "waiting", + }, + })); - // Deactivate workflow - activeWorkflowStore.set(null); - } else { - // Increment workflow step - activeWorkflowStore.update((store) => - store ? { ...store, step: store.step + 1 } : null - ); - } + // Deactivate workflow + activeWorkflowStore.set(null); } else { - console.warn("Received an unexpected message", message); + // Increment workflow step + activeWorkflowStore.update((store) => + store ? { ...store, step: store.step + 1 } : null + ); } } -function parseReceipt(raw: { - iss: string | null; - meta: Meta | null; - out: ["ok" | "error", Record<"/", Record<"bytes", string>>]; - prf: string[]; - ran: Record<"/", string>; -}): Receipt { +const parseReceipt = (raw: RawReceipt): Receipt => { return { - iss: raw.iss, - meta: raw.meta, - out: [raw.out[0], raw.out[1]["/"].bytes], - prf: raw.prf, - ran: raw.ran["/"], + iss: raw.iss ?? 
null, + meta: raw.meta as Meta, + out: [raw.out[0], base64.encode(raw.out[1])], + prf: raw.prf.map(toString), + ran: raw.ran.toString(), }; -} +}; function getTaskMessage(status: TaskStatus) { switch (status) { @@ -233,226 +247,77 @@ function getTaskMessage(status: TaskStatus) { } } -// JSON WORKFLOWS - -export const workflowOneJson = { - tasks: [ - { - cause: null, - meta: { - memory: 4294967296, - time: 100000, - }, - prf: [], - run: { - input: { - args: [ - { - "/": "bafybeiejevluvtoevgk66plh5t6xiy3ikyuuxg3vgofuvpeckb6eadresm", - }, - 150, - 350, - 500, - 500, - ], - func: "crop", +// WORKFLOWS + +export const workflowOnePromised = WorkflowBuilder.workflow({ + name: "one", + workflow: { + tasks: [ + WorkflowBuilder.crop({ + name: "crop", + resource: + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a", + args: { + data: "{{ cid:bafybeiejevluvtoevgk66plh5t6xiy3ikyuuxg3vgofuvpeckb6eadresm }}", + x: 150, + y: 350, + height: 500, + width: 500, }, - nnc: "", - op: "wasm/run", - rsc: "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", - }, - }, - { - cause: null, - meta: { - memory: 4294967296, - time: 100000, - }, - prf: [], - run: { - input: { - args: [ - { - "await/ok": { - "/": "bafyrmibalyvlsj3zo2vdgjmvawdszh546jz53ejhud7u6lkmpg6mcvq5ja", - }, - }, - ], - func: "rotate90", + }), + WorkflowBuilder.rotate90({ + name: "rotate90", + resource: + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a", + args: { + data: "{{needs.crop.output}}", }, - nnc: "", - op: "wasm/run", - rsc: "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", - }, - }, - { - cause: null, - meta: { - memory: 4294967296, - time: 100000, - }, - prf: [], - run: { - input: { - args: [ - { - "await/ok": { - "/": "bafyrmiepk7vo7qcndopjhuywtv6bly7ojd6gzmwlqm4uzsingbyxluytny", - }, - }, - 20.2, - ], - func: "blur", + }), + WorkflowBuilder.blur({ + name: "blur", + resource: + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a", + args: { + data: "{{needs.rotate90.output}}", + sigma: 20.2, }, - nnc: "", - op: "wasm/run", - rsc: "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", - }, - }, - ], -}; - -export const workflowTwoJson = { - tasks: [ - { - cause: null, - meta: { - memory: 4294967296, - time: 100000, - }, - prf: [], - run: { - input: { - args: [ - { - "/": "bafybeiejevluvtoevgk66plh5t6xiy3ikyuuxg3vgofuvpeckb6eadresm", - }, - 150, - 350, - 500, - 500, - ], - func: "crop", + }), + ], + }, +}); + +export const workflowTwoPromised = WorkflowBuilder.workflow({ + name: "two", + workflow: { + tasks: [ + WorkflowBuilder.crop({ + name: "crop", + resource: + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a", + args: { + data: "{{ cid:bafybeiejevluvtoevgk66plh5t6xiy3ikyuuxg3vgofuvpeckb6eadresm }}", + x: 150, + y: 350, + height: 500, + width: 500, }, - nnc: "", - op: "wasm/run", - rsc: "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", - }, - }, - { - cause: null, - meta: { - memory: 4294967296, - time: 100000, - }, - prf: [], - run: { - input: { - args: [ - { - "await/ok": { - "/": "bafyrmibalyvlsj3zo2vdgjmvawdszh546jz53ejhud7u6lkmpg6mcvq5ja", - }, - }, - ], - func: "rotate90", + }), + WorkflowBuilder.rotate90({ + name: "rotate90", + resource: + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a", + args: { + data: "{{needs.crop.output}}", }, - nnc: "", - op: "wasm/run", - rsc: "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", - }, - }, - { - cause: null, - 
meta: { - memory: 4294967296, - time: 100000, - }, - prf: [], - run: { - input: { - args: [ - { - "await/ok": { - "/": "bafyrmiepk7vo7qcndopjhuywtv6bly7ojd6gzmwlqm4uzsingbyxluytny", - }, - }, - ], - func: "grayscale", + }), + WorkflowBuilder.grayscale({ + name: "grayscale", + resource: + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a", + args: { + data: "{{needs.rotate90.output}}", }, - nnc: "", - op: "wasm/run", - rsc: "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", - }, - }, - ], -}; - -// EMULATION - -function emulate(workflowId: string, channel: Maybe) { - if (!channel) { - console.error("Cannot emulate. Channel has not been set."); - return; - } - - if (workflowId === "one") { - Promise.resolve() - .then(() => sendEmulated("executed", "one", "crop", channel, 500)) - .then(() => sendEmulated("executed", "one", "rotate90", channel, 1500)) - .then(() => sendEmulated("executed", "one", "blur", channel, 20000)); - } else if (workflowId === "two") { - Promise.resolve() - .then(() => sendEmulated("replayed", "two", "crop", channel, 200)) - .then(() => sendEmulated("replayed", "two", "rotate90", channel, 200)) - .then(() => sendEmulated("executed", "two", "grayscale", channel, 1500)); - } -} - -function sendEmulated( - status: TaskStatus, - workflowId: string, - op: TaskOperation, - channel: Channel, - delay: number -) { - return new Promise((resolve) => { - setTimeout(() => { - const message = JSON.stringify(sampleReceipt(status, workflowId, op)); - - channel.send(message); - - resolve(null); - }, delay); - }); -} - -function sampleReceipt( - status: TaskStatus, - workflowId: string, - op: TaskOperation -) { - const base64Cat = getStore(base64CatStore) - - return { - metadata: { - name: workflowId, - replayed: status == "executed" ? 
false : true, - receipt_cid: { - "/": "bafyrmiczrugtx6jj42qbwd2ctlmj766th2nwzfsqmvathjdxk63rwkkvpi", - }, - }, - receipt: { - iss: null, - meta: { - op: op, - workflow: "bafyrmiczrugtx6jj42qbwd2ctlmj766th2nwzfsqmvathjdxk63rwkkvpd", - }, - out: ["ok", { "/": { bytes: `${base64Cat}` } }], - prf: [], - ran: { - "/": "bafkr4ickinozehpaz72vtgpbhhqpf6v2fi67rvr6uis52bwsesoss6vinq", - }, - }, - }; -} + }), + ], + }, +}); diff --git a/examples/websocket-relay/relay-app/src/routes/+page.svelte b/examples/websocket-relay/relay-app/src/routes/+page.svelte index f8b54fa7..4f96b10c 100644 --- a/examples/websocket-relay/relay-app/src/routes/+page.svelte +++ b/examples/websocket-relay/relay-app/src/routes/+page.svelte @@ -2,8 +2,7 @@ import { onDestroy } from "svelte"; import { Svelvet } from "svelvet"; - import { connect } from "$lib/channel"; - import { base64CatStore, nodeStore, taskStore } from "../stores"; + import { base64CatStore, nodeStore } from "../stores"; import Controls from "$components/Controls.svelte"; import Header from "$components/Header.svelte"; import WorkflowDetail from "$components/WorkflowDetail.svelte"; @@ -37,9 +36,6 @@ // Set spacecat unmodified image const fetchCat = initializeSpaceCat(); - // Connect to websocket server - connect(); - onDestroy(() => { unsubscribeNodeStore(); }); diff --git a/examples/websocket-relay/relay-app/src/stores.ts b/examples/websocket-relay/relay-app/src/stores.ts index 78b1a158..e5d483fd 100644 --- a/examples/websocket-relay/relay-app/src/stores.ts +++ b/examples/websocket-relay/relay-app/src/stores.ts @@ -1,16 +1,26 @@ -import { derived, writable } from "svelte/store"; -import type { Readable, Writable } from "svelte/store"; +import { + derived, + readable, + writable, + type Readable, + type Writable, +} from "svelte/store"; +import { Homestar } from "@fission-codes/homestar"; import type { NodeProps } from "svelvet"; +import { WebsocketTransport } from "@fission-codes/homestar/transports/ws.js"; -import type { Channel } from "$lib/channel"; import type { Workflow, WorkflowId, WorkflowState } from "$lib/workflow"; import type { Maybe } from "$lib"; import type { Task } from "$lib/task"; -// Initialized in +page.svelte -export const base64CatStore: Writable = writable("") +export const homestarStore: Readable = readable( + new Homestar({ + transport: new WebsocketTransport(import.meta.env.VITE_WEBSOCKET_ENDPOINT), + }) +); -export const channelStore: Writable> = writable(null); +// Initialized in +page.svelte +export const base64CatStore: Writable = writable(""); export const workflowStore: Writable> = writable({ one: { @@ -26,7 +36,8 @@ export const workflowStore: Writable> = writable({ export const activeWorkflowStore: Writable> = writable(null); -export const firstWorkflowToRunStore: Writable<'one' | 'two'| null> = writable(null) +export const firstWorkflowToRunStore: Writable<"one" | "two" | null> = + writable(null); export const taskStore: Writable> = writable({ one: [ @@ -84,13 +95,13 @@ export const taskStore: Writable> = writable({ }); const NODE_TASK_MAP = { - '2': 'crop', - '3': 'rotate90', - '4': 'blur', - '5': 'crop', - '6': 'rotate90', - '7': 'grayscale', -} + "2": "crop", + "3": "rotate90", + "4": "blur", + "5": "crop", + "6": "rotate90", + "7": "grayscale", +}; export const nodeStore: Readable = derived( [firstWorkflowToRunStore, taskStore], ($stores) => { diff --git a/examples/websocket-relay/src/main.rs b/examples/websocket-relay/src/main.rs index e3909584..b17d847a 100644 --- a/examples/websocket-relay/src/main.rs +++ 
b/examples/websocket-relay/src/main.rs @@ -10,24 +10,17 @@ use tracing::info; fn main() -> Result<()> { let settings = Settings::load().expect("runtime settings to be loaded"); - let _guard = Logger::init(settings.monitoring()); + let _guard = Logger::init(settings.node().monitoring()); // Just for example purposes, we're going to start the ipfs // daemon. Typically, these would be started separately. let ipfs_daemon = ipfs_setup(); - info!( - subject = "settings", - category = "homestar_init", - "starting with settings: {:?}", - settings, - ); + info!("starting with settings: {:?}", settings,); let db = Db::setup_connection_pool(settings.node(), None).expect("to setup database pool"); info!( - subject = "database", - category = "homestar_init", "starting with database: {}", Db::url().expect("database url to be provided"), ); @@ -36,7 +29,6 @@ fn main() -> Result<()> { Runner::start(settings, db).expect("Failed to start runtime"); // ipfs cleanup after runtime is stopped - if let Some(mut ipfs_daemon) = ipfs_daemon { match ipfs_daemon.try_wait() { Ok(Some(status)) => info!("exited with: {status}"), diff --git a/flake.lock b/flake.lock index 76142117..685d0257 100644 --- a/flake.lock +++ b/flake.lock @@ -36,11 +36,11 @@ }, "nixpkgs": { "locked": { - "lastModified": 1699186365, - "narHash": "sha256-Pxrw5U8mBsL3NlrJ6q1KK1crzvSUcdfwb9083sKDrcU=", + "lastModified": 1701080425, + "narHash": "sha256-QUaQPXLMsgIWxY2JsbK2TlqKHtcbhf9BGpmn4ilAkrI=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "a0b3b06b7a82c965ae0bb1d59f6e386fe755001d", + "rev": "21ea9c80732bc4168ed38c5c2f1f4df37c57a6dd", "type": "github" }, "original": { @@ -68,11 +68,11 @@ ] }, "locked": { - "lastModified": 1699236891, - "narHash": "sha256-J0uhoYlufJncIFbM/pAoggzHK/qERB9KfQRkmYD56yo=", + "lastModified": 1701224160, + "narHash": "sha256-qnMmxNMKmd6Soel0cfauyMJ+LzuZbvmiDQPSIuTbQ+M=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "a7f9bf91dc5065d470cd57169a9f2ebdbdfe1f24", + "rev": "4a080e26d55eaedb95ab1bf8eeaeb84149c10f12", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index f4fd8bb3..b7795159 100644 --- a/flake.nix +++ b/flake.nix @@ -57,11 +57,11 @@ cargo-expand cargo-nextest cargo-sort - cargo-spellcheck cargo-unused-features cargo-udeps cargo-watch rustup + tokio-console twiggy wasm-tools ]; @@ -100,7 +100,7 @@ devRunServer = pkgs.writeScriptBin "cargo-run-dev" '' #!${pkgs.stdenv.shell} - cargo run --no-default-features --features dev -- start -c homestar-runtime/config/settings.toml + cargo run --no-default-features --features dev -- start ''; doc = pkgs.writeScriptBin "doc" '' @@ -134,7 +134,7 @@ xFuncNoDefault = cmd: pkgs.writeScriptBin "x-${cmd}-0" '' #!${pkgs.stdenv.shell} - cargo watch -c -s "cargo ${cmd} --workspace --no-default-features" + cargo watch -c -s "cargo ${cmd} --no-default-features" ''; xFuncPackage = cmd: crate: @@ -145,19 +145,19 @@ xFuncTest = pkgs.writeScriptBin "x-test" '' #!${pkgs.stdenv.shell} - cargo watch -c -s "cargo nextest run --workspace --nocapture && cargo test --doc" + cargo watch -c -s "cargo nextest run --workspace --no-capture && cargo test --doc" ''; xFuncTestAll = pkgs.writeScriptBin "x-test-all" '' #!${pkgs.stdenv.shell} - cargo watch -c -s "cargo nextest run --workspace --all-features --nocapture \ + cargo watch -c -s "cargo nextest run --workspace --all-features --no-capture \ && cargo test --workspace --doc --all-features" ''; xFuncTestNoDefault = pkgs.writeScriptBin "x-test-0" '' #!${pkgs.stdenv.shell} - cargo watch -c -s "cargo nextest run --workspace 
--no-default-features --nocapture \ - && cargo test --workspace --doc --no-default-features" + cargo watch -c -s "cargo nextest run --no-default-features --no-capture \ + && cargo test --doc --no-default-features" ''; xFuncTestPackage = crate: @@ -175,14 +175,14 @@ nxTestAll = pkgs.writeScriptBin "nx-test-all" '' #!${pkgs.stdenv.shell} - cargo nextest run --workspace --all-features --nocapture + cargo nextest run --workspace --all-features --no-capture cargo test --workspace --doc --all-features ''; nxTestNoDefault = pkgs.writeScriptBin "nx-test-0" '' #!${pkgs.stdenv.shell} - cargo nextest run --workspace --no-default-features --nocapture - cargo test --workspace --doc --no-default-features + cargo nextest run --no-default-features --no-capture + cargo test --doc --no-default-features ''; wasmTest = pkgs.writeScriptBin "wasm-ex-test" '' diff --git a/homestar-core/src/test_utils/ports.rs b/homestar-core/src/test_utils/ports.rs index 362c2868..f817b9b9 100644 --- a/homestar-core/src/test_utils/ports.rs +++ b/homestar-core/src/test_utils/ports.rs @@ -9,6 +9,6 @@ static PORTS: OnceCell = OnceCell::new(); /// Return a unique port(in runtime) for test pub fn get_port() -> usize { PORTS - .get_or_init(|| AtomicUsize::new(rand::thread_rng().gen_range(3000..3800))) + .get_or_init(|| AtomicUsize::new(rand::thread_rng().gen_range(2000..6800))) .fetch_add(1, Ordering::Relaxed) } diff --git a/homestar-core/src/test_utils/workflow.rs b/homestar-core/src/test_utils/workflow.rs index d03baf25..2c11f01e 100644 --- a/homestar-core/src/test_utils/workflow.rs +++ b/homestar-core/src/test_utils/workflow.rs @@ -19,13 +19,13 @@ use std::collections::BTreeMap; use url::Url; const RAW: u64 = 0x55; +const WASM_CID: &str = "bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a"; type NonceBytes = Vec; /// Return a `mocked` `wasm/run` [Instruction]. pub fn wasm_instruction<'a, T>() -> Instruction<'a, T> { - let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); - let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); + let resource = Url::parse(format!("ipfs://{WASM_CID}").as_str()).unwrap(); Instruction::new( resource, @@ -45,8 +45,7 @@ where Ipld: From, T: Clone, { - let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); - let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); + let resource = Url::parse(format!("ipfs://{WASM_CID}").as_str()).unwrap(); let instr = Instruction::new( resource.clone(), @@ -100,8 +99,7 @@ where /// Return a `mocked` `wasm/run` [Instruction], along with it's [Nonce] as bytes. pub fn wasm_instruction_with_nonce<'a, T>() -> (Instruction<'a, T>, NonceBytes) { - let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); - let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); + let resource = Url::parse(format!("ipfs://{WASM_CID}").as_str()).unwrap(); let nonce = Nonce::generate(); ( @@ -120,8 +118,7 @@ pub fn wasm_instruction_with_nonce<'a, T>() -> (Instruction<'a, T>, NonceBytes) /// Return a `mocked` [Instruction]. 
pub fn instruction<'a, T>() -> Instruction<'a, T> { - let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); - let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); + let resource = Url::parse(format!("ipfs://{WASM_CID}").as_str()).unwrap(); Instruction::new( resource, @@ -132,8 +129,7 @@ pub fn instruction<'a, T>() -> Instruction<'a, T> { /// Return a `mocked` [Instruction], along with it's [Nonce] as bytes. pub fn instruction_with_nonce<'a, T>() -> (Instruction<'a, T>, NonceBytes) { - let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); - let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); + let resource = Url::parse(format!("ipfs://{WASM_CID}").as_str()).unwrap(); let nonce = Nonce::generate(); ( diff --git a/homestar-core/src/workflow/input/parse.rs b/homestar-core/src/workflow/input/parse.rs index acf189ae..8e73858b 100644 --- a/homestar-core/src/workflow/input/parse.rs +++ b/homestar-core/src/workflow/input/parse.rs @@ -64,7 +64,7 @@ impl From> for Args { /// use libipld::Ipld; /// use url::Url; /// -/// let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); +/// let wasm = "bafkreihxcyjgyrz437ewzi7md55uqt2zf6yr3zn7xrfi4orc34xdc5jgrm".to_string(); /// let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); /// /// let inst = Instruction::unique( diff --git a/homestar-core/src/workflow/instruction.rs b/homestar-core/src/workflow/instruction.rs index 7aed2649..3c9c18d8 100644 --- a/homestar-core/src/workflow/instruction.rs +++ b/homestar-core/src/workflow/instruction.rs @@ -132,7 +132,7 @@ where /// use libipld::Ipld; /// use url::Url; /// -/// let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); +/// let wasm = "bafkreihxcyjgyrz437ewzi7md55uqt2zf6yr3zn7xrfi4orc34xdc5jgrm".to_string(); /// let resource = Url::parse(format!("ipfs://{wasm}").as_str()).unwrap(); /// /// let instr = Instruction::unique( @@ -154,7 +154,7 @@ where /// use libipld::{cid::{multihash::{Code, MultihashDigest}, Cid}, Ipld, Link}; /// use url::Url; -/// let wasm = "bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".to_string(); +/// let wasm = "bafkreihxcyjgyrz437ewzi7md55uqt2zf6yr3zn7xrfi4orc34xdc5jgrm".to_string(); /// let resource = Url::parse(format!("ipfs://{wasm}").as_str()).expect("IPFS URL"); /// let h = Code::Blake3_256.digest(b"beep boop"); /// let cid = Cid::new_v1(0x55, h); @@ -317,7 +317,13 @@ impl<'a, T> DagCbor for Instruction<'a, T> where Ipld: From {} #[cfg(test)] mod test { use super::*; - use crate::{test_utils, Unit}; + use crate::{test_utils, Unit, DAG_CBOR}; + use libipld::{ + cbor::DagCborCodec, + multihash::{Code, MultihashDigest}, + prelude::Codec, + Cid, + }; #[test] fn ipld_roundtrip() { @@ -330,7 +336,7 @@ mod test { ( RESOURCE_KEY.into(), Ipld::String( - "ipfs://bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".into() + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a".into() ) ), (OP_KEY.into(), Ipld::String("ipld/fun".to_string())), @@ -341,6 +347,28 @@ mod test { assert_eq!(instruction, ipld.try_into().unwrap()) } + #[test] + fn ipld_cid_trials() { + let a_cid = + Cid::try_from("bafyrmiev5j2jzjrqncbfqo6pbraiw7r2p527m4z3bbm6ir3o5kdz2zwcjy").unwrap(); + let ipld = libipld::ipld!({"input": + { + "args": [{"await/ok": a_cid}, "111111"], + "func": "join-strings" + }, + "nnc": "", "op": "wasm/run", + "rsc": 
"ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a"}); + + let instruction = Instruction::::try_from(ipld.clone()).unwrap(); + let instr_cid = instruction.to_cid().unwrap(); + + let bytes = DagCborCodec.encode(&ipld).unwrap(); + let hash = Code::Sha3_256.digest(&bytes); + let ipld_to_cid = Cid::new_v1(DAG_CBOR, hash); + + assert_eq!(ipld_to_cid, instr_cid); + } + #[test] fn ser_de() { let (instruction, _bytes) = test_utils::workflow::instruction_with_nonce::(); diff --git a/homestar-core/src/workflow/pointer.rs b/homestar-core/src/workflow/pointer.rs index 973d876a..33c227ed 100644 --- a/homestar-core/src/workflow/pointer.rs +++ b/homestar-core/src/workflow/pointer.rs @@ -18,11 +18,7 @@ use diesel::{ AsExpression, FromSqlRow, }; use enum_assoc::Assoc; -use libipld::{ - cid::{multibase::Base, Cid}, - serde::from_ipld, - Ipld, Link, -}; +use libipld::{cid::Cid, serde::from_ipld, Ipld, Link}; use serde::{Deserialize, Serialize}; use std::{borrow::Cow, collections::btree_map::BTreeMap, fmt, str::FromStr}; @@ -183,11 +179,7 @@ pub struct Pointer(Cid); impl fmt::Display for Pointer { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let cid_as_string = self - .0 - .to_string_of_base(Base::Base32Lower) - .map_err(|_| fmt::Error)?; - + let cid_as_string = self.0.to_string(); write!(f, "{cid_as_string}") } } diff --git a/homestar-core/src/workflow/task.rs b/homestar-core/src/workflow/task.rs index e55e70fd..02b35255 100644 --- a/homestar-core/src/workflow/task.rs +++ b/homestar-core/src/workflow/task.rs @@ -190,7 +190,7 @@ mod test { ( "rsc".into(), Ipld::String( - "ipfs://bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy".into(), + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a".into(), ), ), ("op".into(), Ipld::String("ipld/fun".to_string())), diff --git a/homestar-functions/README.md b/homestar-functions/README.md index 5ad4c1a1..4c030e1f 100644 --- a/homestar-functions/README.md +++ b/homestar-functions/README.md @@ -51,9 +51,8 @@ cd test && cargo build --target wasm32-unknown-unknown --profile release-wasm-fn cargo build -p homestar-functions-test --target wasm32-unknown-unknown --profile release-wasm-fn ``` -Guest Wasm modules will be generated within the -`../target/wasm32-unknown-unknown/release` directory, e.g. -`../target/wasm32-unknown-unknown/release-wasm-fn/homestar_functions_test.wasm`. +Guest Wasm modules will be generated in the top-level `homestar` directory: +`./target/wasm32-unknown-unknown/release-wasm-fn/homestar_functions_test.wasm`. Sadly, this module is **not yet** an actual `component`. But, we can leverage the [wasm-tools][wasm-tools] tooling ([wit-component][wit-component] in @@ -99,7 +98,7 @@ conditions. 
[kv-demo]: https://github.com/Mossaka/keyvalue-component-model-demo [spiderlightning]: https://github.com/deislabs/spiderlightning [wasi]: https://github.com/WebAssembly/WASI -[wasm32]: https://doc.rust-lang.org/rustc/platform-support/wasm64-unknown-unknown.html +[wasm32]: https://rustwasm.github.io/docs/wasm-pack/prerequisites/non-rustup-setups.html#manually-add-wasm32-unknown-unknown [wasmtime]: https://github.com/bytecodealliance/wasmtime [wasm-tools]: https://github.com/bytecodealliance/wasm-tools [wit-bindgen]: https://github.com/bytecodealliance/wit-bindgen diff --git a/homestar-functions/add/src/lib.rs b/homestar-functions/add/src/lib.rs index 2c67e6a1..18bdd1b6 100644 --- a/homestar-functions/add/src/lib.rs +++ b/homestar-functions/add/src/lib.rs @@ -8,6 +8,10 @@ wit_bindgen::generate!({ pub struct Component; impl Guest for Component { + fn add_one(input: i32) -> i32 { + input + 1 + } + fn add_two(input: i32) -> i32 { input + 2 } diff --git a/homestar-functions/add/wit/host.wit b/homestar-functions/add/wit/host.wit index 27654fc1..0c2d6d2e 100644 --- a/homestar-functions/add/wit/host.wit +++ b/homestar-functions/add/wit/host.wit @@ -1,5 +1,6 @@ package homestar-functions:add world add { + export add-one: func(input: s32) -> s32 export add-two: func(input: s32) -> s32 } diff --git a/homestar-functions/test/Cargo.toml b/homestar-functions/test/Cargo.toml index 5b01ff9a..6770b04a 100644 --- a/homestar-functions/test/Cargo.toml +++ b/homestar-functions/test/Cargo.toml @@ -6,6 +6,7 @@ edition = { workspace = true } rust-version = { workspace = true } [dependencies] +base64 = "0.21" image = { version = "0.24", default-features = false, features = ["png"] } wit-bindgen = "0.13" diff --git a/homestar-functions/test/src/lib.rs b/homestar-functions/test/src/lib.rs index 6abe64d6..b4332144 100644 --- a/homestar-functions/test/src/lib.rs +++ b/homestar-functions/test/src/lib.rs @@ -5,7 +5,12 @@ wit_bindgen::generate!({ } }); -use std::io::Cursor; +use base64::{engine::general_purpose, Engine}; +use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, + io::Cursor, +}; pub struct Component; @@ -46,6 +51,14 @@ impl Guest for Component { buffer } + fn blur_base64(data: String, sigma: f32) -> Vec { + let base64_encoded_png = data.replace("data:image/png;base64,", ""); + let decoded = general_purpose::STANDARD + .decode(base64_encoded_png) + .unwrap(); + Self::blur(decoded, sigma) + } + fn crop(data: Vec, x: u32, y: u32, target_width: u32, target_height: u32) -> Vec { let mut img = image::load_from_memory_with_format(&data, image::ImageFormat::Png).unwrap(); @@ -60,6 +73,14 @@ impl Guest for Component { buffer } + fn crop_base64(data: String, x: u32, y: u32, target_width: u32, target_height: u32) -> Vec { + let base64_encoded_png = data.replace("data:image/png;base64,", ""); + let decoded = general_purpose::STANDARD + .decode(base64_encoded_png) + .unwrap(); + Self::crop(decoded, x, y, target_width, target_height) + } + fn grayscale(data: Vec) -> Vec { let img = image::load_from_memory_with_format(&data, image::ImageFormat::Png).unwrap(); let gray = img.grayscale(); @@ -71,9 +92,16 @@ impl Guest for Component { buffer } + fn grayscale_base64(data: String) -> Vec { + let base64_encoded_png = data.replace("data:image/png;base64,", ""); + let decoded = general_purpose::STANDARD + .decode(base64_encoded_png) + .unwrap(); + Self::grayscale(decoded) + } + fn rotate90(data: Vec) -> Vec { let img = image::load_from_memory_with_format(&data, image::ImageFormat::Png).unwrap(); - let rotated 
= img.rotate90(); let mut buffer: Vec = Vec::new(); @@ -83,6 +111,20 @@ impl Guest for Component { buffer } + + fn rotate90_base64(data: String) -> Vec { + let base64_encoded_png = data.replace("data:image/png;base64,", ""); + let decoded = general_purpose::STANDARD + .decode(base64_encoded_png) + .unwrap(); + Self::rotate90(decoded) + } + + fn hash(s: String) -> Vec { + let mut hash = DefaultHasher::new(); + s.hash(&mut hash); + hash.finish().to_be_bytes().to_vec() + } } #[cfg(test)] @@ -214,4 +256,15 @@ mod test_mod { let cropped = Component::crop(gray, 150, 350, 400, 400); Component::blur(cropped, 0.1); } + + #[cfg(feature = "run-image-tests")] + #[test] + fn mixed_base64() { + let img_uri = r#"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAG9SURBVHgBrVRLTgJBEK3qaYhLtiZGxxMoN8CdISTiCYSEmLgSTkA8AXFFAgv0BkNMDDu4gXAC2vhhy9IIU2X1CGb4T5CXzCRd3f26Xv0QBOna+ykiVJjBha2A3XhMlbz8vsFsdeCOtP/CAAn4H4bAcKbGMarsgMwiYVUqIj6FHYERXBU2oHV7BYI95HsH6VKk5eUzkw0TPqfDCwK400iGWDXmw+BrJ9mSoE/X59VBZ2/vazjy4xIyzk3tat6Tp8Kh54+d5J8HgRZuhsksWjf7xssfD5npNaxsXvLV9PDz9cGxlSaB7sopA0uQbfQlEeoorAalBvvC5E4IO1KLj0L2ABGQqb+lCLAd8sgsSI5KFtxHXii3GUJxPZWuf5QhIgici7WEwavAKSsFNsB2mCQru5HQFqfW2sAGSLveLuuwBULR7X77fluSlYMVyNQ+LVlx2Z6ec8+TXzOunY5XmK07C1smo3GsTEDFFW/Nls2vBYwtH/G0R9I1gYlUAh04kSzk1g4SuasXjCJZLuWCfVbTg8AEkaAQl3fBViDuKemM0ropExWWg2K6iHYhk8NVMmhF2FazUUiMhKQkXdb9AfsesrssluqmAAAAAElFTkSuQmCC"#; + // Call component to rotate the image 90 deg clockwise + let rotated = Component::rotate90_base64(img_uri.into()); + let gray = Component::grayscale(rotated); + let cropped = Component::crop(gray, 10, 10, 50, 50); + Component::blur(cropped, 0.1); + } } diff --git a/homestar-functions/test/wit/host.wit b/homestar-functions/test/wit/host.wit index e2fe58bc..87cdb521 100644 --- a/homestar-functions/test/wit/host.wit +++ b/homestar-functions/test/wit/host.wit @@ -6,7 +6,12 @@ world test { export join-strings: func(a: string, b: string) -> string export transpose: func(matrix: list>) -> list> export blur: func(data: list, sigma: float32) -> list + export blur-base64: func(data: string, sigma: float32) -> list export crop: func(data: list, x: u32, y: u32, target-width: u32, target-height: u32) -> list + export crop-base64: func(data: string, x: u32, y: u32, target-width: u32, target-height: u32) -> list export grayscale: func(data: list) -> list + export grayscale-base64: func(data: string) -> list export rotate90: func(data: list) -> list + export rotate90-base64: func(data: string) -> list + export hash: func(data: string) -> list } diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index 59f5aa20..5588f3d6 100644 --- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -32,6 +32,7 @@ bench = false [[test]] name = "integration" path = "tests/main.rs" +required-features = ["test-utils"] [dependencies] # return to version.workspace = true after the following issue is fixed: @@ -39,10 +40,6 @@ path = "tests/main.rs" anyhow = { workspace = true } async-trait = "0.1" atomic_refcell = { workspace = true } -axum = { version = "0.6", default-features = false, features = [ - "ws", - "headers", -], optional = true } byte-unit = { workspace = true } chrono = { workspace = true } clap = { version = "4.4", default-features = false, features = [ @@ -51,11 +48,13 @@ clap = { version = "4.4", default-features = false, features = [ "help", "env", "std", + "usage", ] } config = { version = "0.13", default-features = false, features = 
["toml"] } console-subscriber = { version = "0.2", default-features = false, features = [ "parking_lot", ], optional = true } +const_format = "0.2" crossbeam = "0.8" dagga = "0.2" dashmap = "5.5" @@ -68,6 +67,7 @@ diesel = { version = "2.1", default-features = false, features = [ ] } diesel_migrations = "2.1" dotenvy = "0.15" +dyn-clone = "1.0" enum-assoc = { workspace = true } faststr = { workspace = true } flume = { version = "0.11", default-features = false, features = ["async"] } @@ -86,6 +86,9 @@ ipfs-api-backend-hyper = { version = "0.6", default-features = false, features = "with-send-sync", ], optional = true } itertools = { workspace = true } +jsonrpsee = { version = "0.20", default-features = false, features = [ + "server", +] } libipld = { workspace = true } libp2p = { version = "0.52", default-features = false, features = [ "kad", @@ -102,24 +105,29 @@ libp2p = { version = "0.52", default-features = false, features = [ "noise", "cbor", "yamux", + "serde", ] } libsqlite3-sys = { version = "0.26", default-features = false, features = [ "bundled", ] } -metrics = { version = "0.21", default-features = false, optional = true } +maplit = "1.0" +metrics = { version = "0.21", default-features = false } metrics-exporter-prometheus = { version = "0.12.1", default-features = false, features = [ "http-listener", -], optional = true } +] } +metrics-util = "0.15" miette = { version = "5.10", default-features = false, features = ["fancy"] } moka = { version = "0.12.1", default-features = false, features = [ "future", "sync", ] } -names = { version = "0.14", default-features = false, optional = true } +names = { version = "0.14", default-features = false } +once_cell = { version = "1.18", default-features = false } proptest = { version = "1.2", optional = true } puffin = { version = "0.17", default-features = false, optional = true } puffin_egui = { version = "0.23.0", default-features = false, optional = true } rand = { workspace = true } +regex = "1.10" reqwest = { version = "0.11", default-features = false, features = [ "blocking", "json", @@ -128,7 +136,7 @@ sec1 = { version = "0.7", default-features = false, features = ["pem"] } semver = { version = "1.0", default-features = false } serde = { workspace = true } serde_ipld_dagcbor = { workspace = true } -serde_json = { version = "1.0", optional = true, default-features = false, features = [ +serde_json = { version = "1.0", default-features = false, features = [ "raw_value", ] } serde_with = { version = "3.3", default-features = false, features = [ @@ -152,7 +160,20 @@ tokio = { workspace = true } tokio-serde = { version = "0.8", default-features = false, features = [ "messagepack", ] } +tokio-stream = { version = "0.1", default-features = false, features = [ + "sync", +] } tokio-util = { version = "0.7", default-features = false } +tower = { version = "0.4", default-features = false, features = [ + "log", + "timeout", +] } +tower-http = { version = "0.4", default-features = false, features = [ + "trace", + "sensitive-headers", + "catch-panic", + "cors", +] } tracing = { workspace = true } tracing-appender = "0.2" tracing-logfmt = "0.3" @@ -162,6 +183,7 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ "registry", ] } tryhard = "0.5" +typetag = "0.2" url = "2.4" wnfs-common = "0.1" @@ -175,8 +197,10 @@ homestar-core = { version = "0.1", path = "../homestar-core", features = [ "test-utils", ] } homestar_runtime_proc_macro = { path = "src/test_utils/proc_macro", package = "homestar-runtime-tests-proc-macro" } 
+jsonrpsee = { version = "0.20", default-features = false, features = [ + "client", +] } nix = { version = "0.27", features = ["signal"] } -once_cell = { version = "1.18", default-features = false } predicates = { version = "3.0", default-features = false } prometheus-parse = "0.2.4" rand = { workspace = true } @@ -186,7 +210,11 @@ serial_test = { version = "2.0", default-features = false, features = [ "file_locks", ] } strip-ansi-escapes = "0.2.0" -tokio-tungstenite = { version = "0.20", default-features = false } +sysinfo = { version = "0.29", default-features = false } +tokio-test = "0.4" +tokio-tungstenite = { version = "0.20", default-features = false, features = [ + "connect", +] } wait-timeout = "0.2" [features] @@ -195,20 +223,18 @@ dev = ["ansi-logs", "ipfs", "monitoring", "websocket-notify"] ansi-logs = ["tracing-logfmt/ansi_logs"] console = ["dep:console-subscriber"] ipfs = ["dep:ipfs-api", "dep:ipfs-api-backend-hyper"] -metrics = ["dep:metrics", "dep:metrics-exporter-prometheus"] -monitoring = ["metrics", "dep:sysinfo"] +monitoring = ["dep:sysinfo"] profile = ["dep:puffin", "dep:puffin_egui"] test-utils = ["dep:proptest"] wasmtime-default = ["homestar-wasm/default"] -websocket-notify = ["websocket-server", "dep:serde_json", "dep:names"] -websocket-server = ["dep:axum"] +websocket-notify = [] [package.metadata.docs.rs] all-features = true # defines the configuration attribute `docsrs` rustdoc-args = ["--cfg", "docsrs"] -[package.metadata.deb] +[package.metadata.deb.variants.x86_64-unknown-linux-musl] maintainer = "James Walker " license-file = ["LICENSE", "0"] extended-description-file = "README.md" @@ -238,10 +264,48 @@ assets = [ ], ] -[package.metadata.generate-rpm] +[package.metadata.deb.variants.x86_64-unknown-linux-gnu] +maintainer = "James Walker " +license-file = ["LICENSE", "0"] +extended-description-file = "README.md" +depends = "" +section = "network" +priority = "optional" +assets = [ + [ + "../target/x86_64-unknown-linux-gnu/release/homestar", + "usr/bin/", + "755", + ], + [ + "../CHANGELOG.md", + "usr/share/doc/homestar/", + "644", + ], + [ + "../LICENSE", + "usr/share/doc/homestar/", + "644", + ], + [ + "../README.md", + "usr/share/doc/homestar/", + "644", + ], +] + +[package.metadata.generate-rpm.variants.x86_64-unknown-linux-musl] assets = [ { source = "../target/x86_64-unknown-linux-musl/release/homestar", dest = "/usr/bin/homestar", mode = "755" }, { source = "../CHANGELOG.md", dest = "/usr/share/doc/homestar/CHANGELOG.md", mode = "644", doc = true }, { source = "../LICENSE", dest = "/usr/share/doc/homestar/LICENSE.md", mode = "644", doc = true }, { source = "../README.md", dest = "/usr/share/doc/homestar/README.md", mode = "644", doc = true }, ] + +[package.metadata.generate-rpm.variants.x86_64-unknown-linux-gnu] +assets = [ + { source = "../target/x86_64-unknown-linux-gnu/release/homestar", dest = "/usr/bin/homestar", mode = "755" }, + { source = "../CHANGELOG.md", dest = "/usr/share/doc/homestar/CHANGELOG.md", mode = "644", doc = true }, + { source = "../LICENSE", dest = "/usr/share/doc/homestar/LICENSE.md", mode = "644", doc = true }, + { source = "../README.md", dest = "/usr/share/doc/homestar/README.md", mode = "644", doc = true }, +] diff --git a/homestar-runtime/config/settings.toml b/homestar-runtime/config/settings.toml index 2be540be..291536a1 100644 --- a/homestar-runtime/config/settings.toml +++ b/homestar-runtime/config/settings.toml @@ -1,6 +1,8 @@ -[monitoring] +[node] + +[node.monitoring] process_collector_interval = 5000 -metrics_port = 4000 
-console_subscriber_port = 5555 +console_subscriber_port = 6669 -[node] +[node.network.metrics] +port = 4000 diff --git a/homestar-runtime/fixtures/defaults.toml b/homestar-runtime/fixtures/defaults.toml new file mode 100644 index 00000000..29aad8af --- /dev/null +++ b/homestar-runtime/fixtures/defaults.toml @@ -0,0 +1,74 @@ +[node] +gc_interval = 1800 +shutdown_timeout = 20 + +[node.database] +url = "homestar.db" +max_pool_size = 100 + +[node.monitoring] +process_collector_interval = 5000 +console_subscriber_port = 6669 + +[node.network] +events_buffer_len = 1024 +poll_cache_interval = 1000 + +[node.network.ipfs] +host = "127.0.0.1" +port = 5001 + +[node.network.libp2p] +listen_address = "/ip4/0.0.0.0/tcp/0" +node_addresses = [] +announce_addresses = [] +transport_connection_timeout = 60 +max_connected_peers = 32 +max_announce_addresses = 10 + +[node.network.libp2p.mdns] +enable = true +enable_ipv6 = false +query_interval = 300 +ttl = 540 + +[node.network.libp2p.rendezvous] +enable_client = true +enable_server = false +registration_ttl = 7200 +discovery_interval = 600 + +[node.network.libp2p.pubsub] +enable = true +duplication_cache_time = 1 +heartbeat = 60 +idle_timeout = 86400 +max_transmit_size = 10485760 +mesh_n_low = 1 +mesh_n_high = 10 +mesh_n = 2 +mesh_outbound_min = 1 + +[node.network.libp2p.dht] +p2p_provider_timeout = 30 +receipt_quorum = 2 +workflow_quorum = 3 + +[node.network.keypair_config] +random = {} + +[node.network.metrics] +port = 4000 + +[node.network.rpc] +host = "::1" +port = 3030 +max_connections = 10 +server_timeout = 120 + +[node.network.webserver] +host = "127.0.0.1" +port = 1337 +timeout = 120 +websocket_capacity = 2048 +websocket_receiver_timeout = 30000 diff --git a/homestar-runtime/fixtures/settings.toml b/homestar-runtime/fixtures/settings.toml index edf2cf1b..5c11b12c 100644 --- a/homestar-runtime/fixtures/settings.toml +++ b/homestar-runtime/fixtures/settings.toml @@ -2,5 +2,9 @@ [node.network] events_buffer_len = 1000 -websocket_port = 9999 + +[node.network.webserver] +port = 9999 + +[node.network.libp2p] node_addresses = ["/ip4/127.0.0.1/tcp/9998/ws"] diff --git a/homestar-runtime/npm/base/index.js b/homestar-runtime/npm/base/index.js new file mode 100644 index 00000000..65abe3ff --- /dev/null +++ b/homestar-runtime/npm/base/index.js @@ -0,0 +1,42 @@ +#!/usr/bin/env node + +import { execa } from 'execa' +import { createRequire } from 'module' +const require = createRequire(import.meta.url) + +/** + * Returns the executable path which is located inside `node_modules` + * The naming convention is app-${os}-${arch} + * If the platform is `win32` or `cygwin`, executable will include a `.exe` extension. 
+ * @see https://nodejs.org/api/os.html#osarch + * @see https://nodejs.org/api/os.html#osplatform + * @example "x/xx/node_modules/app-darwin-arm64" + */ +function getExePath() { + const arch = process.arch + let os = process.platform + let extension = '' + if (['win32', 'cygwin'].includes(process.platform)) { + os = 'windows' + extension = '.exe' + } + + try { + // Since the binary will be located inside `node_modules`, we can simply call `require.resolve` + return require.resolve(`homestar-${os}-${arch}/bin/homestar${extension}`) + } catch (e) { + throw new Error( + `Couldn't find application binary inside node_modules for ${os}-${arch}` + ) + } +} + +/** + * Runs the application with args using nodejs spawn + */ +function run() { + const args = process.argv.slice(2) + execa(getExePath(), args, { stdio: 'inherit' }) +} + +run() diff --git a/homestar-runtime/npm/base/package-lock.json b/homestar-runtime/npm/base/package-lock.json new file mode 100644 index 00000000..dcf9b431 --- /dev/null +++ b/homestar-runtime/npm/base/package-lock.json @@ -0,0 +1,263 @@ +{ + "name": "homestar-runtime", + "version": "0.0.8", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "homestar-runtime", + "version": "0.0.8", + "license": "MIT", + "dependencies": { + "execa": "^8.0.1" + }, + "bin": { + "homestar": "index.js" + }, + "optionalDependencies": { + "homestar-darwin-arm64": "*", + "homestar-darwin-x64": "*", + "homestar-linux-arm64": "*", + "homestar-linux-x64": "*", + "homestar-windows-arm64": "*", + "homestar-windows-x64": "*" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/execa": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-8.0.1.tgz", + "integrity": "sha512-VyhnebXciFV2DESc+p6B+y0LjSm0krU4OgJN44qFAhBY0TJ+1V61tYD2+wHusZ6F9n5K+vl8k0sTy7PEfV4qpg==", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^8.0.1", + "human-signals": "^5.0.0", + "is-stream": "^3.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^5.1.0", + "onetime": "^6.0.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^3.0.0" + }, + "engines": { + "node": ">=16.17" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/get-stream": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-8.0.1.tgz", + "integrity": "sha512-VaUJspBffn/LMCJVoMvSAdmscJyS1auj5Zulnn5UoYcY531UWmdwhRWkcGKnGU93m5HSXP9LP2usOryrBtQowA==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/homestar-darwin-arm64": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/homestar-darwin-arm64/-/homestar-darwin-arm64-0.0.1.tgz", + "integrity": "sha512-ftcZyXJalctBtj3jhTepLVE6LjNaB/k2KB9zAAZjQi6neAKs+MMTqaRt8TV3/X16hOryOeyjDPCshgbGnqpBJw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/homestar-darwin-x64": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/homestar-darwin-x64/-/homestar-darwin-x64-0.0.1.tgz", + "integrity": 
"sha512-DT4H2XnKD6bwjY/3ooYRwfqnP8maKlLp53ZOkeSPIWT8HDf7DI/6WJxeZZy8AGkMox5SU0xP64CrIQ3W/D57NA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/homestar-linux-arm64": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/homestar-linux-arm64/-/homestar-linux-arm64-0.0.1.tgz", + "integrity": "sha512-IKDrLIvZWmp1ZrcYyySV1xp7wOYOCHPELeuiOEd0a3YuHssURXS4CdibUGKXGnTnxv7w7bjNla5HAVyOnC/dNA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/homestar-linux-x64": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/homestar-linux-x64/-/homestar-linux-x64-0.0.1.tgz", + "integrity": "sha512-LuY2HA3SM1B5B4LFpyb+eAKHFaKlEJ0vtkr/aFJCR9d0SA/omf3ZpqmeT4zDrCNgCnqT81rvVDjBOP094890zw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/human-signals": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-5.0.0.tgz", + "integrity": "sha512-AXcZb6vzzrFAUE61HnN4mpLqd/cSIwNQjtNWR0euPm6y0iqx3G4gOXaIDdtdDwZmhwe82LA6+zinmW4UBWVePQ==", + "engines": { + "node": ">=16.17.0" + } + }, + "node_modules/is-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-3.0.0.tgz", + "integrity": "sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==", + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" + }, + "node_modules/mimic-fn": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-4.0.0.tgz", + "integrity": "sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.1.0.tgz", + "integrity": "sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==", + "dependencies": { + "path-key": "^4.0.0" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/onetime": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-6.0.0.tgz", + "integrity": "sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==", + "dependencies": { + "mimic-fn": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/strip-final-newline": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-3.0.0.tgz", + "integrity": "sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + } + } +} diff --git a/homestar-runtime/npm/base/package.json b/homestar-runtime/npm/base/package.json new file mode 100644 index 00000000..d5a6669a --- /dev/null +++ b/homestar-runtime/npm/base/package.json @@ -0,0 +1,38 @@ +{ + "name": "homestar-runtime", + "version": "0.0.8", + "description": "The IPVM reference implementation", + "author": "Hugo Dias (hugodias.me)", + "homepage": "https://github.com/ipvm-wg/homestar/tree/main/homestar-runtime", + "repository": { + "url": "ipvm-wg/homestar", + "directory": "homestar-runtime" + }, + "keywords": [ + "homestar", + "wasm", + "wit", + "webassembly", + "workflows", + "scheduling" + ], + "bin": { + "homestar": "index.js" + }, + "type": "module", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "license": "Apache-2.0", + "optionalDependencies": { + "homestar-darwin-arm64": "*", + "homestar-darwin-x64": "*", + "homestar-linux-arm64": "*", + "homestar-linux-x64": "*", + "homestar-windows-arm64": "*", + "homestar-windows-x64": "*" + }, + "dependencies": { + "execa": "^8.0.1" + } +} diff --git a/homestar-runtime/npm/package.json.tmpl b/homestar-runtime/npm/package.json.tmpl new file mode 100644 index 00000000..90d0dc29 --- /dev/null +++ b/homestar-runtime/npm/package.json.tmpl @@ -0,0 +1,22 @@ +{ + "name": "${node_pkg}", + "version": "${node_version}", + "description": "The IPVM reference implementation", + "homepage": 
"https://github.com/ipvm-wg/homestar/tree/main/homestar-runtime", + "repository": { + "url": "ipvm-wg/homestar", + "directory": "homestar-runtime" + }, + "keywords": ["homestar", "wasm", "wit", "webassembly", "workflows", "scheduling"], + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "Hugo Dias (hugodias.me)", + "license": "Apache-2.0", + "os": [ + "${node_os}" + ], + "cpu": [ + "${node_arch}" + ] +} diff --git a/homestar-runtime/npm/readme.md b/homestar-runtime/npm/readme.md new file mode 100644 index 00000000..cd51c934 --- /dev/null +++ b/homestar-runtime/npm/readme.md @@ -0,0 +1,75 @@ +# Homestar NPM packages + +## Packages + +- [homestar-runtime](https://www.npmjs.com/package/homestar-runtime) - This is the main package that installs the os specific binary package and runs it. +- [homestar-darwin-arm64](https://www.npmjs.com/package/homestar-darwin-arm64) +- [homestar-darwin-x64](https://www.npmjs.com/package/homestar-darwin-x64) +- [homestar-linux-arm64](https://www.npmjs.com/package/homestar-linux-arm64) +- [homestar-linux-x64](https://www.npmjs.com/package/homestar-linux-x64) +- [homestar-windows-x64](https://www.npmjs.com/package/homestar-windows-x64) + +## Usage + +```bash +npx homestar-runtime --help + +# Global install +npm install -g homestar-runtime +homestar start -c config.toml +``` + +## Manual publishing + +```bash + +rustup target add aarch64-unknown-linux-gnu +rustup target add x86_64-unknown-linux-musl +cargo install cargo-get + + +export node_version=$(cargo get workspace.package.version) +export bin="homestar" + + +## darwin arm64 +cargo build -p homestar-runtime --features ansi-logs --locked --release --target aarch64-apple-darwin +export node_os=darwin +export node_arch=arm64 +export node_pkg="${bin}-${node_os}-${node_arch}" +mkdir -p "binaries/${node_pkg}/bin" +envsubst < package.json.tmpl > "binaries/${node_pkg}/package.json" +cp "../../target/aarch64-apple-darwin/release/${bin}" "binaries/${node_pkg}/bin" + +## darwin x64 +cross build -p homestar-runtime --features ansi-logs --locked --release --target x86_64-apple-darwin +export node_os=darwin +export node_arch=x64 +export node_pkg="${bin}-${node_os}-${node_arch}" +mkdir -p "binaries/${node_pkg}/bin" +envsubst < package.json.tmpl > "binaries/${node_pkg}/package.json" +cp "../../target/x86_64-apple-darwin/release/${bin}" "binaries/${node_pkg}/bin" + +## linux arm64 +cross build -p homestar-runtime --features ansi-logs --locked --release --target aarch64-unknown-linux-gnu +export node_os=linux +export node_arch=arm64 +export node_pkg="${bin}-${node_os}-${node_arch}" +mkdir -p "binaries/${node_pkg}/bin" +envsubst < package.json.tmpl > "binaries/${node_pkg}/package.json" +cp "../../target/aarch64-unknown-linux-gnu/release/${bin}" "binaries/${node_pkg}/bin" + +## linux x64 +cross build -p homestar-runtime --features ansi-logs --locked --release --target x86_64-unknown-linux-musl +export node_os=linux +export node_arch=x64 +export node_pkg="${bin}-${node_os}-${node_arch}" +mkdir -p "binaries/${node_pkg}/bin" +envsubst < package.json.tmpl > "binaries/${node_pkg}/package.json" +cp "../../target/x86_64-unknown-linux-musl/release/${bin}" "binaries/${node_pkg}/bin" + +# publish the RC package +cd "${node_pkg}" +npm version $(cargo get package.version)-rc.$(date +%s) --git-tag-version false +npm publish --access public --tag rc +``` diff --git a/homestar-runtime/src/event_handler/channel.rs b/homestar-runtime/src/channel.rs similarity index 58% rename from 
homestar-runtime/src/event_handler/channel.rs rename to homestar-runtime/src/channel.rs index b8d798c3..d06eb361 100644 --- a/homestar-runtime/src/event_handler/channel.rs +++ b/homestar-runtime/src/channel.rs @@ -1,5 +1,5 @@ -//! Wrapper around [crossbeam::channel] and [flume::bounded] to provide common -//! interfaces for sync/async bounded and non-tokio "oneshot" channels. +//! Wrapper around [crossbeam::channel] and [flume] to provide common +//! interfaces for sync/async (un)bounded and non-tokio "oneshot" channels. use crossbeam::channel; @@ -12,21 +12,21 @@ pub type BoundedChannelReceiver = channel::Receiver; /// A bounded [crossbeam::channel] with a sender and receiver. #[allow(dead_code)] #[derive(Debug, Clone)] -pub struct BoundedChannel { +pub struct Channel { /// Sender for the channel. tx: channel::Sender, /// REceiver for the channel. rx: channel::Receiver, } -impl BoundedChannel { - /// Create a new [BoundedChannel] with a given capacity. +impl Channel { + /// Create a new [Channel] with a given capacity. pub fn with(capacity: usize) -> (BoundedChannelSender, BoundedChannelReceiver) { let (tx, rx) = channel::bounded(capacity); (tx, rx) } - /// Create a oneshot (1) [BoundedChannel]. + /// Create a oneshot (1) [Channel]. pub fn oneshot() -> (BoundedChannelSender, BoundedChannelReceiver) { let (tx, rx) = channel::bounded(1); (tx, rx) @@ -34,30 +34,36 @@ impl BoundedChannel { } /// [flume::Sender] for a bounded [flume::bounded] channel. -pub type AsyncBoundedChannelSender = flume::Sender; +pub type AsyncChannelSender = flume::Sender; /// [flume::Receiver] for a bounded [flume::bounded] channel. -pub type AsyncBoundedChannelReceiver = flume::Receiver; +pub type AsyncChannelReceiver = flume::Receiver; /// A bounded [flume] channel with sender and receiver. #[allow(dead_code)] #[derive(Debug, Clone)] -pub struct AsyncBoundedChannel { +pub struct AsyncChannel { /// Sender for the channel. tx: flume::Sender, /// REceiver for the channel. rx: flume::Receiver, } -impl AsyncBoundedChannel { - /// Create a new [AsyncBoundedChannel] with a given capacity. - pub fn with(capacity: usize) -> (AsyncBoundedChannelSender, AsyncBoundedChannelReceiver) { +impl AsyncChannel { + /// Create a new [AsyncChannel] with a given capacity. + pub fn with(capacity: usize) -> (AsyncChannelSender, AsyncChannelReceiver) { let (tx, rx) = flume::bounded(capacity); (tx, rx) } - /// Create a oneshot (1) [BoundedChannel]. - pub fn oneshot() -> (AsyncBoundedChannelSender, AsyncBoundedChannelReceiver) { + /// Create an unbounded [AsyncChannel]. + pub fn unbounded() -> (AsyncChannelSender, AsyncChannelReceiver) { + let (tx, rx) = flume::unbounded(); + (tx, rx) + } + + /// Create a oneshot (1) [Channel]. + pub fn oneshot() -> (AsyncChannelSender, AsyncChannelReceiver) { let (tx, rx) = flume::bounded(1); (tx, rx) } diff --git a/homestar-runtime/src/cli.rs b/homestar-runtime/src/cli.rs index 0165e725..30353fa9 100644 --- a/homestar-runtime/src/cli.rs +++ b/homestar-runtime/src/cli.rs @@ -5,7 +5,7 @@ use crate::{ runner::{file, response}, }; use anyhow::anyhow; -use clap::{Args, Parser}; +use clap::{Args, Parser, Subcommand}; use serde::{Deserialize, Serialize}; use std::{ net::{IpAddr, Ipv6Addr, SocketAddr}, @@ -19,19 +19,21 @@ pub use error::Error; pub(crate) mod show; pub(crate) use show::ConsoleTable; +const DEFAULT_DB_PATH: &str = "homestar.db"; const TMP_DIR: &str = "/tmp"; const HELP_TEMPLATE: &str = "{name} {version} +{about} -USAGE: - {usage} +Usage: {usage} {all-args} "; /// CLI arguments. 
-#[derive(Parser, Debug)] -#[command(bin_name = "homestar", name = "homestar", author, version, about, long_about = None, help_template = HELP_TEMPLATE)] +#[derive(Debug, Parser)] +#[command(bin_name = "homestar", name = "homestar", author, version, about, + long_about = None, help_template = HELP_TEMPLATE)] pub struct Cli { /// Homestar [Command]. #[clap(subcommand)] @@ -43,17 +45,17 @@ pub struct Cli { /// [Client]: crate::network::rpc::Client #[derive(Debug, Clone, PartialEq, Args, Serialize, Deserialize)] pub struct RpcArgs { - /// RPC Homestar runtime host to ping. + /// Homestar RPC host. #[clap( long = "host", default_value = "::1", value_hint = clap::ValueHint::Hostname )] host: IpAddr, - /// RPC Homestar runtime port to ping. + /// Homestar RPC port. #[clap(short = 'p', long = "port", default_value_t = 3030)] port: u16, - /// RPC Homestar runtime port to ping. + /// Homestar RPC timeout. #[clap(long = "timeout", default_value = "60s", value_parser = humantime::parse_duration)] timeout: Duration, } @@ -69,32 +71,36 @@ impl Default for RpcArgs { } /// CLI Argument types. -#[derive(Debug, Parser)] +#[derive(Debug, Subcommand)] pub enum Command { /// Start the Homestar runtime. Start { - /// Database url, defaults to sqlite://homestar.db. + /// Database URL, defaults to homestar.db. #[arg( long = "db", value_name = "DB", - env = "DATABASE_URL", - help = "SQLite database url" + env = "DATABASE_PATH", + value_hint = clap::ValueHint::AnyPath, + value_name = "DATABASE_PATH", + default_value = DEFAULT_DB_PATH, + help = "Database path (SQLite) [optional]" )] database_url: Option, - /// Optional runtime configuration file, otherwise use defaults. + /// Runtime configuration file (.toml). #[arg( short = 'c', long = "config", + value_hint = clap::ValueHint::FilePath, value_name = "CONFIG", - help = "runtime configuration file" + help = "Runtime configuration file (.toml) [optional]" )] runtime_config: Option, /// Daemonize the runtime, false by default. #[arg( short = 'd', long = "daemonize", - default_value_t = false, - help = "daemonize the runtime" + default_value = "false", + help = "Daemonize the runtime" )] daemonize: bool, /// Directory to place daemon files, defaults to /tmp. @@ -103,35 +109,39 @@ pub enum Command { default_value = TMP_DIR, value_hint = clap::ValueHint::DirPath, value_name = "DIR", - help = "directory to place daemon files" + help = "Directory to place daemon file(s)" )] daemon_dir: PathBuf, }, /// Stop the Homestar runtime. Stop(RpcArgs), - /// Ping the Homestar runtime. + /// Ping the Homestar runtime to see if it's running. Ping(RpcArgs), - /// Run a workflow, given a workflow file. + /// Run an IPVM-configured workflow file on the Homestar runtime. Run { /// RPC host / port arguments. #[clap(flatten)] args: RpcArgs, - /// (optional) name given to a workflow. + /// Local name associated with a workflow (optional). #[arg( short = 'n', long = "name", value_name = "NAME", - help = "(optional) name given to a workflow" + help = "Local name given to a workflow (optional)" )] name: Option, - /// Workflow file to run. + /// IPVM-configured workflow file to run. + /// Supported: + /// - JSON (.json). #[arg( short='w', long = "workflow", value_hint = clap::ValueHint::FilePath, value_name = "FILE", value_parser = clap::value_parser!(file::ReadWorkflow), - help = "path to workflow file" + help = r#"IPVM-configured workflow file to run. 
+Supported: + - JSON (.json)"# )] workflow: file::ReadWorkflow, }, diff --git a/homestar-runtime/src/cli/show.rs b/homestar-runtime/src/cli/show.rs index c631b539..d38bdd2d 100644 --- a/homestar-runtime/src/cli/show.rs +++ b/homestar-runtime/src/cli/show.rs @@ -1,3 +1,5 @@ +//! Styled, output response for console table. + use std::{ fmt, io::{self, Write}, diff --git a/homestar-runtime/src/db.rs b/homestar-runtime/src/db.rs index 5aeca7f1..f8f06238 100644 --- a/homestar-runtime/src/db.rs +++ b/homestar-runtime/src/db.rs @@ -25,7 +25,6 @@ use tracing::info; pub mod schema; pub(crate) mod utils; -pub(crate) const ENV: &str = "DATABASE_URL"; const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations/"); const PRAGMAS: &str = " PRAGMA journal_mode = WAL; -- better write-concurrency @@ -35,6 +34,9 @@ PRAGMA busy_timeout = 1000; -- sleep if the database is busy PRAGMA foreign_keys = ON; -- enforce foreign keys "; +/// Database environment variable. +pub(crate) const ENV: &str = "DATABASE_URL"; + /// A Sqlite connection [pool]. /// /// [pool]: r2d2::Pool @@ -92,7 +94,7 @@ pub trait Database: Send + Sync + Clone { fn setup(url: &str) -> Result { info!( subject = "database", - category = "homestar_init", + category = "homestar.init", "setting up database at {}, running migrations if needed", url ); @@ -119,13 +121,15 @@ pub trait Database: Send + Sync + Clone { receipt: Receipt, conn: &mut Connection, ) -> Result { - let receipt = conn.transaction::<_, diesel::result::Error, _>(|conn| { - let returned = Self::store_receipt(receipt, conn)?; - Self::store_workflow_receipt(workflow_cid, returned.cid(), conn)?; - Ok(returned) - })?; - - Ok(receipt) + conn.transaction::<_, diesel::result::Error, _>(|conn| { + if let Some(returned) = Self::store_receipt(receipt.clone(), conn)? { + Self::store_workflow_receipt(workflow_cid, returned.cid(), conn)?; + Ok(returned) + } else { + Self::store_workflow_receipt(workflow_cid, receipt.cid(), conn)?; + Ok(receipt) + } + }) } /// Store receipt given a connection to the database pool. @@ -134,12 +138,13 @@ pub trait Database: Send + Sync + Clone { fn store_receipt( receipt: Receipt, conn: &mut Connection, - ) -> Result { + ) -> Result, diesel::result::Error> { diesel::insert_into(schema::receipts::table) .values(&receipt) .on_conflict(schema::receipts::cid) .do_nothing() .get_result(conn) + .optional() } /// Store receipts given a connection to the Database pool. @@ -148,13 +153,17 @@ pub trait Database: Send + Sync + Clone { conn: &mut Connection, ) -> Result { receipts.iter().try_fold(0, |acc, receipt| { - let res = diesel::insert_into(schema::receipts::table) + if let Some(res) = diesel::insert_into(schema::receipts::table) .values(receipt) .on_conflict(schema::receipts::cid) .do_nothing() - .execute(conn)?; - - Ok::<_, diesel::result::Error>(acc + res) + .execute(conn) + .optional()? + { + Ok::<_, diesel::result::Error>(acc + res) + } else { + Ok(acc) + } }) } @@ -204,15 +213,24 @@ pub trait Database: Send + Sync + Clone { } /// Store localized workflow cid and information, e.g. number of tasks. + /// + /// On conflicts, do nothing. + /// Otherwise, return the stored workflow. fn store_workflow( workflow: workflow::Stored, conn: &mut Connection, ) -> Result { - diesel::insert_into(schema::workflows::table) + if let Some(stored) = diesel::insert_into(schema::workflows::table) .values(&workflow) .on_conflict(schema::workflows::cid) .do_nothing() .get_result(conn) + .optional()? 
+ { + Ok(stored) + } else { + Ok(workflow) + } } /// Store workflow [Cid] and [Receipt] [Cid] in the database for inner join. @@ -220,7 +238,7 @@ workflow_cid: Cid, receipt_cid: Cid, conn: &mut Connection, - ) -> Result { + ) -> Result, diesel::result::Error> { let value = StoredReceipt::new(Pointer::new(workflow_cid), Pointer::new(receipt_cid)); diesel::insert_into(schema::workflows_receipts::table) .values(&value) @@ -230,6 +248,7 @@ )) .do_nothing() .execute(conn) + .optional() } /// Store series of receipts for a workflow [Cid] in the @@ -244,8 +263,11 @@ conn: &mut Connection, ) -> Result { receipts.iter().try_fold(0, |acc, receipt| { - let res = Self::store_workflow_receipt(workflow_cid, *receipt, conn)?; - Ok::<_, diesel::result::Error>(acc + res) + if let Some(res) = Self::store_workflow_receipt(workflow_cid, *receipt, conn)? { + Ok::<_, diesel::result::Error>(acc + res) + } else { + Ok(acc) + } }) } diff --git a/homestar-runtime/src/db/utils.rs b/homestar-runtime/src/db/utils.rs index 6dc7c64c..08d76e36 100644 --- a/homestar-runtime/src/db/utils.rs +++ b/homestar-runtime/src/db/utils.rs @@ -1,5 +1,8 @@ +//! Utility functions for Database interaction. + use chrono::NaiveDateTime; +/// Trait for converting nanoseconds to a timestamp. pub(crate) trait Timestamp { fn timestamp_from_nanos(&self) -> Option; } diff --git a/homestar-runtime/src/event_handler.rs b/homestar-runtime/src/event_handler.rs index 5af1044c..0a3f9490 100644 --- a/homestar-runtime/src/event_handler.rs +++ b/homestar-runtime/src/event_handler.rs @@ -1,10 +1,11 @@ //! [EventHandler] implementation for handling network events and messages. -#[cfg(feature = "websocket-server")] -use crate::network::ws; +#[cfg(feature = "websocket-notify")] +use crate::network::webserver::{self, notifier}; #[cfg(feature = "ipfs")] use crate::network::IpfsCli; use crate::{ + channel, db::Database, network::swarm::{ComposedBehaviour, PeerDiscoveryInfo, RequestResponseKey}, settings, @@ -22,15 +23,16 @@ use swarm_event::ResponseEvent; use tokio::{runtime::Handle, select}; pub(crate) mod cache; -pub mod channel; pub(crate) mod error; pub(crate) mod event; +#[cfg(feature = "websocket-notify")] +pub(crate) mod notification; pub(crate) mod swarm_event; pub(crate) use cache::{setup_cache, CacheValue}; pub(crate) use error::RequestResponseError; pub(crate) use event::Event; -type P2PSender = channel::AsyncBoundedChannelSender; +type P2PSender = channel::AsyncChannelSender; /// Handler trait for [EventHandler] events. #[async_trait] @@ -45,53 +47,89 @@ where } /// Event loop handler for [libp2p] network events and commands. -#[cfg(feature = "websocket-server")] -#[cfg_attr( - docsrs, - doc(cfg(all(feature = "websocket-server", feature = "websocket-notify"))) -)] +#[cfg(feature = "websocket-notify")] +#[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] #[allow(missing_debug_implementations, dead_code)] pub(crate) struct EventHandler { + /// Minimum number of peers required to receive a receipt. receipt_quorum: usize, + /// Minimum number of peers required to receive workflow information. workflow_quorum: usize, + /// Timeout for p2p provider requests. p2p_provider_timeout: Duration, + /// Accessible database instance. db: DB, + /// [libp2p::swarm::Swarm] swarm instance. swarm: Swarm, + /// [moka::future::Cache] instance, used for retry logic.
cache: Arc>, - sender: Arc>, - receiver: channel::AsyncBoundedChannelReceiver, + /// [channel::AsyncChannelSender] for sending [Event]s to the [EventHandler]. + sender: Arc>, + /// [channel::AsyncChannelReceiver] for receiving [Event]s from the [EventHandler]. + receiver: channel::AsyncChannelReceiver, + /// [QueryId] to [RequestResponseKey] and [P2PSender] mapping. query_senders: FnvHashMap)>, + /// [PeerId] to [ConnectedPoint] connections mapping. connections: Connections, + /// [RequestId] to [RequestResponseKey] and [P2PSender] mapping. request_response_senders: FnvHashMap, + /// Rendezvous protocol configurations and state (cookies). rendezvous: Rendezvous, + /// Whether or not to enable pubsub. pubsub_enabled: bool, - ws_msg_sender: ws::Notifier, + /// [tokio::sync::broadcast::Sender] for websocket event + /// notification messages. + ws_evt_sender: webserver::Notifier, + /// [tokio::sync::broadcast::Sender] for websocket workflow-related + /// notification messages. + ws_workflow_sender: webserver::Notifier, + /// [libp2p::Multiaddr] addresses to dial. node_addresses: Vec, + /// [libp2p::Multiaddr] externally reachable addresses to announce to the network. announce_addresses: Vec, + /// Maximum number of externally reachable addresses to announce to the network. external_address_limit: u32, + /// Interval for polling the cache for expired entries. poll_cache_interval: Duration, } /// Event loop handler for [libp2p] network events and commands. -#[cfg(not(feature = "websocket-server"))] +#[cfg(not(feature = "websocket-notify"))] #[allow(missing_debug_implementations, dead_code)] pub(crate) struct EventHandler { + /// Minimum number of peers required to receive a receipt. receipt_quorum: usize, + /// Minimum number of peers required to receive workflow information. workflow_quorum: usize, + /// Timeout for p2p provider requests. p2p_provider_timeout: Duration, + /// Accessible database instance. db: DB, + /// [libp2p::swarm::Swarm] swarm instance. swarm: Swarm, - cache: Cache, - sender: Arc>, - receiver: channel::AsyncBoundedChannelReceiver, + /// [moka::future::Cache] instance, centered around retry logic. + cache: Arc>, + /// [channel::AsyncChannelSender] for sending [Event]s to the [EventHandler]. + sender: Arc>, + /// [channel::AsyncChannelReceiver] for receiving [Event]s from the [EventHandler]. + receiver: channel::AsyncChannelReceiver, + /// [QueryId] to [RequestResponseKey] and [P2PSender] mapping. query_senders: FnvHashMap)>, + /// [PeerId] to [ConnectedPoint] connections mapping. connections: Connections, + /// [RequestId] to [RequestResponseKey] and [P2PSender] mapping. request_response_senders: FnvHashMap, + /// Rendezvous protocol configurations and state (cookies). rendezvous: Rendezvous, + /// Whether or not to enable pubsub. pubsub_enabled: bool, + /// [libp2p::Multiaddr] addresses to dial. node_addresses: Vec, + /// [libp2p::Multiaddr] externally reachable addresses to announce to the network. announce_addresses: Vec, + /// Maximum number of externally reachable addresses to announce to the network. external_address_limit: u32, + /// Interval for polling the cache for expired entries.
poll_cache_interval: Duration, } @@ -114,28 +152,29 @@ where DB: Database, { fn setup_channel( - settings: &settings::Node, + settings: &settings::Network, ) -> ( - channel::AsyncBoundedChannelSender, - channel::AsyncBoundedChannelReceiver, + channel::AsyncChannelSender, + channel::AsyncChannelReceiver, ) { - channel::AsyncBoundedChannel::with(settings.network.events_buffer_len) + channel::AsyncChannel::with(settings.events_buffer_len) } /// Create an [EventHandler] with channel sender/receiver defaults. - #[cfg(feature = "websocket-server")] + #[cfg(feature = "websocket-notify")] pub(crate) fn new( swarm: Swarm, db: DB, - settings: &settings::Node, - ws_msg_sender: ws::Notifier, + settings: &settings::Network, + ws_evt_sender: webserver::Notifier, + ws_workflow_sender: webserver::Notifier, ) -> Self { let (sender, receiver) = Self::setup_channel(settings); let sender = Arc::new(sender); Self { - receipt_quorum: settings.network.receipt_quorum, - workflow_quorum: settings.network.workflow_quorum, - p2p_provider_timeout: settings.network.p2p_provider_timeout, + receipt_quorum: settings.libp2p.dht.receipt_quorum, + workflow_quorum: settings.libp2p.dht.workflow_quorum, + p2p_provider_timeout: settings.libp2p.dht.p2p_provider_timeout, db, swarm, cache: Arc::new(setup_cache(sender.clone())), @@ -145,32 +184,37 @@ where request_response_senders: FnvHashMap::default(), connections: Connections { peers: FnvHashMap::default(), - max_peers: settings.network.max_connected_peers, + max_peers: settings.libp2p.max_connected_peers, }, rendezvous: Rendezvous { - registration_ttl: settings.network.rendezvous_registration_ttl, - discovery_interval: settings.network.rendezvous_discovery_interval, + registration_ttl: settings.libp2p.rendezvous.registration_ttl, + discovery_interval: settings.libp2p.rendezvous.discovery_interval, discovered_peers: FnvHashMap::default(), cookies: FnvHashMap::default(), }, - pubsub_enabled: settings.network.enable_pubsub, - ws_msg_sender, - node_addresses: settings.network.node_addresses.clone(), - announce_addresses: settings.network.announce_addresses.clone(), - external_address_limit: settings.network.max_announce_addresses, - poll_cache_interval: settings.network.poll_cache_interval, + pubsub_enabled: settings.libp2p.pubsub.enable, + ws_evt_sender, + ws_workflow_sender, + node_addresses: settings.libp2p.node_addresses.clone(), + announce_addresses: settings.libp2p.announce_addresses.clone(), + external_address_limit: settings.libp2p.max_announce_addresses, + poll_cache_interval: settings.poll_cache_interval, } } /// Create an [EventHandler] with channel sender/receiver defaults. 
- #[cfg(not(feature = "websocket-server"))] - pub(crate) fn new(swarm: Swarm, db: DB, settings: &settings::Node) -> Self { + #[cfg(not(feature = "websocket-notify"))] + pub(crate) fn new( + swarm: Swarm, + db: DB, + settings: &settings::Network, + ) -> Self { let (sender, receiver) = Self::setup_channel(settings); let sender = Arc::new(sender); Self { - receipt_quorum: settings.network.receipt_quorum, - workflow_quorum: settings.network.workflow_quorum, - p2p_provider_timeout: settings.network.p2p_provider_timeout, + receipt_quorum: settings.libp2p.dht.receipt_quorum, + workflow_quorum: settings.libp2p.dht.workflow_quorum, + p2p_provider_timeout: settings.libp2p.dht.p2p_provider_timeout, db, swarm, cache: Arc::new(setup_cache(sender.clone())), @@ -180,19 +224,19 @@ where request_response_senders: FnvHashMap::default(), connections: Connections { peers: FnvHashMap::default(), - max_peers: settings.network.max_connected_peers, + max_peers: settings.libp2p.max_connected_peers, }, rendezvous: Rendezvous { - registration_ttl: settings.network.rendezvous_registration_ttl, - discovery_interval: settings.network.rendezvous_discovery_interval, + registration_ttl: settings.libp2p.rendezvous.registration_ttl, + discovery_interval: settings.libp2p.rendezvous.discovery_interval, discovered_peers: FnvHashMap::default(), cookies: FnvHashMap::default(), }, - pubsub_enabled: settings.network.enable_pubsub, - node_addresses: settings.network.node_addresses.clone(), - announce_addresses: settings.network.announce_addresses.clone(), - external_address_limit: settings.network.max_announce_addresses, - poll_cache_interval: settings.network.poll_cache_interval, + pubsub_enabled: settings.libp2p.pubsub.enable, + node_addresses: settings.libp2p.node_addresses.clone(), + announce_addresses: settings.libp2p.announce_addresses.clone(), + external_address_limit: settings.libp2p.max_announce_addresses, + poll_cache_interval: settings.poll_cache_interval, } } @@ -200,20 +244,26 @@ where pub(crate) async fn shutdown(&mut self) {} /// Get a [Arc]'ed copy of the [EventHandler] channel sender. - pub(crate) fn sender(&self) -> Arc> { + pub(crate) fn sender(&self) -> Arc> { self.sender.clone() } - /// [tokio::sync::broadcast::Sender] for sending messages through the - /// webSocket server to subscribers. - #[cfg(all(feature = "websocket-server", feature = "websocket-notify"))] - #[cfg_attr( - docsrs, - doc(cfg(all(feature = "websocket-server", feature = "websocket-notify"))) - )] + /// [tokio::sync::broadcast::Sender] for sending workflow-related messages + /// through the WebSocket server to subscribers. + #[cfg(feature = "websocket-notify")] + #[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] #[allow(dead_code)] - pub(crate) fn ws_sender(&self) -> ws::Notifier { - self.ws_msg_sender.clone() + pub(crate) fn ws_workflow_sender(&self) -> webserver::Notifier { + self.ws_workflow_sender.clone() + } + + /// [tokio::sync::broadcast::Sender] for sending event-related messages + /// through the WebSocket server to subscribers. + #[cfg(feature = "websocket-notify")] + #[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] + #[allow(dead_code)] + pub(crate) fn ws_evt_sender(&self) -> webserver::Notifier { + self.ws_evt_sender.clone() } /// Start [EventHandler] that matches on swarm and pubsub [events]. 
@@ -222,7 +272,7 @@ where #[cfg(not(feature = "ipfs"))] pub(crate) async fn start(mut self) -> Result<()> { let handle = Handle::current(); - handle.spawn(poll_cache(self.cache.clone())); + handle.spawn(poll_cache(self.cache.clone(), self.poll_cache_interval)); loop { select! { diff --git a/homestar-runtime/src/event_handler/cache.rs b/homestar-runtime/src/event_handler/cache.rs index c05386c0..a4fedb8b 100644 --- a/homestar-runtime/src/event_handler/cache.rs +++ b/homestar-runtime/src/event_handler/cache.rs @@ -1,3 +1,5 @@ +//! Event-handler cache for retry events. + use crate::{channel, event_handler::Event}; use libp2p::PeerId; use moka::{ @@ -24,6 +26,7 @@ impl ExpiryBase for Expiry { } } +/// A cache value, made-up of an expiration and data map. #[derive(Clone, Debug)] pub(crate) struct CacheValue { expiration: Duration, @@ -36,20 +39,23 @@ impl CacheValue { } } +/// Kinds of data to be stored in the cache. #[derive(Clone, Debug)] pub(crate) enum CacheData { Peer(PeerId), OnExpiration(DispatchEvent), } +/// Events to be dispatched on cache expiration. #[derive(Clone, Debug)] pub(crate) enum DispatchEvent { RegisterPeer, DiscoverPeers, } +/// Setup a cache with an eviction listener. pub(crate) fn setup_cache( - sender: Arc>, + sender: Arc>, ) -> Cache { let eviction_listener = move |_key: Arc, val: CacheValue, cause: RemovalCause| { let tx = Arc::clone(&sender); diff --git a/homestar-runtime/src/event_handler/event.rs b/homestar-runtime/src/event_handler/event.rs index 9a770efe..6cf2c33a 100644 --- a/homestar-runtime/src/event_handler/event.rs +++ b/homestar-runtime/src/event_handler/event.rs @@ -2,12 +2,14 @@ use super::EventHandler; #[cfg(feature = "websocket-notify")] -use crate::network::ws::notifier::NotifyReceipt; +use crate::event_handler::notification::{ + self, emit_receipt, EventNotificationTyp, SwarmNotification, +}; #[cfg(feature = "ipfs")] use crate::network::IpfsCli; use crate::{ db::Database, - event_handler::{Handler, P2PSender}, + event_handler::{channel::AsyncChannelSender, Handler, P2PSender, ResponseEvent}, network::{ pubsub, swarm::{CapsuleTag, RequestResponseKey, TopicMessage}, @@ -16,22 +18,26 @@ use crate::{ }; use anyhow::Result; use async_trait::async_trait; -use homestar_core::workflow::Pointer; #[cfg(feature = "websocket-notify")] -use homestar_core::{ipld::DagJson, workflow::Receipt as InvocationReceipt}; +use homestar_core::workflow::Pointer; +use homestar_core::workflow::Receipt as InvocationReceipt; use libipld::{Cid, Ipld}; use libp2p::{ kad::{record::Key, Quorum, Record}, rendezvous::Namespace, PeerId, }; +#[cfg(feature = "websocket-notify")] +use maplit::btreemap; use std::{collections::HashSet, num::NonZeroUsize, sync::Arc}; -#[cfg(feature = "ipfs")] +#[cfg(all(feature = "ipfs", not(feature = "test-utils")))] use tokio::runtime::Handle; -use tokio::sync::oneshot; use tracing::{error, info, warn}; +const RENDEZVOUS_NAMESPACE: &str = "homestar"; + /// A [Receipt] captured (inner) event. +#[allow(dead_code)] #[derive(Debug, Clone)] pub(crate) struct Captured { /// The captured receipt. @@ -91,7 +97,7 @@ pub(crate) enum Event { #[cfg(feature = "websocket-notify")] ReplayReceipts(Replay), /// General shutdown event. - Shutdown(oneshot::Sender<()>), + Shutdown(AsyncChannelSender<()>), /// Find a [Record] in the DHT, e.g. a [Receipt]. /// /// [Record]: libp2p::kad::Record @@ -111,23 +117,35 @@ pub(crate) enum Event { RegisterPeer(PeerId), /// Discover peers from a rendezvous node. DiscoverPeers(PeerId), + /// Dynamically get listeners for the swarm. 
+ GetListeners(AsyncChannelSender>), } -const RENDEZVOUS_NAMESPACE: &str = "homestar"; - +#[allow(unreachable_patterns)] impl Event { async fn handle_info(self, event_handler: &mut EventHandler) -> Result<()> where DB: Database, { match self { + Event::CapturedReceipt(captured) => { + let _ = captured.publish_and_notify(event_handler); + } Event::Shutdown(tx) => { - info!("event_handler server shutting down"); + info!( + subject = "shutdown", + category = "handle_event", + "event_handler server shutting down" + ); event_handler.shutdown().await; - let _ = tx.send(()); + let _ = tx.send_async(()).await; + } + Event::GetListeners(tx) => { + let listeners = event_handler.swarm.listeners().cloned().collect(); + let _ = tx.send_async(listeners).await; } - Event::FindRecord(record) => record.find(event_handler), - Event::RemoveRecord(record) => record.remove(event_handler), + Event::FindRecord(record) => record.find(event_handler).await, + Event::RemoveRecord(record) => record.remove(event_handler).await, Event::OutboundRequest(PeerRequest { peer, request, @@ -143,7 +161,7 @@ impl Event { .request_response_senders .insert(request_id, (request, sender)); } - Event::GetProviders(record) => record.get_providers(event_handler), + Event::GetProviders(record) => record.get_providers(event_handler).await, Event::ProvideRecord(cid, sender, capsule_tag) => { let query_id = event_handler .swarm @@ -169,7 +187,12 @@ impl Event { } } Event::Providers(Err(err)) => { - error!("failed to find providers: {}", err); + info!( + subject = "libp2p.providers.err", + category = "handle_event", + err=?err, + "failed to find providers", + ); } Event::RegisterPeer(peer_id) => { if let Some(rendezvous_client) = event_handler @@ -185,6 +208,8 @@ impl Event { Some(event_handler.rendezvous.registration_ttl.as_secs()), ) { warn!( + subject = "libp2p.register.rendezvous.err", + category = "handle_event", peer_id = peer_id.to_string(), err = format!("{err}"), "failed to register with rendezvous peer" @@ -229,7 +254,8 @@ impl Captured { } } - fn store_and_notify( + #[allow(dead_code)] + fn publish_and_notify( mut self, event_handler: &mut EventHandler, ) -> Result<(Cid, InvocationReceipt)> @@ -238,36 +264,60 @@ impl Captured { { let receipt = Db::find_receipt_by_cid(self.receipt, &mut event_handler.db.conn()?)?; let invocation_receipt = InvocationReceipt::from(&receipt); - let invocation_notification = invocation_receipt.clone(); let instruction_bytes = receipt.instruction_cid_as_bytes(); let receipt_cid = receipt.cid(); - #[cfg(all(feature = "websocket-server", feature = "websocket-notify"))] + #[cfg(feature = "websocket-notify")] { - let ws_tx = event_handler.ws_sender(); - let metadata = self.metadata.to_owned(); - let receipt = NotifyReceipt::with(invocation_notification, receipt_cid, metadata); - if let Ok(json) = receipt.to_json() { - info!( - cid = receipt_cid.to_string(), - "Sending receipt to websocket" - ); - let _ = ws_tx.notify(json); - } + emit_receipt( + event_handler.ws_workflow_sender(), + &receipt, + self.metadata.to_owned(), + ) + } + + // short-circuit if no peers + // + // - don't gossip receipt + // - don't store receipt or workflow info on DHT + if event_handler.connections.peers.is_empty() { + return Ok((self.receipt, invocation_receipt)); } if event_handler.pubsub_enabled { match event_handler.swarm.behaviour_mut().gossip_publish( pubsub::RECEIPTS_TOPIC, - TopicMessage::CapturedReceipt(receipt), + TopicMessage::CapturedReceipt(pubsub::Message::new(receipt.clone())), ) { - Ok(msg_id) => info!( - "message 
{msg_id} published on {} topic for receipt with cid: {receipt_cid}", - pubsub::RECEIPTS_TOPIC - ), - Err(_err) => { - error!( - "message not published on {} topic for receipt with cid: {receipt_cid}", + Ok(msg_id) => { + info!( + subject = "libp2p.gossip.publish", + category = "publish_event", + cid = receipt_cid.to_string(), + message_id = msg_id.to_string(), + "message published on {} topic for receipt with cid: {receipt_cid}", + pubsub::RECEIPTS_TOPIC + ); + + #[cfg(feature = "websocket-notify")] + notification::emit_event( + event_handler.ws_evt_sender(), + EventNotificationTyp::SwarmNotification( + SwarmNotification::PublishedReceiptPubsub, + ), + btreemap! { + "cid" => receipt.cid().to_string(), + "ran" => receipt.ran().to_string() + }, + ); + } + Err(err) => { + warn!( + subject = "libp2p.gossip.publish.err", + category = "publish_event", + cid = receipt_cid.to_string(), + err=?err, + "message not published on {} topic for receipt", pubsub::RECEIPTS_TOPIC ) } @@ -295,7 +345,12 @@ impl Captured { Record::new(instruction_bytes, receipt_bytes.to_vec()), receipt_quorum, ) - .map_err(|err| warn!(err=?err, "receipt not PUT on dht")); + .map_err(|err| { + warn!(subject = "libp2p.put_record.err", + category = "publish_event", + err=?err, + "receipt not PUT onto DHT") + }); Arc::make_mut(&mut self.workflow).increment_progress(receipt_cid); let workflow_cid_bytes = self.workflow.cid_as_bytes(); @@ -308,15 +363,27 @@ impl Captured { Record::new(workflow_cid_bytes, workflow_bytes), workflow_quorum, ) - .map_err(|err| warn!(err=?err, "workflow information not PUT on dht")); + .map_err(|err| { + warn!(subject = "libp2p.put_record.err", + category = "publish_event", + err=?err, + "workflow information not PUT onto DHT") + }); } else { error!( - "cannot convert workflow information {} to bytes", - self.workflow.cid() + subject = "libp2p.put_record.err", + category = "publish_event", + cid = self.workflow.cid().to_string(), + "cannot convert workflow information to bytes", ); } } else { - error!("cannot convert receipt {receipt_cid} to bytes"); + error!( + subject = "libp2p.put_record.err", + category = "publish_event", + cid = receipt_cid.to_string(), + "cannot convert receipt to bytes" + ); } Ok((self.receipt, invocation_receipt)) @@ -332,7 +399,7 @@ impl Replay { Self { pointers, metadata } } - fn notify(self, event_handler: &EventHandler) -> Result<()> + fn notify(self, event_handler: &mut EventHandler) -> Result<()> where DB: Database, { @@ -353,23 +420,58 @@ impl Replay { self.pointers.iter().collect::>() ); - receipts.into_iter().for_each(|receipt| { - let invocation_receipt = InvocationReceipt::from(&receipt); - let invocation_notification = invocation_receipt; - let receipt_cid = receipt.cid(); - - let ws_tx = event_handler.ws_sender(); - let metadata = self.metadata.to_owned(); - let receipt = NotifyReceipt::with(invocation_notification, receipt_cid, metadata); - if let Ok(json) = receipt.to_json() { - info!( - cid = receipt_cid.to_string(), - "Sending receipt to websocket" - ); - let _ = ws_tx.notify(json); - } + #[cfg(feature = "websocket-notify")] + receipts.iter().for_each(|receipt| { + emit_receipt( + event_handler.ws_workflow_sender(), + receipt, + self.metadata.to_owned(), + ); }); + // gossiping replayed receipts + if event_handler.pubsub_enabled { + receipts.into_iter().for_each(|receipt| { + let receipt_cid = receipt.cid().to_string(); + let _ = event_handler + .swarm + .behaviour_mut() + .gossip_publish( + pubsub::RECEIPTS_TOPIC, + 
TopicMessage::CapturedReceipt(pubsub::Message::new(receipt.clone())), + ) + .map(|msg_id| { + info!( + subject = "libp2p.gossip.publish.replay", + category = "publish_event", + cid = receipt_cid, + message_id = msg_id.to_string(), + "message published on {} topic for receipt with cid: {receipt_cid}", + pubsub::RECEIPTS_TOPIC + ); + + #[cfg(feature = "websocket-notify")] + notification::emit_event( + event_handler.ws_evt_sender(), + EventNotificationTyp::SwarmNotification( + SwarmNotification::PublishedReceiptPubsub, + ), + btreemap! { + "cid" => receipt.cid().to_string(), + "ran" => receipt.ran().to_string() + }, + ); + }) + .map_err(|err| { + warn!( + subject = "libp2p.gossip.publish.replay.err", + category = "publish_event", + err=?err, + cid=receipt_cid, + "message not published on {} topic for receipt", pubsub::RECEIPTS_TOPIC) + }); + }); + } Ok(()) } } @@ -384,10 +486,18 @@ impl QueryRecord { } } - fn find(self, event_handler: &mut EventHandler) + async fn find(self, event_handler: &mut EventHandler) where DB: Database, { + if event_handler.connections.peers.is_empty() { + if let Some(sender) = self.sender { + let _ = sender.send_async(ResponseEvent::NoPeersAvailable).await; + } + + return; + } + let id = event_handler .swarm .behaviour_mut() @@ -398,10 +508,18 @@ impl QueryRecord { event_handler.query_senders.insert(id, (key, self.sender)); } - fn remove(self, event_handler: &mut EventHandler) + async fn remove(self, event_handler: &mut EventHandler) where DB: Database, { + if event_handler.connections.peers.is_empty() { + if let Some(sender) = self.sender { + let _ = sender.send_async(ResponseEvent::NoPeersAvailable).await; + } + + return; + } + event_handler .swarm .behaviour_mut() @@ -415,10 +533,18 @@ impl QueryRecord { .stop_providing(&Key::new(&self.cid.to_bytes())); } - fn get_providers(self, event_handler: &mut EventHandler) + async fn get_providers(self, event_handler: &mut EventHandler) where DB: Database, { + if event_handler.connections.peers.is_empty() { + if let Some(sender) = self.sender { + let _ = sender.send_async(ResponseEvent::NoPeersAvailable).await; + } + + return; + } + let id = event_handler .swarm .behaviour_mut() @@ -449,48 +575,79 @@ where #[cfg(not(feature = "ipfs"))] async fn handle_event(self, event_handler: &mut EventHandler) { if let Err(err) = self.handle_info(event_handler).await { - error!(error=?err, "error storing event") + error!(subject = "handle.err", + category = "handle_event", + error=?err, + "error storing event") } } #[cfg(feature = "ipfs")] #[cfg_attr(docsrs, doc(cfg(feature = "ipfs")))] + #[allow(unused_variables)] async fn handle_event(self, event_handler: &mut EventHandler, ipfs: IpfsCli) { match self { Event::CapturedReceipt(captured) => { - let _ = captured - .store_and_notify(event_handler) - .map(|(cid, receipt)| { - // Spawn client call in the background, without awaiting. - let handle = Handle::current(); - let ipfs = ipfs.clone(); - handle.spawn(async move { - if let Ok(bytes) = receipt.try_into() { - match ipfs.put_receipt_bytes(bytes).await { - Ok(put_cid) => { - info!(cid = put_cid, "IPLD DAG node stored"); - #[cfg(debug_assertions)] - debug_assert_eq!(put_cid, cid.to_string()); - } - Err(err) => { - warn!(error=?err, cid=cid.to_string(), "failed to store IPLD DAG node"); + if let Ok((cid, receipt)) = captured.publish_and_notify(event_handler) { + #[cfg(not(feature = "test-utils"))] + { + // Spawn client call in the background, without awaiting. 
+ let handle = Handle::current(); + let ipfs = ipfs.clone(); + handle.spawn(async move { + if let Ok(bytes) = receipt.try_into() { + match ipfs.put_receipt_bytes(bytes).await { + Ok(put_cid) => { + info!( + subject = "ipfs.put.receipt", + category = "handle_event", + cid = put_cid, + "IPLD DAG node stored" + ); + #[cfg(debug_assertions)] + debug_assert_eq!(put_cid, cid.to_string()); + } + Err(err) => { + warn!(subject = "ipfs.put.receipt.err", + category = "handle_event", + error=?err, + cid=cid.to_string(), + "failed to store IPLD DAG node"); + } } + } else { + warn!( + subject = "ipfs.put.receipt.err", + category = "handle_event", + cid = cid.to_string(), + "failed to convert receipt to bytes" + ); } - } else { - warn!(cid=cid.to_string(), "failed to convert receipt to bytes"); - } - }) - }); + }); + } + } else { + error!( + subject = "ipfs.put.receipt.err", + category = "handle_event", + "failed to capture receipt" + ); + } } #[cfg(feature = "websocket-notify")] Event::ReplayReceipts(replay) => { if let Err(err) = replay.notify(event_handler) { - error!(error=?err, "error notifying receipts") + error!(subject = "replay.err", + category = "handle_event", + error=?err, + "error replaying and notifying receipts") } } event => { if let Err(err) = event.handle_info(event_handler).await { - error!(error=?err, "error storing event") + error!(subject = "event.err", + category = "handle_event", + error=?err, + "error storing event") } } } diff --git a/homestar-runtime/src/event_handler/notification.rs b/homestar-runtime/src/event_handler/notification.rs new file mode 100644 index 00000000..f074c07c --- /dev/null +++ b/homestar-runtime/src/event_handler/notification.rs @@ -0,0 +1,266 @@ +//! Evented notifications emitted to clients. + +use crate::{ + network::webserver::{ + notifier::{self, Header, Message, Notifier, SubscriptionTyp}, + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + }, + Receipt, +}; +use anyhow::anyhow; +use chrono::prelude::Utc; +use homestar_core::{ + ipld::DagJson, + workflow::{ + receipt::metadata::{WORKFLOW_KEY, WORKFLOW_NAME_KEY}, + Receipt as InvocationReceipt, + }, +}; +use libipld::{serde::from_ipld, Ipld}; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, fmt, str::FromStr}; +use tracing::{debug, warn}; + +pub(crate) mod receipt; +pub(crate) mod swarm; +pub(crate) use receipt::ReceiptNotification; +pub(crate) use swarm::SwarmNotification; + +const TYPE_KEY: &str = "type"; +const DATA_KEY: &str = "data"; +const TIMESTAMP_KEY: &str = "timestamp"; + +/// Send receipt notification as bytes. 
+pub(crate) fn emit_receipt( + notifier: Notifier, + receipt: &Receipt, + metadata: Option, +) { + let invocation_receipt = InvocationReceipt::from(receipt); + let receipt_cid = receipt.cid(); + let notification = ReceiptNotification::with(invocation_receipt, receipt_cid, metadata.clone()); + + if let Ok(json) = notification.to_json() { + debug!( + subject = "notification.receipt", + category = "notification", + cid = receipt_cid.to_string(), + "emitting receipt to WebSocket" + ); + if let Some(ipld) = metadata { + match (ipld.get(WORKFLOW_KEY), ipld.get(WORKFLOW_NAME_KEY)) { + (Ok(Ipld::Link(cid)), Ok(Ipld::String(name))) => { + let header = + Header::new(SubscriptionTyp::Cid(*cid), Some((name.to_string()).into())); + let _ = notifier.notify(Message::new(header, json)); + } + (Ok(Ipld::Link(cid)), Err(_err)) => { + let header = Header::new(SubscriptionTyp::Cid(*cid), None); + let _ = notifier.notify(Message::new(header, json)); + } + _ => (), + } + } + } else { + warn!( + subject = "notification.err", + category = "notification", + cid = receipt_cid.to_string(), + "unable to serialize receipt notification as bytes" + ); + } +} + +/// Send event notification as bytes. +pub(crate) fn emit_event( + notifier: Notifier, + ty: EventNotificationTyp, + data: BTreeMap<&str, String>, +) { + let header = Header::new( + SubscriptionTyp::EventSub(SUBSCRIBE_NETWORK_EVENTS_ENDPOINT.to_string()), + None, + ); + let notification = EventNotification::new(ty, data); + + if let Ok(json) = notification.to_json() { + let _ = notifier.notify(Message::new(header, json)); + } else { + warn!( + subject = "notification.err", + category = "notification", + "unable to serialize event notification as bytes: {}", + notification.typ + ); + } +} + +/// Notification sent to clients. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub(crate) struct EventNotification { + typ: EventNotificationTyp, + data: Ipld, + timestamp: i64, +} + +impl EventNotification { + pub(crate) fn new(typ: EventNotificationTyp, data: BTreeMap<&str, String>) -> Self { + let ipld_data = data + .iter() + .map(|(key, val)| (key.to_string(), Ipld::String(val.to_owned()))) + .collect(); + + Self { + typ, + data: Ipld::Map(ipld_data), + timestamp: Utc::now().timestamp_millis(), + } + } +} + +impl DagJson for EventNotification where Ipld: From {} + +impl From for Ipld { + fn from(notification: EventNotification) -> Self { + Ipld::Map(BTreeMap::from([ + ("type".into(), notification.typ.into()), + ("data".into(), notification.data), + ("timestamp".into(), notification.timestamp.into()), + ])) + } +} + +impl TryFrom for EventNotification { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let typ: EventNotificationTyp = map + .get(TYPE_KEY) + .ok_or_else(|| anyhow!("missing {TYPE_KEY}"))? + .to_owned() + .try_into()?; + + let data = map + .get(DATA_KEY) + .ok_or_else(|| anyhow!("missing {DATA_KEY}"))? + .to_owned(); + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + Ok(EventNotification { + typ, + data, + timestamp, + }) + } +} + +/// Types of notification sent to clients. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub(crate) enum EventNotificationTyp { + SwarmNotification(SwarmNotification), +} + +impl fmt::Display for EventNotificationTyp { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + EventNotificationTyp::SwarmNotification(subtype) => { + write!(f, "swarm notification: {}", subtype) + } + } + } +} + +impl DagJson for EventNotificationTyp where Ipld: From {} + +impl From for Ipld { + fn from(typ: EventNotificationTyp) -> Self { + match typ { + EventNotificationTyp::SwarmNotification(subtype) => { + Ipld::String(format!("network:{}", subtype)) + } + } + } +} + +impl TryFrom for EventNotificationTyp { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + if let Some((ty, subtype)) = from_ipld::(ipld)?.split_once(':') { + match ty { + "network" => Ok(EventNotificationTyp::SwarmNotification( + SwarmNotification::from_str(subtype)?, + )), + _ => Err(anyhow!("Missing event notification type: {}", ty)), + } + } else { + Err(anyhow!( + "Event notification type missing colon delimiter between type and subtype." + )) + } + } +} + +#[cfg(test)] +mod test { + use super::*; + use libp2p::PeerId; + use maplit::btreemap; + + #[test] + fn notification_bytes_rountrip() { + let peer_id = PeerId::random().to_string(); + let address: String = "/ip4/127.0.0.1/tcp/7000".to_string(); + + let notification = EventNotification::new( + EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished), + btreemap! { + "peer_id" => peer_id.clone(), + "address" => address.clone() + }, + ); + let bytes = notification.to_json().unwrap(); + + let parsed = EventNotification::from_json(bytes.as_ref()).unwrap(); + let data: BTreeMap = from_ipld(parsed.data).unwrap(); + + assert_eq!( + parsed.typ, + EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished) + ); + assert_eq!(data.get("peer_id").unwrap(), &peer_id); + assert_eq!(data.get("address").unwrap(), &address); + } + + #[test] + fn notification_json_string_rountrip() { + let peer_id = PeerId::random().to_string(); + let address: String = "/ip4/127.0.0.1/tcp/7000".to_string(); + + let notification = EventNotification::new( + EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished), + btreemap! { + "peer_id" => peer_id.clone(), + "address" => address.clone() + }, + ); + let json_string = notification.to_json_string().unwrap(); + + let parsed = EventNotification::from_json_string(json_string).unwrap(); + let data: BTreeMap = from_ipld(parsed.data).unwrap(); + + assert_eq!( + parsed.typ, + EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished) + ); + assert_eq!(data.get("peer_id").unwrap(), &peer_id); + assert_eq!(data.get("address").unwrap(), &address); + } +} diff --git a/homestar-runtime/src/event_handler/notification/receipt.rs b/homestar-runtime/src/event_handler/notification/receipt.rs new file mode 100644 index 00000000..8ca58583 --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/receipt.rs @@ -0,0 +1,47 @@ +//! Notification receipts. + +use homestar_core::{ipld::DagJson, workflow::Receipt}; +use libipld::{ipld, Cid, Ipld}; + +/// A [Receipt] that is sent out for websocket notifications. +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct ReceiptNotification(Ipld); + +impl ReceiptNotification { + /// Obtain a reference to the inner [Ipld] value. 
+ #[allow(dead_code)] + pub(crate) fn inner(&self) -> &Ipld { + &self.0 + } + + /// Obtain ownership of the inner [Ipld] value. + #[allow(dead_code)] + pub(crate) fn into_inner(self) -> Ipld { + self.0.to_owned() + } + + /// Create a new [ReceiptNotification]. + pub(crate) fn with(receipt: Receipt, cid: Cid, metadata: Option) -> Self { + let receipt: Ipld = receipt.into(); + let data = ipld!({ + "receipt": receipt, + "metadata": metadata.as_ref().map(|m| m.to_owned()).map_or(Ipld::Null, |m| m), + "receipt_cid": cid, + }); + ReceiptNotification(data) + } +} + +impl DagJson for ReceiptNotification where Ipld: From {} + +impl From for Ipld { + fn from(receipt: ReceiptNotification) -> Self { + receipt.0 + } +} + +impl From for ReceiptNotification { + fn from(ipld: Ipld) -> Self { + ReceiptNotification(ipld) + } +} diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs new file mode 100644 index 00000000..d5aeda46 --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/swarm.rs @@ -0,0 +1,58 @@ +// Notification types for [swarm] events. +// +// [swarm]: libp2p_swarm::Swarm + +use anyhow::anyhow; +use serde::{Deserialize, Serialize}; +use std::{fmt, str::FromStr}; + +// Swarm notification types sent to clients +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub(crate) enum SwarmNotification { + ConnnectionEstablished, + ConnnectionClosed, + ListeningOn, + OutgoingConnectionError, + IncomingConnectionError, + PublishedReceiptPubsub, + ReceivedReceiptPubsub, +} + +impl fmt::Display for SwarmNotification { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + SwarmNotification::ConnnectionEstablished => write!(f, "connectionEstablished"), + SwarmNotification::ConnnectionClosed => write!(f, "connectionClosed"), + SwarmNotification::ListeningOn => write!(f, "listeningOn"), + SwarmNotification::OutgoingConnectionError => { + write!(f, "outgoingConnectionError") + } + SwarmNotification::IncomingConnectionError => { + write!(f, "incomingConnectionError") + } + SwarmNotification::ReceivedReceiptPubsub => { + write!(f, "receivedReceiptPubsub") + } + SwarmNotification::PublishedReceiptPubsub => { + write!(f, "publishedReceiptPubsub") + } + } + } +} + +impl FromStr for SwarmNotification { + type Err = anyhow::Error; + + fn from_str(ty: &str) -> Result { + match ty { + "connectionEstablished" => Ok(Self::ConnnectionEstablished), + "connectionClosed" => Ok(Self::ConnnectionClosed), + "listeningOn" => Ok(Self::ListeningOn), + "outgoingConnectionError" => Ok(Self::OutgoingConnectionError), + "incomingConnectionError" => Ok(Self::IncomingConnectionError), + "receivedReceiptPubsub" => Ok(Self::ReceivedReceiptPubsub), + "publishedReceiptPubsub" => Ok(Self::PublishedReceiptPubsub), + _ => Err(anyhow!("Missing swarm notification type: {}", ty)), + } + } +} diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index 3bed8c4a..efa6abef 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -1,6 +1,8 @@ //! Internal libp2p [SwarmEvent] handling and [Handler] implementation. 
use super::EventHandler; +#[cfg(feature = "websocket-notify")] +use crate::event_handler::notification::{self, EventNotificationTyp, SwarmNotification}; #[cfg(feature = "ipfs")] use crate::network::IpfsCli; use crate::{ @@ -11,8 +13,11 @@ use crate::{ Event, Handler, RequestResponseError, }, libp2p::multiaddr::MultiaddrExt, - network::swarm::{ - CapsuleTag, ComposedEvent, PeerDiscoveryInfo, RequestResponseKey, HOMESTAR_PROTOCOL_VER, + network::{ + pubsub, + swarm::{ + CapsuleTag, ComposedEvent, PeerDiscoveryInfo, RequestResponseKey, HOMESTAR_PROTOCOL_VER, + }, }, receipt::{RECEIPT_TAG, VERSION_KEY}, workflow, @@ -39,6 +44,8 @@ use libp2p::{ swarm::{dial_opts::DialOpts, SwarmEvent}, PeerId, StreamProtocol, }; +#[cfg(feature = "websocket-notify")] +use maplit::btreemap; use std::{ collections::{HashMap, HashSet}, fmt, @@ -54,7 +61,9 @@ const RENDEZVOUS_NAMESPACE: &str = "homestar"; pub(crate) enum ResponseEvent { /// Found [PeerRecord] on the DHT. Found(Result), - /// Found Providers/[PeerId]s on the DHT. + /// No peers available to network with on the DHT. + NoPeersAvailable, + /// Found providers/[PeerId]s on the DHT. Providers(Result>), } @@ -106,17 +115,33 @@ async fn handle_swarm_event( SwarmEvent::Behaviour(ComposedEvent::Identify(identify_event)) => { match identify_event { identify::Event::Error { peer_id, error } => { - warn!(peer_id=peer_id.to_string(), err=?error, "error while attempting to identify the remote") + warn!(subject = "libp2p.identify.err", + category = "handle_swarm_event", + peer_id=peer_id.to_string(), + err=?error, + "error while attempting to identify the remote") } identify::Event::Sent { peer_id } => { - debug!(peer_id = peer_id.to_string(), "sent identify info to peer") + debug!( + subject = "libp2p.identify.sent", + category = "handle_swarm_event", + peer_id = peer_id.to_string(), + "sent identify info to peer" + ) } identify::Event::Received { peer_id, info } => { - debug!(peer_id=peer_id.to_string(), info=?info, "identify info received from peer"); + debug!(subject = "libp2p.identify.recv", + category = "handle_swarm_event", + peer_id=peer_id.to_string(), + info=?info, + "identify info received from peer"); // Ignore peers that do not use the Homestar protocol if info.protocol_version != HOMESTAR_PROTOCOL_VER { - info!(protocol_version=info.protocol_version, "peer was not using our homestar protocol version: {HOMESTAR_PROTOCOL_VER}"); + debug!(subject ="libp2p.identify.recv", + category="handle_swarm_event", + protocol_version=info.protocol_version, + "peer was not using our homestar protocol version: {HOMESTAR_PROTOCOL_VER}"); return; } @@ -136,7 +161,6 @@ async fn handle_swarm_event( .all(|proto| !proto.is_private()) // Identify observed a potentially valid external address that we weren't aware of. // Add it to the addresses we announce to other peers. 
- // TODO: have a set of _maybe_ external addresses that we validate with other peers first before adding it .then(|| event_handler.swarm.add_external_address(info.observed_addr)); } @@ -147,6 +171,8 @@ async fn handle_swarm_event( for addr in info.listen_addrs { behavior.kademlia.add_address(&peer_id, addr); debug!( + subject = "libp2p.identify.recv", + category = "handle_swarm_event", peer_id = peer_id.to_string(), "added identified node to kademlia routing table" ); @@ -168,6 +194,8 @@ async fn handle_swarm_event( Some(event_handler.rendezvous.registration_ttl.as_secs()), ) { warn!( + subject = "libp2p.identify.recv", + category = "handle_swarm_event", peer_id = peer_id.to_string(), err = format!("{err}"), "failed to register with rendezvous peer" @@ -185,6 +213,8 @@ async fn handle_swarm_event( } } identify::Event::Pushed { peer_id } => debug!( + subject = "libp2p.identify.pushed", + category = "handle_swarm_event", peer_id = peer_id.to_string(), "pushed identify info to peer" ), @@ -199,6 +229,8 @@ async fn handle_swarm_event( } => { if cookie.namespace() == Some(&Namespace::from_static(RENDEZVOUS_NAMESPACE)) { debug!( + subject = "libp2p.rendezvous.client.discovered", + category = "handle_swarm_event", peer_id = rendezvous_node.to_string(), "received discovery from rendezvous server" ); @@ -213,7 +245,11 @@ async fn handle_swarm_event( // Skip dialing peers if at connected peers limit if connected_peers_count >= event_handler.connections.max_peers as usize { - warn!("peers discovered through rendezvous not dialed because max connected peers limit reached"); + debug!( + subject = "libp2p.rendezvous.client.discovered.err", + category = "handle_swarm_event", + "peers discovered not dialed because max connected peers limit reached" + ); return; } @@ -254,14 +290,18 @@ async fn handle_swarm_event( ); } Err(err) => { - warn!(peer_id=peer_id.to_string(), err=?err, "failed to dial peer discovered through rendezvous"); + warn!(subject = "libp2p.rendezvous.client.discovered.err", + category = "handle_swarm_event", + peer_id=peer_id.to_string(), + err=?err, + "failed to dial discovered peer"); } }; } else if !self_registration { - warn!( - peer_id=registration.record.peer_id().to_string(), - "peer discovered through rendezvous not dialed because the max connected peers limit was reached" - ) + debug!(subject = "libp2p.rendezvous.client.discovered.err", + category = "handle_swarm_event", + peer_id=registration.record.peer_id().to_string(), + "peer discovered not dialed because the max connected peers limit was reached") } } @@ -289,22 +329,31 @@ async fn handle_swarm_event( .await; } else { // Do not dial peers that are not using our namespace - warn!(peer_id=rendezvous_node.to_string(), namespace=?cookie.namespace(), "rendezvous peer gave records from an unexpected namespace"); + debug!(subject = "libp2p.rendezvous.client.discovered.err", + category = "handle_swarm_event", + peer_id=rendezvous_node.to_string(), + namespace=?cookie.namespace(), + "rendezvous peer gave records from an unexpected namespace"); } } rendezvous::client::Event::DiscoverFailed { rendezvous_node, error, .. - } => { - error!(peer_id=rendezvous_node.to_string(), err=?error, "failed to discover peers from rendezvous peer") - } + } => warn!(subject = "libp2p.rendezvous.client.discovered.err", + category = "handle_swarm_event", + peer_id=rendezvous_node.to_string(), + err=?error, + "failed to discover peers"), + rendezvous::client::Event::Registered { rendezvous_node, ttl, .. 
} => { debug!( + subject = "libp2p.rendezvous.client.registered", + category = "handle_swarm_event", peer_id = rendezvous_node.to_string(), ttl = ttl, "registered self with rendezvous node" @@ -335,7 +384,11 @@ async fn handle_swarm_event( error, .. } => { - error!(peer_id=rendezvous_node.to_string(), err=?error, "failed to register self with rendezvous peer") + warn!(subject = "libp2p.rendezvous.client.registered.err", + category = "handle_swarm_event", + peer_id=rendezvous_node.to_string(), + err=?error, + "failed to register self with rendezvous peer") } rendezvous::client::Event::Expired { peer } => { // re-discover records from peer @@ -364,14 +417,22 @@ async fn handle_swarm_event( SwarmEvent::Behaviour(ComposedEvent::RendezvousServer(rendezvous_server_event)) => { match rendezvous_server_event { rendezvous::server::Event::DiscoverServed { enquirer, .. } => debug!( + subject = "libp2p.rendezvous.server.discover", + category = "handle_swarm_event", peer_id = enquirer.to_string(), "served rendezvous discover request to peer" ), rendezvous::server::Event::DiscoverNotServed { enquirer, error } => { - warn!(peer_id=enquirer.to_string(), err=?error, "did not serve rendezvous discover request") + warn!(subject = "libp2p.rendezvous.server.discover.err", + category = "handle_swarm_event", + peer_id=enquirer.to_string(), + err=?error, + "did not serve rendezvous discover request") } rendezvous::server::Event::PeerRegistered { peer, .. } => { debug!( + subject = "libp2p.rendezvous.server.peer_registered", + category = "handle_swarm_event", peer_id = peer.to_string(), "registered peer through rendezvous" ) @@ -381,10 +442,17 @@ async fn handle_swarm_event( namespace, error, } => { - warn!(peer_id=peer.to_string(), err=?error, namespace=?namespace, "did not register peer with rendezvous") + debug!(subject = "libp2p.rendezvous.server.peer_registered.err", + category = "handle_swarm_event", + peer_id=peer.to_string(), + err=?error, + namespace=?namespace, + "did not register peer with rendezvous") } rendezvous::server::Event::RegistrationExpired(registration) => { debug!( + subject = "libp2p.rendezvous.server.registration_expired", + category = "handle_swarm_event", peer_id = registration.record.peer_id().to_string(), "rendezvous peer registration expired on server" ) @@ -397,27 +465,53 @@ async fn handle_swarm_event( message, propagation_source, message_id, - } => match Receipt::try_from(message.data) { - // TODO: dont fail blindly if we get a non receipt message - Ok(receipt) => { - info!("got message: {receipt} from {propagation_source} with message id: {message_id}"); + } => { + let bytes: Vec = message.data; + match pubsub::Message::::try_from(bytes) { + Ok(msg) => { + let receipt = msg.payload; + info!( + subject = "libp2p.gossipsub.recv", + category = "handle_swarm_event", + peer_id = propagation_source.to_string(), + message_id = message_id.to_string(), + "message received on receipts topic: {}", + receipt.cid() + ); - // Store gossiped receipt. - let _ = event_handler - .db - .conn() - .as_mut() - .map(|conn| Db::store_receipt(receipt, conn)); + // Store gossiped receipt. + let _ = event_handler + .db + .conn() + .as_mut() + .map(|conn| Db::store_receipt(receipt.clone(), conn)); + + #[cfg(feature = "websocket-notify")] + notification::emit_event( + event_handler.ws_evt_sender(), + EventNotificationTyp::SwarmNotification( + SwarmNotification::ReceivedReceiptPubsub, + ), + btreemap! 
{ + "peerId" => propagation_source.to_string(), + "cid" => receipt.cid().to_string(), + "ran" => receipt.ran().to_string() + }, + ); + } + Err(err) => debug!(subject = "libp2p.gossipsub.err", + category = "handle_swarm_event", + err=?err, + "cannot handle incoming gossipsub message"), } - Err(err) => info!(err=?err, "cannot handle incoming event message"), - }, - gossipsub::Event::Subscribed { peer_id, topic } => { - debug!( - peer_id = peer_id.to_string(), - topic = topic.to_string(), - "subscribed to topic over gossipsub" - ) } + gossipsub::Event::Subscribed { peer_id, topic } => debug!( + subject = "libp2p.gossipsub.subscribed", + category = "handle_swarm_event", + peer_id = peer_id.to_string(), + topic = topic.to_string(), + "subscribed to topic over gossipsub" + ), _ => {} }, @@ -427,9 +521,11 @@ async fn handle_swarm_event( .. })) => { match result { - QueryResult::Bootstrap(Ok(BootstrapOk { peer, .. })) => { - debug!("successfully bootstrapped peer: {peer}") - } + QueryResult::Bootstrap(Ok(BootstrapOk { peer, .. })) => debug!( + subject = "libp2p.kad.bootstrap", + category = "handle_swarm_event", + "successfully bootstrapped peer: {peer}" + ), QueryResult::GetProviders(Ok(GetProvidersOk::FoundProviders { key: _, providers, @@ -457,7 +553,10 @@ async fn handle_swarm_event( } QueryResult::GetProviders(Err(err)) => { - warn!(err=?err, "error retrieving outbound query providers"); + warn!(subject = "libp2p.kad.get_providers.err", + category = "handle_swarm_event", + err=?err, + "error retrieving outbound query providers"); let Some((_, sender)) = event_handler.query_senders.remove(&id) else { return; @@ -471,8 +570,11 @@ async fn handle_swarm_event( } QueryResult::GetRecord(Ok(GetRecordOk::FoundRecord(peer_record))) => { debug!( + subject = "libp2p.kad.get_record.err", + category = "handle_swarm_event", "found record {:#?}, published by {:?}", - peer_record.record.key, peer_record.record.publisher + peer_record.record.key, + peer_record.record.publisher ); match peer_record.found_record() { Ok(event) => { @@ -485,8 +587,10 @@ async fn handle_swarm_event( } } Err(err) => { - warn!(err=?err, "error retrieving record"); - + warn!(subject = "libp2p.kad.get_record.err", + category = "handle_swarm_event", + err=?err, + "error retrieving record"); let Some((_, sender)) = event_handler.query_senders.remove(&id) else { return; }; @@ -499,7 +603,10 @@ async fn handle_swarm_event( } QueryResult::GetRecord(Ok(_)) => {} QueryResult::GetRecord(Err(err)) => { - warn!(err=?err, "error retrieving record"); + warn!(subject = "libp2p.kad.get_record.err", + category = "handle_swarm_event", + err=?err, + "error retrieving record"); // Upon an error, attempt to find the record on the DHT via // a provider if it's a Workflow/Info one. 
@@ -531,39 +638,67 @@ async fn handle_swarm_event( )))) .await; } else { - warn!("not a valid provider record tag: {capsule_tag}",) + warn!( + subject = "libp2p.kad.req_resp.err", + category = "handle_swarm_event", + "not a valid provider record tag: {capsule_tag}", + ) } } - None => { - info!("No provider found for outbound query {id:?}") - } + None => debug!( + subject = "libp2p.kad.req_resp.err", + category = "handle_swarm_event", + "No provider found for outbound query {id:?}" + ), } } - QueryResult::PutRecord(Ok(PutRecordOk { key })) => { - debug!("successfully put record {key:#?}"); - } - QueryResult::PutRecord(Err(err)) => { - warn!("error putting record: {err}") - } + QueryResult::PutRecord(Ok(PutRecordOk { key })) => debug!( + subject = "libp2p.kad.put_record", + category = "handle_swarm_event", + "successfully put record {key:#?}" + ), + QueryResult::PutRecord(Err(err)) => warn!( + subject = "libp2p.kad.put_record.err", + category = "handle_swarm_event", + err=?err, + "error putting record"), QueryResult::StartProviding(Ok(AddProviderOk { key })) => { // Currently, we don't send anything to the channel, // once they key is provided. let _ = event_handler.query_senders.remove(&id); - debug!("successfully providing {key:#?}"); + debug!( + subject = "libp2p.kad.provide_record", + category = "handle_swarm_event", + "successfully providing {key:#?}" + ); } QueryResult::StartProviding(Err(err)) => { // Currently, we don't send anything to the channel, // once they key is provided. let _ = event_handler.query_senders.remove(&id); - warn!("error providing key: {:#?}", err.key()); + warn!( + subject = "libp2p.kad.provide_record.err", + category = "handle_swarm_event", + "error providing key: {:#?}", + err.key() + ); } _ => {} } } + SwarmEvent::Behaviour(ComposedEvent::Kademlia(kad::Event::InboundRequest { request })) => { + debug!( + subject = "libp2p.kad.inbound_request", + category = "handle_swarm_event", + "kademlia inbound request received {request:?}" + ) + } SwarmEvent::Behaviour(ComposedEvent::Kademlia(kad::Event::RoutingUpdated { peer, .. 
})) => { debug!( + subject = "libp2p.kad.routing", + category = "handle_swarm_event", peer = peer.to_string(), "kademlia routing table updated with peer" ) @@ -612,7 +747,11 @@ async fn handle_swarm_event( } } Err(err) => { - warn!(err=?err, cid=?cid, "error retrieving workflow info"); + warn!(subject = "libp2p.req_resp.err", + category = "handle_swarm_event", + err=?err, + cid=?cid, + "error retrieving workflow info"); let _ = event_handler .swarm @@ -653,11 +792,11 @@ async fn handle_swarm_event( let _ = sender.send_async(ResponseEvent::Found(Ok(event))).await; } Err(err) => { - warn!( - err=?err, - cid = key_cid.as_str(), - "error returning capsule for request_id: {request_id}" - ); + warn!(subject = "libp2p.req_resp.resp.err", + category = "handle_swarm_event", + err=?err, + cid = key_cid.as_str(), + "error returning capsule for request_id: {request_id}"); let _ = sender.send_async(ResponseEvent::Found(Err(err))).await; } @@ -668,7 +807,9 @@ async fn handle_swarm_event( }, SwarmEvent::Behaviour(ComposedEvent::Mdns(mdns::Event::Discovered(list))) => { for (peer_id, multiaddr) in list { - info!( + debug!( + subject = "libp2p.mdns.discovered", + category = "handle_swarm_event", peer_id = peer_id.to_string(), addr = multiaddr.to_string(), "mDNS discovered a new peer" @@ -683,9 +824,10 @@ async fn handle_swarm_event( .build(), ); } else { - warn!( - peer_id = peer_id.to_string(), - "peer discovered by mDNS not dialed because max connected peers limit reached" + debug!(subject = "libp2p.mdns.discovered.err", + category = "handle_swarm_event", + peer_id = peer_id.to_string(), + "peer discovered by mDNS not dialed because max connected peers limit reached" ) } } @@ -695,13 +837,17 @@ async fn handle_swarm_event( if let Some(mdns) = behaviour.mdns.as_ref() { for (peer_id, multiaddr) in list { - info!( + debug!( + subject = "libp2p.mdns.expired", + category = "handle_swarm_event", peer_id = peer_id.to_string(), "mDNS discover peer has expired" ); if mdns.has_node(&peer_id) { behaviour.kademlia.remove_address(&peer_id, &multiaddr); debug!( + subject = "libp2p.mdns.expired", + category = "handle_swarm_event", peer_id = peer_id.to_string(), "removed peer address from kademlia table" ); @@ -711,23 +857,62 @@ async fn handle_swarm_event( } SwarmEvent::NewListenAddr { address, .. } => { let local_peer = *event_handler.swarm.local_peer_id(); + info!( + subject = "libp2p.listen.addr", + category = "handle_swarm_event", + peer_id = local_peer.to_string(), "local node is listening on {}", - address.with(Protocol::P2p(local_peer)) + address + ); + + #[cfg(feature = "websocket-notify")] + notification::emit_event( + event_handler.ws_evt_sender(), + EventNotificationTyp::SwarmNotification(SwarmNotification::ListeningOn), + btreemap! { + "peerId" => local_peer.to_string(), + "address" => address.to_string() + }, ); } SwarmEvent::IncomingConnection { .. } => {} SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. 
} => { - debug!(peer_id=peer_id.to_string(), endpoint=?endpoint, "peer connection established"); + debug!(subject = "libp2p.conn.established", + category = "handle_swarm_event", + peer_id=peer_id.to_string(), + endpoint=?endpoint, + "peer connection established"); + // add peer to connected peers list - event_handler.connections.peers.insert(peer_id, endpoint); + event_handler + .connections + .peers + .insert(peer_id, endpoint.clone()); + + #[cfg(feature = "websocket-notify")] + notification::emit_event( + event_handler.ws_evt_sender(), + EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished), + btreemap! { + "peerId" => peer_id.to_string(), + "address" => endpoint.get_remote_address().to_string() + }, + ); } - SwarmEvent::ConnectionClosed { peer_id, cause, .. } => { + SwarmEvent::ConnectionClosed { + peer_id, + cause, + endpoint, + .. + } => { debug!( + subject = "libp2p.conn.closed", + category = "handle_swarm_event", peer_id = peer_id.to_string(), - "peer connection closed, cause: {cause:?}" + "peer connection closed, cause: {cause:#?}, endpoint: {endpoint:#?}" ); event_handler.connections.peers.remove_entry(&peer_id); @@ -738,7 +923,11 @@ async fn handle_swarm_event( } else { // TODO: We may want to check the multiadress without relying on // the peer ID. This would give more flexibility when configuring nodes. - warn!("Configured peer must include a peer ID: {multiaddr}"); + warn!( + subject = "libp2p.conn.closed", + category = "handle_swarm_event", + "Configured peer must include a peer ID: {multiaddr}" + ); true } }) { @@ -749,22 +938,45 @@ async fn handle_swarm_event( .remove_peer(&peer_id); debug!( + subject = "libp2p.kad.remove", + category = "handle_swarm_event", peer_id = peer_id.to_string(), "removed peer from kademlia table" ); } + + #[cfg(feature = "websocket-notify")] + notification::emit_event( + event_handler.ws_evt_sender(), + EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionClosed), + btreemap! { + "peerId" => peer_id.to_string(), + "address" => endpoint.get_remote_address().to_string() + }, + ); } SwarmEvent::OutgoingConnectionError { connection_id, peer_id, error, } => { - error!( - peer_id=peer_id.map(|p| p.to_string()).unwrap_or_default(), - err=?error, - connection_id=?connection_id, - "outgoing connection error" - ) + warn!(subject = "libp2p.outgoing.err", + category = "handle_swarm_event", + peer_id=peer_id.map(|p| p.to_string()).unwrap_or_default(), + err=?error, + connection_id=?connection_id, + "outgoing connection error" + ); + + #[cfg(feature = "websocket-notify")] + notification::emit_event( + event_handler.ws_evt_sender(), + EventNotificationTyp::SwarmNotification(SwarmNotification::OutgoingConnectionError), + btreemap! 
{ + "peerId" => peer_id.map_or("Unknown peer".into(), |p| p.to_string()), + "error" => error.to_string() + }, + ); } SwarmEvent::IncomingConnectionError { connection_id, @@ -772,22 +984,49 @@ async fn handle_swarm_event( send_back_addr, error, } => { - error!( - err=?error, - connection_id=?connection_id, - local_address=local_addr.to_string(), - remote_address=send_back_addr.to_string(), - "incoming connection error" - ) + warn!(subject = "libp2p.incoming.err", + category = "handle_swarm_event", + err=?error, + connection_id=?connection_id, + local_address=local_addr.to_string(), + remote_address=send_back_addr.to_string(), + "incoming connection error"); + + #[cfg(feature = "websocket-notify")] + notification::emit_event( + event_handler.ws_evt_sender(), + EventNotificationTyp::SwarmNotification(SwarmNotification::IncomingConnectionError), + btreemap! { + "error" => error.to_string() + }, + ); } SwarmEvent::ListenerError { listener_id, error } => { - error!(err=?error, listener_id=?listener_id, "listener error") + error!(subject = "libp2p.listener.err", + category = "handle_swarm_event", + err=?error, + listener_id=?listener_id, + "listener error") } SwarmEvent::Dialing { peer_id, .. } => match peer_id { - Some(id) => debug!(peer_id = id.to_string(), "dialing peer"), - None => debug!("dialing an unknown peer"), + Some(id) => { + debug!( + subject = "libp2p.dialing", + category = "handle_swarm_event", + peer_id = id.to_string(), + "dialing peer" + ) + } + None => debug!( + subject = "libp2p.dialing", + category = "handle_swarm_event", + "dialing an unknown peer" + ), }, - e => debug!(e=?e, "uncaught event"), + e => debug!(subject = "libp2p.event", + category = "handle_swarm_event", + e=?e, + "uncaught event"), } } @@ -823,10 +1062,7 @@ fn decode_capsule(key_cid: Cid, value: &Vec) -> Result { Ok(ipld) => Err(anyhow!( "decode mismatch: expected an Ipld map, got {ipld:#?}", )), - Err(err) => { - warn!(error=?err, "error deserializing record value"); - Err(anyhow!("error deserializing record value")) - } + Err(err) => Err(anyhow!("error deserializing record value: {err}")), } } diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index e6f84323..b81b5364 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -17,6 +17,7 @@ //! [homestar-core]: homestar_core //! [homestar-wasm]: homestar_wasm +pub mod channel; pub mod cli; pub mod daemon; pub mod db; @@ -37,12 +38,14 @@ pub mod workflow; pub mod test_utils; pub use db::Db; -pub use event_handler::channel; pub(crate) mod libp2p; pub use logger::*; pub(crate) mod metrics; +#[allow(unused_imports)] +pub(crate) use event_handler::EventHandler; pub use receipt::{Receipt, RECEIPT_TAG, VERSION_KEY}; pub use runner::Runner; +pub(crate) use scheduler::TaskScheduler; pub use settings::Settings; pub(crate) use worker::Worker; pub use workflow::WORKFLOW_TAG; diff --git a/homestar-runtime/src/libp2p/mod.rs b/homestar-runtime/src/libp2p/mod.rs index 8254556f..c2ac9fe1 100644 --- a/homestar-runtime/src/libp2p/mod.rs +++ b/homestar-runtime/src/libp2p/mod.rs @@ -1 +1,3 @@ +//! [libp2p] utilities. + pub(crate) mod multiaddr; diff --git a/homestar-runtime/src/libp2p/multiaddr.rs b/homestar-runtime/src/libp2p/multiaddr.rs index 894d7008..12f43531 100644 --- a/homestar-runtime/src/libp2p/multiaddr.rs +++ b/homestar-runtime/src/libp2p/multiaddr.rs @@ -1,5 +1,7 @@ +/// Multiaddr extension methods. use libp2p::{multiaddr::Protocol, Multiaddr, PeerId}; +/// [Multiaddr] extension trait. 
pub(crate) trait MultiaddrExt { fn peer_id(&self) -> Option; } diff --git a/homestar-runtime/src/logger.rs b/homestar-runtime/src/logger.rs index 4deb4d87..767d975f 100644 --- a/homestar-runtime/src/logger.rs +++ b/homestar-runtime/src/logger.rs @@ -59,6 +59,7 @@ fn init( .add_directive("tarpc=info".parse().expect(DIRECTIVE_EXPECT)) .add_directive("tower_http=info".parse().expect(DIRECTIVE_EXPECT)) .add_directive("moka=info".parse().expect(DIRECTIVE_EXPECT)) + .add_directive("jsonrpsee=info".parse().expect(DIRECTIVE_EXPECT)) }); #[cfg(all( diff --git a/homestar-runtime/src/main.rs b/homestar-runtime/src/main.rs index 4ba64d86..9639b3aa 100644 --- a/homestar-runtime/src/main.rs +++ b/homestar-runtime/src/main.rs @@ -30,14 +30,14 @@ fn main() -> Result<()> { let _guard = if daemonize { daemon::start(daemon_dir.clone()) .expect("runner to be started as a daemon process"); - FileLogger::init(daemon_dir, settings.monitoring()) + FileLogger::init(daemon_dir, settings.node().monitoring()) } else { - Logger::init(settings.monitoring()) + Logger::init(settings.node().monitoring()) }; info!( subject = "settings", - category = "homestar_init", + category = "homestar.init", "starting with settings: {:?}", settings, ); @@ -47,7 +47,7 @@ fn main() -> Result<()> { info!( subject = "database", - category = "homestar_init", + category = "homestar.init", "starting with database: {}", Db::url().expect("database url to be provided"), ); diff --git a/homestar-runtime/src/metrics.rs b/homestar-runtime/src/metrics.rs index bba7533f..d5a3623a 100644 --- a/homestar-runtime/src/metrics.rs +++ b/homestar-runtime/src/metrics.rs @@ -2,19 +2,35 @@ use crate::settings; use anyhow::Result; +use metrics_exporter_prometheus::PrometheusHandle; +#[cfg(feature = "monitoring")] use tokio::runtime::Handle; mod exporter; +#[cfg(feature = "monitoring")] mod node; /// Start metrics collection and setup scrape endpoint. -pub(crate) async fn start(settings: &settings::Monitoring) -> Result<()> { - let handle = Handle::current(); - exporter::setup_metrics_recorder(settings)?; +/// Also, spawn a task to collect process metrics at a regular interval. +#[cfg(feature = "monitoring")] +pub(crate) async fn start( + monitor_settings: &settings::Monitoring, + network_settings: &settings::Network, +) -> Result { + let metrics_hdl = exporter::setup_metrics_recorder(network_settings)?; // Spawn tick-driven process collection task - #[cfg(feature = "monitoring")] - handle.spawn(node::collect_metrics(settings.process_collector_interval)); + let handle = Handle::current(); + handle.spawn(node::collect_metrics( + monitor_settings.process_collector_interval, + )); - Ok(()) + Ok(metrics_hdl) +} + +/// Start metrics collection and setup scrape endpoint. +#[cfg(not(feature = "monitoring"))] +pub(crate) async fn start(network_settings: &settings::Network) -> Result { + let metrics_hdl = exporter::setup_metrics_recorder(network_settings)?; + Ok(metrics_hdl) } diff --git a/homestar-runtime/src/metrics/exporter.rs b/homestar-runtime/src/metrics/exporter.rs index 675c53a5..5c027aa4 100644 --- a/homestar-runtime/src/metrics/exporter.rs +++ b/homestar-runtime/src/metrics/exporter.rs @@ -1,31 +1,42 @@ //! Metrics Prometheus recorder. 
-use crate::{metrics::node, settings}; -use metrics_exporter_prometheus::{Matcher, PrometheusBuilder}; +#[cfg(feature = "monitoring")] +use crate::metrics::node; +use crate::settings; +use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; +use metrics_util::layers::{PrefixLayer, Stack}; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use tokio::runtime::Handle; /// Set up Prometheus buckets for matched metrics and install recorder. -pub(crate) fn setup_metrics_recorder(settings: &settings::Monitoring) -> anyhow::Result<()> { +pub(crate) fn setup_metrics_recorder( + settings: &settings::Network, +) -> anyhow::Result { const EXPONENTIAL_SECONDS: &[f64] = &[ 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, ]; - let socket = SocketAddr::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - settings.metrics_port, - ); + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), settings.metrics.port); - #[cfg(feature = "monitoring")] - node::describe(); - - PrometheusBuilder::new() + let (recorder, exporter) = PrometheusBuilder::new() .set_buckets_for_metric( Matcher::Suffix("_duration_seconds".to_string()), EXPONENTIAL_SECONDS, )? .with_http_listener(socket) - .install() + .build() .expect("failed to install recorder/exporter"); - Ok(()) + let hdl = recorder.handle(); + let rt_hdl = Handle::current(); + rt_hdl.spawn(exporter); + + Stack::new(recorder) + .push(PrefixLayer::new("homestar")) + .install()?; + + #[cfg(feature = "monitoring")] + node::describe(); + + Ok(hdl) } diff --git a/homestar-runtime/src/metrics/node.rs b/homestar-runtime/src/metrics/node.rs index 2544692e..d25e575d 100644 --- a/homestar-runtime/src/metrics/node.rs +++ b/homestar-runtime/src/metrics/node.rs @@ -1,14 +1,13 @@ //! Node metrics, including system, process, network, and database information -use crate::db::ENV as DATABASE_URL; +use crate::Db; use anyhow::{anyhow, Context, Result}; use metrics::{describe_counter, describe_gauge, Unit}; -use std::{env, time::Duration}; +use std::time::Duration; use sysinfo::{ get_current_pid, CpuRefreshKind, Disk, DiskExt, NetworkExt, Networks, NetworksExt, ProcessExt, ProcessRefreshKind, RefreshKind, System, SystemExt, }; -use tokio::fs; use tracing::{info, warn}; /// Create and describe gauges for node metrics. @@ -109,8 +108,8 @@ pub(crate) fn describe() { } /// Collect node metrics on a settings-defined interval. 
-pub(crate) async fn collect_metrics(interval: u64) { - let mut interval = tokio::time::interval(Duration::from_millis(interval)); +pub(crate) async fn collect_metrics(interval: Duration) { + let mut interval = tokio::time::interval(interval); // Log static system info log_static_info(); @@ -152,11 +151,11 @@ async fn collect_stats(sys: System) -> Result<()> { .iter() .fold(0, |acc, interface| acc + interface.1.received()) } - async fn compute_database_size() -> Option { - let url = env::var(DATABASE_URL).unwrap(); - match fs::metadata(url).await { - Ok(metadata) => Some(metadata.len()), - Err(_) => None, + async fn compute_database_size() -> Option { + if let Ok(size) = Db::size().await { + Some(size.get_value()) + } else { + None } } @@ -223,7 +222,7 @@ async fn collect_stats(sys: System) -> Result<()> { // Database metrics if let Some(database_size) = compute_database_size().await { - metrics::gauge!("database_size_bytes", database_size as f64); + metrics::gauge!("database_size_bytes", database_size); } Ok(()) diff --git a/homestar-runtime/src/network/error.rs b/homestar-runtime/src/network/error.rs index 10f18416..1e0319f1 100644 --- a/homestar-runtime/src/network/error.rs +++ b/homestar-runtime/src/network/error.rs @@ -1,3 +1,5 @@ +//! # Error types centered around the networking. + #[derive(thiserror::Error, Debug)] pub(crate) enum Error { #[error("pubsub error: {0}")] diff --git a/homestar-runtime/src/network/ipfs.rs b/homestar-runtime/src/network/ipfs.rs index 1e018634..2322ca97 100644 --- a/homestar-runtime/src/network/ipfs.rs +++ b/homestar-runtime/src/network/ipfs.rs @@ -2,14 +2,17 @@ //! //! [IpfsClient]: ipfs_api::IpfsClient +use crate::settings; use anyhow::Result; use futures::TryStreamExt; use homestar_core::workflow::Receipt; +use http::uri::Scheme; use ipfs_api::{ request::{DagCodec, DagPut}, response::DagPutResponse, IpfsApi, IpfsClient, }; +use ipfs_api_backend_hyper::TryFromUri; use libipld::{Cid, Ipld}; use std::{io::Cursor, sync::Arc}; use url::Url; @@ -20,15 +23,21 @@ const SHA3_256: &str = "sha3-256"; #[allow(missing_debug_implementations)] pub(crate) struct IpfsCli(Arc); -impl Clone for IpfsCli { - fn clone(&self) -> Self { - IpfsCli(Arc::clone(&self.0)) +impl IpfsCli { + /// Create a new [IpfsCli] from a [IpfsClient]. + pub(crate) fn new(settings: &settings::Ipfs) -> Result { + let cli = Self(Arc::new(IpfsClient::from_host_and_port( + Scheme::HTTP, + settings.host.as_str(), + settings.port, + )?)); + Ok(cli) } } -impl Default for IpfsCli { - fn default() -> Self { - Self(Arc::new(IpfsClient::default())) +impl Clone for IpfsCli { + fn clone(&self) -> Self { + IpfsCli(Arc::clone(&self.0)) } } @@ -69,8 +78,7 @@ impl IpfsCli { let DagPutResponse { cid } = self .0 .dag_put_with_options(Cursor::new(receipt_bytes), dag_builder) - .await - .expect("a CID"); + .await?; Ok(cid.cid_string) } diff --git a/homestar-runtime/src/network/mod.rs b/homestar-runtime/src/network/mod.rs index 257f0ae0..e47cb79e 100644 --- a/homestar-runtime/src/network/mod.rs +++ b/homestar-runtime/src/network/mod.rs @@ -1,7 +1,8 @@ -//! [libp2p], [websocket], and [ipfs] networking interfaces. +//! [libp2p], multi-use [HTTP] and [WebSocket] server, and [ipfs] networking +//! interfaces. //! -//! [libp2p]: libp2p -//! [websocket]: axum::extract::ws +//! [HTTP]: jsonrpsee::server +//! [WebSocket]: jsonrpsee::server //! 
[ipfs]: ipfs_api pub(crate) mod error; @@ -10,8 +11,9 @@ pub(crate) mod ipfs; pub(crate) mod pubsub; pub mod rpc; pub(crate) mod swarm; -#[cfg(feature = "websocket-server")] -pub(crate) mod ws; +pub(crate) mod webserver; +#[allow(unused_imports)] +pub(crate) use error::Error; #[cfg(feature = "ipfs")] pub(crate) use ipfs::IpfsCli; diff --git a/homestar-runtime/src/network/pubsub.rs b/homestar-runtime/src/network/pubsub.rs index 710b2c41..fafe48a7 100644 --- a/homestar-runtime/src/network/pubsub.rs +++ b/homestar-runtime/src/network/pubsub.rs @@ -5,7 +5,7 @@ use crate::settings; use anyhow::Result; use libp2p::{ - gossipsub::{self, ConfigBuilder, Message, MessageAuthenticity, MessageId, ValidationMode}, + gossipsub::{self, ConfigBuilder, MessageAuthenticity, MessageId, ValidationMode}, identity::Keypair, }; use std::{ @@ -13,6 +13,9 @@ use std::{ hash::{Hash, Hasher}, }; +pub(crate) mod message; +pub(crate) use message::Message; + /// [Receipt]-related topic for pub(gossip)sub. /// /// [Receipt]: homestar_core::workflow::receipt @@ -21,25 +24,27 @@ pub(crate) const RECEIPTS_TOPIC: &str = "receipts"; /// Setup [gossipsub] mesh protocol with default configuration. /// /// [gossipsub]: libp2p::gossipsub -pub(crate) fn new(keypair: Keypair, settings: &settings::Node) -> Result { +pub(crate) fn new(keypair: Keypair, settings: &settings::Pubsub) -> Result { // To content-address message, we can take the hash of message and use it as an ID. - let message_id_fn = |message: &Message| { + let message_id_fn = |message: &gossipsub::Message| { let mut s = DefaultHasher::new(); message.data.hash(&mut s); MessageId::from(s.finish().to_string()) }; let gossipsub_config = ConfigBuilder::default() - .heartbeat_interval(settings.network.pubsub_heartbeat) - .idle_timeout(settings.network.pubsub_idle_timeout) + .heartbeat_interval(settings.heartbeat) + .idle_timeout(settings.idle_timeout) // This sets the kind of message validation. The default is Strict (enforce message signing). .validation_mode(ValidationMode::Strict) - .mesh_n_low(1) - .mesh_outbound_min(1) - .mesh_n(2) + .max_transmit_size(settings.max_transmit_size) + .mesh_n_low(settings.mesh_n_low) + .mesh_outbound_min(settings.mesh_outbound_min) + .mesh_n(settings.mesh_n) + .mesh_n_high(settings.mesh_n_high) // Content-address messages. No two messages of the same content will be propagated. .message_id_fn(message_id_fn) - .duplicate_cache_time(settings.network.pubsub_duplication_cache_time) + .duplicate_cache_time(settings.duplication_cache_time) .support_floodsub() .build() .map_err(anyhow::Error::msg)?; diff --git a/homestar-runtime/src/network/pubsub/message.rs b/homestar-runtime/src/network/pubsub/message.rs new file mode 100644 index 00000000..8153af05 --- /dev/null +++ b/homestar-runtime/src/network/pubsub/message.rs @@ -0,0 +1,157 @@ +//! [Message] type for messages transmitted over [gossipsub]. +//! +//! 
[gossipsub]: libp2p::gossipsub + +use anyhow::{anyhow, Result}; +use homestar_core::workflow::Nonce; +use libipld::{self, cbor::DagCborCodec, prelude::Codec, serde::from_ipld, Ipld}; +use std::collections::BTreeMap; + +const HEADER_KEY: &str = "header"; +const PAYLOAD_KEY: &str = "payload"; +const NONCE_KEY: &str = "nonce"; + +#[derive(Debug)] +pub(crate) struct Message { + pub(crate) header: Header, + pub(crate) payload: T, +} + +impl Message { + pub(crate) fn new(payload: T) -> Self { + let header = Header { + nonce: Nonce::generate(), + }; + + Self { header, payload } + } +} + +impl TryFrom> for Vec +where + Ipld: From> + From, +{ + type Error = anyhow::Error; + + fn try_from(message: Message) -> Result { + let message_ipld = Ipld::from(message); + DagCborCodec.encode(&message_ipld) + } +} + +impl TryFrom> for Message +where + T: TryFrom, +{ + type Error = anyhow::Error; + + fn try_from(bytes: Vec) -> Result { + let ipld: Ipld = DagCborCodec.decode(&bytes)?; + ipld.try_into() + .map_err(|_| anyhow!("Could not convert IPLD to pubsub message.")) + } +} + +impl From> for Ipld +where + Ipld: From, +{ + fn from(message: Message) -> Self { + Ipld::Map(BTreeMap::from([ + (HEADER_KEY.into(), message.header.into()), + (PAYLOAD_KEY.into(), message.payload.into()), + ])) + } +} + +impl TryFrom for Message +where + T: TryFrom, +{ + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let header = map + .get(HEADER_KEY) + .ok_or_else(|| anyhow!("missing {HEADER_KEY}"))? + .to_owned() + .try_into()?; + + let payload = map + .get(PAYLOAD_KEY) + .ok_or_else(|| anyhow!("missing {PAYLOAD_KEY}"))? + .to_owned() + .try_into()?; + + Ok(Message { header, payload }) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct Header { + nonce: Nonce, +} + +impl From
for Ipld {
+    fn from(header: Header) -> Self {
+        Ipld::Map(BTreeMap::from([(
+            NONCE_KEY.into(),
+            header.nonce.to_owned().into(),
+        )]))
+    }
+}
+
+impl TryFrom<Ipld> for Header {
+    type Error = anyhow::Error;
+
+    fn try_from(ipld: Ipld) -> Result<Self, Self::Error> {
+        let map = from_ipld::<BTreeMap<String, Ipld>>(ipld)?;
+
+        let nonce = map
+            .get(NONCE_KEY)
+            .ok_or_else(|| anyhow!("Missing {NONCE_KEY}"))?
+            .try_into()?;
+
+        Ok(Header { nonce })
+    }
+}
+
+impl TryFrom<Header>
for Vec { + type Error = anyhow::Error; + + fn try_from(header: Header) -> Result { + let header_ipld = Ipld::from(header); + DagCborCodec.encode(&header_ipld) + } +} + +impl TryFrom> for Header { + type Error = anyhow::Error; + + fn try_from(bytes: Vec) -> Result { + let ipld: Ipld = DagCborCodec.decode(&bytes)?; + ipld.try_into() + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::{test_utils, Receipt}; + + #[test] + fn pubsub_message_rountrip() { + let (_, receipt) = test_utils::receipt::receipts(); + let message = Message::new(receipt.clone()); + let bytes: Vec = message + .try_into() + .expect("Could not serialize message into bytes"); + + let parsed = + Message::::try_from(bytes).expect("Could not deserialize message from bytes"); + + assert_eq!(receipt, parsed.payload); + } +} diff --git a/homestar-runtime/src/network/rpc.rs b/homestar-runtime/src/network/rpc.rs index 9f4f0e72..f260634f 100644 --- a/homestar-runtime/src/network/rpc.rs +++ b/homestar-runtime/src/network/rpc.rs @@ -1,7 +1,7 @@ -//! RPC server implementation. +//! CLI-focused RPC server implementation. use crate::{ - channel::{AsyncBoundedChannel, AsyncBoundedChannelReceiver, AsyncBoundedChannelSender}, + channel::{AsyncChannel, AsyncChannelReceiver, AsyncChannelSender}, runner::{self, file::ReadWorkflow, response, RpcSender}, settings, }; @@ -14,7 +14,7 @@ use tarpc::{ context, server::{self, incoming::Incoming, Channel}, }; -use tokio::{runtime::Handle, select, sync::oneshot, time}; +use tokio::{runtime::Handle, select, time}; use tokio_serde::formats::MessagePack; use tracing::{info, warn}; @@ -34,7 +34,7 @@ pub(crate) enum ServerMessage { /// Message sent by the [Runner] to start a graceful shutdown. /// /// [Runner]: crate::Runner - GracefulShutdown(oneshot::Sender<()>), + GracefulShutdown(AsyncChannelSender<()>), /// Message sent to start a [Workflow] run by reading a [Workflow] file. /// /// [Workflow]: homestar_core::Workflow @@ -47,6 +47,7 @@ pub(crate) enum ServerMessage { /// /// [Workflow]: homestar_core::Workflow RunErr(runner::Error), + /// For skipping server messages. Skip, } @@ -70,9 +71,9 @@ pub(crate) struct Server { /// [SocketAddr] of the RPC server. pub(crate) addr: SocketAddr, /// Sender for messages to be sent to the RPC server. - pub(crate) sender: Arc>, + pub(crate) sender: Arc>, /// Receiver for messages sent to the RPC server. - pub(crate) receiver: AsyncBoundedChannelReceiver, + pub(crate) receiver: AsyncChannelReceiver, /// Sender for messages to be sent to the [Runner]. /// /// [Runner]: crate::Runner @@ -118,15 +119,15 @@ impl Interface for ServerHandler { name: Option, workflow_file: ReadWorkflow, ) -> Result, Error> { - let (tx, rx) = oneshot::channel(); + let (tx, rx) = AsyncChannel::oneshot(); self.runner_sender - .send((ServerMessage::Run((name, workflow_file)), Some(tx))) + .send_async((ServerMessage::Run((name, workflow_file)), Some(tx))) .await .map_err(|e| Error::FailureToSendOnChannel(e.to_string()))?; let now = time::Instant::now(); select! 
{ - Ok(msg) = rx => { + Ok(msg) = rx.recv_async() => { match msg { ServerMessage::RunAck(response) => { Ok(response) @@ -137,7 +138,9 @@ impl Interface for ServerHandler { }, _ = time::sleep_until(now + self.timeout) => { let s = format!("server timeout of {} ms reached", self.timeout.as_millis()); - info!("{s}"); + info!(subject = "rpc.timeout", + category = "rpc", + "{s}"); Err(Error::FailureToReceiveOnChannel(s)) } @@ -148,7 +151,7 @@ impl Interface for ServerHandler { } async fn stop(self, _: context::Context) -> Result<(), Error> { self.runner_sender - .send((ServerMessage::ShutdownCmd, None)) + .send_async((ServerMessage::ShutdownCmd, None)) .await .map_err(|e| Error::FailureToSendOnChannel(e.to_string())) } @@ -157,19 +160,19 @@ impl Interface for ServerHandler { impl Server { /// Create a new instance of the RPC server. pub(crate) fn new(settings: &settings::Network, runner_sender: Arc) -> Self { - let (tx, rx) = AsyncBoundedChannel::oneshot(); + let (tx, rx) = AsyncChannel::oneshot(); Self { - addr: SocketAddr::new(settings.rpc_host, settings.rpc_port), + addr: SocketAddr::new(settings.rpc.host, settings.rpc.port), sender: tx.into(), receiver: rx, runner_sender, - max_connections: settings.rpc_max_connections, - timeout: settings.rpc_server_timeout, + max_connections: settings.rpc.max_connections, + timeout: settings.rpc.server_timeout, } } /// Return a RPC server channel sender. - pub(crate) fn sender(&self) -> Arc> { + pub(crate) fn sender(&self) -> Arc> { self.sender.clone() } @@ -179,7 +182,12 @@ impl Server { tarpc::serde_transport::tcp::listen(self.addr, MessagePack::default).await?; listener.config_mut().max_frame_length(usize::MAX); - info!("RPC server listening on {}", self.addr); + info!( + subject = "rpc.spawn", + category = "rpc", + "RPC server listening on {}", + self.addr + ); // setup valved listener for cancellation let (exit, incoming) = Valved::new(listener); @@ -202,11 +210,16 @@ impl Server { select! { Ok(ServerMessage::GracefulShutdown(tx)) = self.receiver.recv_async() => { - info!("RPC server shutting down"); + info!(subject = "shutdown", + category = "homestar.shutdown", + "RPC server shutting down"); drop(exit); - let _ = tx.send(()); + let _ = tx.send_async(()).await; } - _ = fut => warn!("RPC server exited unexpectedly"), + _ = fut => + warn!(subject = "rpc.spawn.err", + category = "rpc", + "RPC server exited unexpectedly"), } }); diff --git a/homestar-runtime/src/network/swarm.rs b/homestar-runtime/src/network/swarm.rs index 825b221d..22ab6b2a 100644 --- a/homestar-runtime/src/network/swarm.rs +++ b/homestar-runtime/src/network/swarm.rs @@ -1,9 +1,6 @@ -#![allow(missing_docs)] - //! Sets up a [libp2p] [Swarm], containing the state of the network and the way //! it should behave. //! -//! [libp2p]: libp2p //! [Swarm]: libp2p::Swarm use crate::{ @@ -11,6 +8,7 @@ use crate::{ settings, Receipt, RECEIPT_TAG, WORKFLOW_TAG, }; use anyhow::{Context, Result}; +use const_format::formatcp; use enum_assoc::Assoc; use faststr::FastStr; use libp2p::{ @@ -32,31 +30,38 @@ use serde::{Deserialize, Serialize}; use std::fmt; use tracing::{info, warn}; -pub(crate) const HOMESTAR_PROTOCOL_VER: &str = "homestar/0.0.1"; +/// Homestar protocol version, shared among peers, tied to the homestar version. +pub(crate) const HOMESTAR_PROTOCOL_VER: &str = formatcp!("homestar/{VERSION}"); + +const VERSION: &str = env!("CARGO_PKG_VERSION"); /// Build a new [Swarm] with a given transport and a tokio executor. 
-pub(crate) async fn new(settings: &settings::Node) -> Result> { +pub(crate) async fn new(settings: &settings::Network) -> Result> { let keypair = settings - .network .keypair_config .keypair() - .with_context(|| "Failed to generate/import keypair for libp2p".to_string())?; + .with_context(|| "failed to generate/import keypair for libp2p".to_string())?; let peer_id = keypair.public().to_peer_id(); - info!(peer_id = peer_id.to_string(), "local peer ID generated"); + info!( + subject = "swarm.init", + category = "libp2p.swarm", + peer_id = peer_id.to_string(), + "local peer ID generated" + ); let transport = tcp::tokio::Transport::new(tcp::Config::default().nodelay(true)) .upgrade(upgrade::Version::V1Lazy) .authenticate(noise::Config::new(&keypair)?) .multiplex(yamux::Config::default()) - .timeout(settings.network.transport_connection_timeout) + .timeout(settings.libp2p.transport_connection_timeout) .boxed(); let mut swarm = Swarm::new( transport, ComposedBehaviour { - gossipsub: Toggle::from(if settings.network.enable_pubsub { - Some(pubsub::new(keypair.clone(), settings)?) + gossipsub: Toggle::from(if settings.libp2p.pubsub.enable { + Some(pubsub::new(keypair.clone(), settings.libp2p().pubsub())?) } else { None }), @@ -85,24 +90,24 @@ pub(crate) async fn new(settings: &settings::Node) -> Result Result Result<()> { // Listen-on given address - swarm.listen_on(settings.listen_address.to_string().parse()?)?; + swarm.listen_on(settings.libp2p.listen_address.to_string().parse()?)?; // Set Kademlia server mode swarm @@ -148,43 +153,58 @@ pub(crate) fn init( .set_mode(Some(kad::Mode::Server)); // add external addresses from settings - if !settings.announce_addresses.is_empty() { - for addr in settings.announce_addresses.iter() { + if !settings.libp2p.announce_addresses.is_empty() { + for addr in settings.libp2p.announce_addresses.iter() { swarm.add_external_address(addr.clone()); } } else { - warn!( - err = "no addresses to announce to peers defined in settings", - "node may be unreachable to external peers" + info!( + subject = "swarm.init", + category = "libp2p.swarm", + "no addresses to announce to peers defined in settings: node may be unreachable to external peers" ) } // Dial nodes specified in settings. Failure here shouldn't halt node startup. 
- for (index, addr) in settings.node_addresses.iter().enumerate() { - if index < settings.max_connected_peers as usize { + for (index, addr) in settings.libp2p.node_addresses.iter().enumerate() { + if index < settings.libp2p.max_connected_peers as usize { let _ = swarm .dial(addr.clone()) // log dial failure and continue - .map_err(|e| warn!(err=?e, "failed to dial configured node")); + .map_err(|e| { + warn!(subject = "swarm.init.err", + category = "libp2p.swarm", + err=?e, "failed to dial configured node") + }); // add node to kademlia routing table if let Some(Protocol::P2p(peer_id)) = addr.iter().find(|proto| matches!(proto, Protocol::P2p(_))) { - info!(addr=?addr, "added configured node to kademlia routing table"); + info!(subject = "swarm.init", + category = "libp2p.swarm", + addr=?addr, + "added configured node to kademlia routing table"); swarm .behaviour_mut() .kademlia .add_address(&peer_id, addr.clone()); } else { - warn!(addr=?addr, err="configured node address did not include a peer ID", "node not added to kademlia routing table") + warn!(subject = "swarm.init.err", + category = "libp2p.swarm", + addr=?addr, + err="configured node address did not include a peer ID", + "node not added to kademlia routing table") } } else { - warn!(addr=?addr, "address not dialed because node addresses count exceeds max connected peers configuration") + warn!(subject = "swarm.init.err", + category = "libp2p.swarm", + addr=?addr, + "address not dialed because node addresses count exceeds max connected peers configuration") } } - if settings.enable_pubsub { + if settings.libp2p.pubsub.enable { // join `receipts` topic swarm .behaviour_mut() @@ -270,7 +290,7 @@ pub(crate) enum ComposedEvent { #[derive(Debug)] pub(crate) enum TopicMessage { /// Receipt topic, wrapping [Receipt]. - CapturedReceipt(Receipt), + CapturedReceipt(pubsub::Message), } /// Custom behaviours for [Swarm]. @@ -316,8 +336,9 @@ impl ComposedBehaviour { if let Some(gossipsub) = self.gossipsub.as_mut() { let id_topic = gossipsub::IdentTopic::new(topic); // Make this a match once we have other topics. - let TopicMessage::CapturedReceipt(receipt) = msg; - let msg_bytes: Vec = receipt.try_into()?; + let TopicMessage::CapturedReceipt(message) = msg; + let msg_bytes: Vec = message.try_into()?; + if gossipsub .mesh_peers(&TopicHash::from_raw(topic)) .peekable() diff --git a/homestar-runtime/src/network/webserver.rs b/homestar-runtime/src/network/webserver.rs new file mode 100644 index 00000000..3668b0bf --- /dev/null +++ b/homestar-runtime/src/network/webserver.rs @@ -0,0 +1,548 @@ +//! Sets up a webserver for WebSocket and HTTP interaction with clients. 
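For context on the `pubsub::Message` wrapper added earlier in this diff: each gossipsub payload is an IPLD map with `header` (nonce) and `payload` keys, encoded as DAG-CBOR. A minimal round-trip sketch with placeholder map contents (not a real `Receipt`), assuming only the `libipld` and `anyhow` crates already used above:

```rust
// Sketch only: the map contents are placeholders standing in for a homestar Receipt.
use libipld::{cbor::DagCborCodec, ipld, prelude::Codec, Ipld};

fn main() -> anyhow::Result<()> {
    // Shape mirrors the wrapper: a map with "header" (nonce) and "payload" keys.
    let message = ipld!({
        "header": { "nonce": "0123456789abcdef" },
        "payload": { "out": "ok" }
    });

    // Publisher side: encode the IPLD map to DAG-CBOR bytes.
    let bytes = DagCborCodec.encode(&message)?;

    // Subscriber side: decode the bytes back into IPLD and inspect it.
    let decoded: Ipld = DagCborCodec.decode(&bytes)?;
    assert_eq!(message, decoded);
    Ok(())
}
```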
+ +use crate::{ + runner, + runner::{DynamicNodeInfo, StaticNodeInfo, WsSender}, + settings, +}; +use anyhow::{anyhow, Result}; +use faststr::FastStr; +use homestar_core::Workflow; +use homestar_wasm::io::Arg; +use http::{ + header::{AUTHORIZATION, CONTENT_TYPE}, + Method, +}; +use jsonrpsee::{ + self, + server::{middleware::ProxyGetRequestLayer, RandomStringIdProvider, ServerHandle}, +}; +use libipld::Cid; +use metrics_exporter_prometheus::PrometheusHandle; +use std::{ + iter::once, + net::{IpAddr, SocketAddr, TcpListener}, + str::FromStr, + time::Duration, +}; +use tokio::runtime::Handle; +#[cfg(feature = "websocket-notify")] +use tokio::sync::broadcast; +use tower_http::{ + cors::{self, CorsLayer}, + sensitive_headers::SetSensitiveRequestHeadersLayer, +}; +use tracing::info; + +pub(crate) mod listener; +#[cfg(feature = "websocket-notify")] +pub(crate) mod notifier; +mod prom; +mod rpc; + +#[cfg(feature = "websocket-notify")] +pub(crate) use notifier::Notifier; +#[cfg(feature = "websocket-notify")] +pub(crate) use rpc::SUBSCRIBE_NETWORK_EVENTS_ENDPOINT; +use rpc::{Context, JsonRpc}; + +/// Message type for messages sent back from the +/// WebSocket server to the [runner] for example. +/// +/// [runner]: crate::Runner +#[allow(dead_code)] +#[derive(Debug)] +pub(crate) enum Message { + /// Error attempting to run a [Workflow]. + RunErr(runner::Error), + /// Run a workflow, given a tuple of name, and [Workflow]. + RunWorkflow((FastStr, Workflow<'static, Arg>)), + /// Acknowledgement of a [Workflow] run. + AckWorkflow((Cid, FastStr)), + /// Message sent to the [Runner] to gather node information from the [EventHandler]. + /// + /// [Runner]: crate::Runner + /// [EventHandler]: crate::EventHandler + GetNodeInfo, + /// Acknowledgement of a [Message::GetNodeInfo] request, receiving static and dynamic + /// node information. + AckNodeInfo((StaticNodeInfo, DynamicNodeInfo)), +} + +/// Server fields. +#[cfg(feature = "websocket-notify")] +#[derive(Clone, Debug)] +pub(crate) struct Server { + /// Address of the server. + addr: SocketAddr, + /// Message buffer capacity for the server. + capacity: usize, + /// Message sender for broadcasting internal events to clients connected to + /// to the server. + evt_notifier: Notifier, + /// Message sender for broadcasting workflow-related events to clients + /// connected to to the server. + workflow_msg_notifier: Notifier, + /// Receiver timeout for the server when communicating with the [Runner]. + /// + /// [Runner]: crate::Runner + receiver_timeout: Duration, + /// General timeout for the server. + webserver_timeout: Duration, +} + +/// Server fields. +#[cfg(not(feature = "websocket-notify"))] +#[derive(Clone, Debug)] +pub(crate) struct Server { + /// Address of the server. + addr: SocketAddr, + /// Message buffer capacity for the server. + capacity: usize, + /// Receiver timeout for the server when communicating with the [Runner]. + /// + /// [Runner]: crate::Runner + receiver_timeout: Duration, + /// General timeout for the server. + webserver_timeout: Duration, +} + +impl Server { + /// Setup bounded, MPMC channel for runtime to send and received messages + /// through the WebSocket connection(s). + #[cfg(feature = "websocket-notify")] + fn setup_channel( + capacity: usize, + ) -> ( + broadcast::Sender, + broadcast::Receiver, + ) { + broadcast::channel(capacity) + } + + /// Set up a new [Server] instance, which acts as both a + /// WebSocket and HTTP server. 
+ #[cfg(feature = "websocket-notify")] + pub(crate) fn new(settings: &settings::Webserver) -> Result { + let (evt_sender, _receiver) = Self::setup_channel(settings.websocket_capacity); + let (msg_sender, _receiver) = Self::setup_channel(settings.websocket_capacity); + let host = IpAddr::from_str(&settings.host.to_string())?; + let port_setting = settings.port; + let addr = if port_available(host, port_setting) { + SocketAddr::from((host, port_setting)) + } else { + let port = (port_setting..port_setting + 1000) + .find(|port| port_available(host, *port)) + .ok_or_else(|| anyhow!("no free TCP ports available"))?; + SocketAddr::from((host, port)) + }; + + Ok(Self { + addr, + capacity: settings.websocket_capacity, + evt_notifier: Notifier::new(evt_sender), + workflow_msg_notifier: Notifier::new(msg_sender), + receiver_timeout: settings.websocket_receiver_timeout, + webserver_timeout: settings.timeout, + }) + } + + /// Set up a new [Server] instance, which only acts as an HTTP server. + #[cfg(not(feature = "websocket-notify"))] + pub(crate) fn new(settings: &settings::Webserver) -> Result { + let host = IpAddr::from_str(&settings.host.to_string())?; + let port_setting = settings.port; + let addr = if port_available(host, port_setting) { + SocketAddr::from((host, port_setting)) + } else { + let port = (port_setting..port_setting + 1000) + .find(|port| port_available(host, *port)) + .ok_or_else(|| anyhow!("no free TCP ports available"))?; + SocketAddr::from((host, port)) + }; + + Ok(Self { + addr, + capacity: settings.websocket_capacity, + receiver_timeout: settings.websocket_receiver_timeout, + webserver_timeout: settings.timeout, + }) + } + + /// Instantiates the [JsonRpc] module, and starts the server. + #[cfg(feature = "websocket-notify")] + pub(crate) async fn start( + &self, + runner_sender: WsSender, + metrics_hdl: PrometheusHandle, + ) -> Result { + let module = JsonRpc::new(Context::new( + metrics_hdl, + self.evt_notifier.clone(), + self.workflow_msg_notifier.clone(), + runner_sender, + self.receiver_timeout, + )) + .await?; + + self.start_inner(module).await + } + + /// Instantiates the [JsonRpc] module, and starts the server. + #[cfg(not(feature = "websocket-notify"))] + pub(crate) async fn start( + &self, + runner_sender: WsSender, + metrics_hdl: PrometheusHandle, + ) -> Result { + let module = JsonRpc::new(Context::new( + metrics_hdl, + runner_sender, + self.receiver_timeout, + )) + .await?; + self.start_inner(module).await + } + + /// Return the WebSocket event sender for broadcasting messages to connected + /// clients. + #[cfg(feature = "websocket-notify")] + pub(crate) fn evt_notifier(&self) -> Notifier { + self.evt_notifier.clone() + } + + /// Get WebSocket message sender for broadcasting workflow-related messages + /// to connected clients. + #[cfg(feature = "websocket-notify")] + pub(crate) fn workflow_msg_notifier(&self) -> Notifier { + self.workflow_msg_notifier.clone() + } + + /// Shared start logic for both WebSocket and HTTP servers. + async fn start_inner(&self, module: JsonRpc) -> Result { + let addr = self.addr; + info!( + subject = "webserver.start", + category = "webserver", + "webserver listening on {}", + addr + ); + + let cors = CorsLayer::new() + // Allow `POST` when accessing the resource + .allow_methods([Method::GET, Method::POST]) + // Allow requests from any origin + .allow_origin(cors::Any) + .allow_headers([CONTENT_TYPE]); + + let middleware = tower::ServiceBuilder::new() + .layer(ProxyGetRequestLayer::new("/health", rpc::HEALTH_ENDPOINT)?) 
+ .layer(ProxyGetRequestLayer::new( + "/metrics", + rpc::METRICS_ENDPOINT, + )?) + .layer(cors) + .layer(SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION))) + .timeout(self.webserver_timeout); + + let runtime_hdl = Handle::current(); + + let server = jsonrpsee::server::Server::builder() + .custom_tokio_runtime(runtime_hdl.clone()) + .set_middleware(middleware) + .set_id_provider(Box::new(RandomStringIdProvider::new(16))) + .set_message_buffer_capacity(self.capacity as u32) + .build(addr) + .await + .expect("Webserver to startup"); + + let hdl = server.start(module.into_inner()); + runtime_hdl.spawn(hdl.clone().stopped()); + + Ok(hdl) + } +} + +fn port_available(host: IpAddr, port: u16) -> bool { + TcpListener::bind((host.to_string(), port)).is_ok() +} + +#[cfg(test)] +mod test { + use super::*; + #[cfg(feature = "websocket-notify")] + use crate::event_handler::notification::ReceiptNotification; + use crate::{channel::AsyncChannel, db::Database, settings::Settings}; + #[cfg(feature = "websocket-notify")] + use homestar_core::{ + ipld::DagJson, + test_utils, + workflow::{config::Resources, instruction::RunInstruction, prf::UcanPrf, Task}, + }; + #[cfg(feature = "websocket-notify")] + use jsonrpsee::core::client::{Subscription, SubscriptionClientT}; + #[cfg(feature = "websocket-notify")] + use jsonrpsee::types::error::ErrorCode; + use jsonrpsee::{core::client::ClientT, rpc_params, ws_client::WsClientBuilder}; + use libp2p::Multiaddr; + #[cfg(feature = "websocket-notify")] + use notifier::{self, Header}; + + async fn metrics_handle(settings: Settings) -> PrometheusHandle { + #[cfg(feature = "monitoring")] + let metrics_hdl = + crate::metrics::start(settings.node.monitoring(), settings.node.network()) + .await + .unwrap(); + + #[cfg(not(feature = "monitoring"))] + let metrics_hdl = crate::metrics::start(settings.node.network()) + .await + .unwrap(); + + metrics_hdl + } + + #[homestar_runtime_proc_macro::runner_test] + fn ws_connect() { + let TestRunner { runner, settings } = TestRunner::start(); + runner.runtime.block_on(async { + let server = Server::new(settings.node().network().webserver()).unwrap(); + let metrics_hdl = metrics_handle(settings).await; + let (runner_tx, _runner_rx) = AsyncChannel::oneshot(); + server.start(runner_tx, metrics_hdl).await.unwrap(); + + let ws_url = format!("ws://{}", server.addr); + let http_url = format!("http://{}", server.addr); + + tokio_tungstenite::connect_async(ws_url.clone()) + .await + .unwrap(); + + let client = WsClientBuilder::default().build(ws_url).await.unwrap(); + let ws_resp: serde_json::Value = client + .request(rpc::HEALTH_ENDPOINT, rpc_params![]) + .await + .unwrap(); + let peer_id = + libp2p::PeerId::from_str("12D3KooWRNw2pJC9748Fmq4WNV27HoSTcX3r37132FLkQMrbKAiC") + .unwrap(); + let static_info = StaticNodeInfo::new(peer_id); + assert_eq!( + ws_resp, + serde_json::json!({"healthy": true, "nodeInfo": {"static": static_info, "dynamic": {"listeners": Vec::::new()}}}) + ); + let http_resp = reqwest::get(format!("{}/health", http_url)).await.unwrap(); + assert_eq!(http_resp.status(), 200); + let http_resp = http_resp.json::().await.unwrap(); + assert_eq!( + http_resp, + serde_json::json!({"healthy": true, "nodeInfo": {"static": static_info, "dynamic": {"listeners": Vec::::new()}}}) + ); + }); + + unsafe { metrics::clear_recorder() } + } + + #[cfg(feature = "monitoring")] + #[homestar_runtime_proc_macro::runner_test] + async fn ws_metrics_no_prefix() { + let TestRunner { runner, settings } = TestRunner::start(); + 
runner.runtime.block_on(async { + let server = Server::new(settings.node().network().webserver()).unwrap(); + let metrics_hdl = metrics_handle(settings).await; + let (runner_tx, _runner_rx) = AsyncChannel::oneshot(); + server.start(runner_tx, metrics_hdl).await.unwrap(); + + let ws_url = format!("ws://{}", server.addr); + + // wait for interval to pass + std::thread::sleep(Duration::from_millis(150)); + + let client = WsClientBuilder::default().build(ws_url).await.unwrap(); + let ws_resp1: serde_json::Value = client + .request(rpc::METRICS_ENDPOINT, rpc_params![]) + .await + .unwrap(); + + let len = if let serde_json::Value::Array(array) = &ws_resp1["metrics"] { + array.len() + } else { + panic!("expected array"); + }; + + assert!(len > 0); + + unsafe { metrics::clear_recorder() } + }); + } + + #[cfg(feature = "websocket-notify")] + #[homestar_runtime_proc_macro::runner_test] + async fn ws_subscribe_unsubscribe_network_events() { + let TestRunner { runner, settings } = TestRunner::start(); + runner.runtime.block_on(async { + let server = Server::new(settings.node().network().webserver()).unwrap(); + let metrics_hdl = metrics_handle(settings).await; + let (runner_tx, _runner_rx) = AsyncChannel::oneshot(); + server.start(runner_tx, metrics_hdl).await.unwrap(); + + let ws_url = format!("ws://{}", server.addr); + + let client1 = WsClientBuilder::default().build(ws_url).await.unwrap(); + let mut sub: Subscription> = client1 + .subscribe( + rpc::SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + rpc::UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + // send any bytes through (Vec) + let (invocation_receipt, runtime_receipt) = crate::test_utils::receipt::receipts(); + let receipt = + ReceiptNotification::with(invocation_receipt, runtime_receipt.cid(), None); + server + .evt_notifier + .notify(notifier::Message::new( + Header::new( + notifier::SubscriptionTyp::EventSub( + rpc::SUBSCRIBE_NETWORK_EVENTS_ENDPOINT.to_string(), + ), + None, + ), + receipt.to_json().unwrap(), + )) + .unwrap(); + + // send an unknown msg: this should be dropped + server + .evt_notifier + .notify(notifier::Message::new( + Header::new( + notifier::SubscriptionTyp::EventSub("test".to_string()), + None, + ), + vec![], + )) + .unwrap(); + + server + .evt_notifier + .notify(notifier::Message::new( + Header::new( + notifier::SubscriptionTyp::EventSub( + rpc::SUBSCRIBE_NETWORK_EVENTS_ENDPOINT.to_string(), + ), + None, + ), + receipt.to_json().unwrap(), + )) + .unwrap(); + + let msg1 = sub.next().await.unwrap().unwrap(); + let returned1: ReceiptNotification = DagJson::from_json(&msg1).unwrap(); + assert_eq!(returned1, receipt); + + let msg2 = sub.next().await.unwrap().unwrap(); + let _returned1: ReceiptNotification = DagJson::from_json(&msg2).unwrap(); + + assert!(sub.unsubscribe().await.is_ok()); + + unsafe { metrics::clear_recorder() } + }); + } + + #[cfg(feature = "websocket-notify")] + #[homestar_runtime_proc_macro::runner_test] + async fn ws_subscribe_workflow_incorrect_params() { + let TestRunner { runner, settings } = TestRunner::start(); + runner.runtime.block_on(async { + let server = Server::new(settings.node().network().webserver()).unwrap(); + let metrics_hdl = metrics_handle(settings).await; + let (runner_tx, _runner_rx) = AsyncChannel::oneshot(); + server.start(runner_tx, metrics_hdl).await.unwrap(); + + let ws_url = format!("ws://{}", server.addr); + + let client = WsClientBuilder::default().build(ws_url).await.unwrap(); + let sub: Result>, jsonrpsee::core::error::Error> = client + .subscribe( + 
rpc::SUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + rpc_params![], + rpc::UNSUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + ) + .await; + + assert!(sub.is_err()); + + if let Err(jsonrpsee::core::error::Error::Call(err)) = sub { + let check = ErrorCode::InvalidParams; + assert_eq!(err.code(), check.code()); + } else { + panic!("expected same error code"); + } + + unsafe { metrics::clear_recorder() } + }); + } + + #[cfg(feature = "websocket-notify")] + #[homestar_runtime_proc_macro::runner_test] + async fn ws_subscribe_workflow_runner_timeout() { + let TestRunner { runner, settings } = TestRunner::start(); + runner.runtime.block_on(async { + let server = Server::new(settings.node().network().webserver()).unwrap(); + let metrics_hdl = metrics_handle(settings).await; + let (runner_tx, _runner_rx) = AsyncChannel::oneshot(); + server.start(runner_tx, metrics_hdl).await.unwrap(); + + let ws_url = format!("ws://{}", server.addr); + + let config = Resources::default(); + let instruction1 = test_utils::workflow::instruction::(); + let (instruction2, _) = test_utils::workflow::wasm_instruction_with_nonce::(); + + let task1 = Task::new( + RunInstruction::Expanded(instruction1), + config.clone().into(), + UcanPrf::default(), + ); + let task2 = Task::new( + RunInstruction::Expanded(instruction2), + config.into(), + UcanPrf::default(), + ); + + let workflow = Workflow::new(vec![task1.clone(), task2.clone()]); + let run_str = format!( + r#"{{"name": "test","workflow": {}}}"#, + workflow.to_json_string().unwrap() + ); + + let run: serde_json::Value = serde_json::from_str(&run_str).unwrap(); + let client = WsClientBuilder::default().build(ws_url).await.unwrap(); + let sub: Result>, jsonrpsee::core::error::Error> = client + .subscribe( + rpc::SUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + rpc_params![run], + rpc::UNSUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + ) + .await; + + assert!(sub.is_err()); + + // Assure error is not on parse of params, but due to runner + // timeout (as runner is not available). + if let Err(jsonrpsee::core::error::Error::Call(err)) = sub { + let check = ErrorCode::ServerIsBusy; + assert_eq!(err.code(), check.code()); + } else { + panic!("expected same error code"); + } + + unsafe { metrics::clear_recorder() } + }); + } +} diff --git a/homestar-runtime/src/network/ws/listener.rs b/homestar-runtime/src/network/webserver/listener.rs similarity index 84% rename from homestar-runtime/src/network/ws/listener.rs rename to homestar-runtime/src/network/webserver/listener.rs index e933f7d7..9423ce20 100644 --- a/homestar-runtime/src/network/ws/listener.rs +++ b/homestar-runtime/src/network/webserver/listener.rs @@ -1,19 +1,18 @@ -use std::borrow::Cow; +//! Listener for incoming requests types. use faststr::FastStr; use homestar_core::{ipld::DagJson, Workflow}; use homestar_wasm::io::Arg; -use names::Name; +use names::{Generator, Name}; use serde::{de, Deserialize, Deserializer, Serialize}; use serde_json::value::RawValue; -/// A [Workflow] run command via a websocket channel. +/// A [Workflow] run command via a WebSocket channel. /// /// Note: We leverage the [RawValue] type in order to use our [DagJson] /// implementation, which is not a direct [Deserialize] implementation. 
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) struct Run<'a> { - pub(crate) action: Cow<'static, str>, #[serde(default = "default_name")] pub(crate) name: FastStr, #[serde(deserialize_with = "from_raw_value")] @@ -21,7 +20,7 @@ pub(crate) struct Run<'a> { } fn default_name() -> FastStr { - let mut name_gen = names::Generator::with_naming(Name::Numbered); + let mut name_gen = Generator::with_naming(Name::Numbered); name_gen .next() .unwrap_or_else(|| "workflow".to_string()) @@ -36,6 +35,12 @@ where Workflow::from_json(raw_value.get().as_bytes()).map_err(de::Error::custom) } +/// Filter metrics by prefix. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub(crate) struct MetricsPrefix { + pub(crate) prefix: String, +} + #[cfg(test)] mod test { use super::*; @@ -64,13 +69,12 @@ mod test { let workflow = Workflow::new(vec![task1.clone(), task2.clone()]); let run = Run { - action: Cow::Borrowed("run"), name: "test".into(), workflow: workflow.clone(), }; let run_str = format!( - r#"{{"action": "run","name": "test","workflow": {}}}"#, + r#"{{"name": "test","workflow": {}}}"#, workflow.to_json_string().unwrap() ); diff --git a/homestar-runtime/src/network/webserver/notifier.rs b/homestar-runtime/src/network/webserver/notifier.rs new file mode 100644 index 00000000..b1ad4f86 --- /dev/null +++ b/homestar-runtime/src/network/webserver/notifier.rs @@ -0,0 +1,96 @@ +//! Notifier for broadcasting messages to websocket clients. + +use anyhow::Result; +use faststr::FastStr; +use libipld::Cid; +use std::{fmt, sync::Arc}; +use tokio::sync::broadcast; + +/// Type-wrapper for WebSocket sender. +#[derive(Debug)] +pub(crate) struct Notifier(Arc>); + +impl Clone for Notifier { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +impl Notifier +where + T: Send + Sync + fmt::Debug + 'static, +{ + /// Create a new [Notifier]. + pub(crate) fn new(sender: broadcast::Sender) -> Self { + Self(sender.into()) + } + + /// Get a reference to the inner [broadcast::Sender]. + #[allow(dead_code)] + pub(crate) fn inner(&self) -> &Arc> { + &self.0 + } + + /// Get and take ownership of the inner [broadcast::Sender]. + #[allow(dead_code)] + pub(crate) fn into_inner(self) -> Arc> { + self.0 + } + + /// Send a message to all connected WebSocket clients. + pub(crate) fn notify(&self, msg: T) -> Result<()> { + let _ = self.0.send(msg)?; + Ok(()) + } +} + +/// Subscription type: either directed via a [Cid] or an event subscription string. +#[allow(dead_code)] +#[derive(Debug, Clone)] +pub(crate) enum SubscriptionTyp { + EventSub(String), + Cid(Cid), +} + +/// A header for a message to be sent to a WebSocket client. +#[derive(Debug, Clone)] +pub(crate) struct Header { + pub(crate) subscription: SubscriptionTyp, + pub(crate) ident: Option, +} + +impl Header { + /// Create a new [Header]. + pub(crate) fn new(sub: SubscriptionTyp, ident: Option) -> Self { + Self { + subscription: sub, + ident, + } + } +} + +/// A message to be sent to a WebSocket client, with a header and payload. +#[derive(Debug, Clone)] +pub(crate) struct Message { + pub(crate) header: Header, + pub(crate) payload: Vec, +} + +impl Message { + /// Create a new [Message]. + pub(crate) fn new(header: Header, payload: Vec) -> Self { + Self { header, payload } + } + + /// Get a reference to the [Header] of a [Message]. + #[allow(dead_code)] + pub(crate) fn header(&self) -> &Header { + &self.header + } + + /// Get a reference to the payload of a [Message]. 
+ #[allow(dead_code)] + pub(crate) fn payload(&self) -> &[u8] { + &self.payload + } +} diff --git a/homestar-runtime/src/network/webserver/prom.rs b/homestar-runtime/src/network/webserver/prom.rs new file mode 100644 index 00000000..622b8850 --- /dev/null +++ b/homestar-runtime/src/network/webserver/prom.rs @@ -0,0 +1,434 @@ +/// A module to parse prometheus metrics data into json +/// +/// Influenced by https://crates.io/crates/prom2jsonrs/0.1.0. +use anyhow::{anyhow, bail, Result}; +use dyn_clone::DynClone; +use once_cell::sync::Lazy; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +const HISTOGRAM_TYPE: &str = "HISTOGRAM"; +const SUMMARY_TYPE: &str = "SUMMARY"; + +static METRIC_REGEX_NO_LABEL: Lazy<&Regex> = Lazy::new(|| { + static RE: once_cell::sync::OnceCell = once_cell::sync::OnceCell::new(); + RE.get_or_init(|| Regex::new(r"([a-zA-Z_:][a-zA-Z0-9_:]*)\s(-?[\d.]+(?:e-?\d+)?|NaN)").unwrap()) +}); + +static METRIC_REGEX_WITH_LABEL: Lazy<&Regex> = Lazy::new(|| { + static RE: once_cell::sync::OnceCell = once_cell::sync::OnceCell::new(); + RE.get_or_init(|| { + Regex::new(r"[a-zA-Z_:][a-zA-Z0-9_:]*\{(.*)\}\s(-?[\d.]+(?:e-?\d+)?|NaN)").unwrap() + }) +}); + +static LABELS_REGEX: Lazy<&Regex> = Lazy::new(|| { + static RE: once_cell::sync::OnceCell = once_cell::sync::OnceCell::new(); + RE.get_or_init(|| Regex::new("([a-zA-Z0-9_:]*)=\"([^\"]+)\"").unwrap()) +}); + +static MULTI_NEWLINE: Lazy<&Regex> = Lazy::new(|| { + static RE: once_cell::sync::OnceCell = once_cell::sync::OnceCell::new(); + RE.get_or_init(|| Regex::new(r"\n\n").unwrap()) +}); + +type Labels = HashMap; +type Value = String; + +#[derive(Clone, Serialize)] +/// A parsed representation of the prometheus metrics data +pub(crate) struct PrometheusData { + metrics: Vec, +} + +impl PrometheusData { + /// Parse promethues metric data from string + pub(crate) fn from_string(s: &str) -> Result { + let text = MULTI_NEWLINE.replace_all(s, "\n"); + let mut metrics = Vec::new(); + let mut metric_lines = Vec::new(); + let mut num_comment_lines = 0; + for line in text.lines() { + if line.starts_with('#') { + if num_comment_lines == 2 { + // One set complete + metrics.push(MetricFamily::from_raw(&metric_lines)?); + metric_lines = vec![line]; + num_comment_lines = 1; + } else { + num_comment_lines += 1; + metric_lines.push(line); + } + } else { + metric_lines.push(line) + } + } + Ok(PrometheusData { metrics }) + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +struct Metric { + labels: Option, + value: Value, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +struct Summary { + labels: Option, + quantiles: Labels, + count: Value, + sum: Value, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +struct Histogram { + labels: Option>, + buckets: Labels, + count: Value, + sum: Value, +} + +#[derive(Debug, Clone, PartialEq, Serialize)] +#[serde(rename_all = "lowercase")] +enum MetricType { + Gauge, + Histogram, + Summary, +} + +#[derive(Clone, Serialize)] +struct MetricFamily { + metric_type: MetricType, + metric_name: String, + help: String, + data: Vec>, +} + +#[typetag::serde(tag = "type")] +trait MetricLike: DynClone { + fn parse_from_string(s: &str) -> Result<(Value, Option)> + where + Self: Sized, + { + if let Some(caps) = METRIC_REGEX_NO_LABEL.captures(s) { + Ok((caps[2].to_string(), None)) + } else if let Some(caps) = METRIC_REGEX_WITH_LABEL.captures(s) { + let value = caps[2].to_string(); + let mut labels: HashMap = HashMap::new(); + for cap in 
LABELS_REGEX.captures_iter(&caps[1]) { + labels.insert(cap[1].to_string(), cap[2].to_string()); + } + Ok((value, Some(labels))) + } else { + Err(anyhow!("invalid format {}", s)) + } + } + + fn metric_type() -> String + where + Self: Sized; +} + +dyn_clone::clone_trait_object!(MetricLike); + +impl Metric { + fn from_string(s: &str) -> Result { + let (value, labels) = Self::parse_from_string(s)?; + Ok(Metric { labels, value }) + } +} + +#[typetag::serde(name = "metric")] +impl MetricLike for Metric { + fn metric_type() -> String { + String::from("DEFAULT") + } +} + +impl Summary { + fn from_raw(metric_name: &str, raw_lines: &Vec<&str>) -> Result { + let mut sum = String::from(""); + let mut count = String::from(""); + let sum_prefix = format!("{}_sum", metric_name); + let count_prefix = format!("{}_count", metric_name); + let mut labels = HashMap::new(); + let mut quantiles = HashMap::new(); + for raw_line in raw_lines { + if raw_line.starts_with(&sum_prefix) { + sum = Summary::parse_from_string(raw_line)?.0; + } else if raw_line.starts_with(&count_prefix) { + count = Summary::parse_from_string(raw_line)?.0; + } else if let Some(caps) = METRIC_REGEX_WITH_LABEL.captures(raw_line) { + for cap in LABELS_REGEX.captures_iter(&caps[1]) { + let key = &cap[1]; + let value = &cap[2]; + match key { + "quantile" => quantiles.insert(key.to_string(), value.to_string()), + _ => labels.insert(key.to_string(), value.to_string()), + }; + } + } else { + bail!("invalid format {}", raw_line); + } + } + + Ok(Summary { + sum, + count, + labels: Some(labels), + quantiles, + }) + } +} + +#[typetag::serde] +impl MetricLike for Summary { + fn metric_type() -> String { + String::from(SUMMARY_TYPE) + } +} + +impl Histogram { + fn from_raw(metric_name: &str, raw_lines: &Vec<&str>) -> Result { + let mut sum = String::from(""); + let mut count = String::from(""); + let sum_prefix = format!("{}_sum", metric_name); + let count_prefix = format!("{}_count", metric_name); + let mut labels: HashMap = HashMap::new(); + let mut buckets: HashMap = HashMap::new(); + for raw_line in raw_lines { + if raw_line.starts_with(&sum_prefix) { + sum = Summary::parse_from_string(raw_line)?.0; + } else if raw_line.starts_with(&count_prefix) { + count = Summary::parse_from_string(raw_line)?.0; + } else if let Some(caps) = METRIC_REGEX_WITH_LABEL.captures(raw_line) { + for cap in LABELS_REGEX.captures_iter(&caps[1]) { + let key = &cap[1]; + let value = &cap[2]; + match key { + "le" => buckets.insert(value.to_string(), caps[2].to_string()), + _ => labels.insert(key.to_string(), value.to_string()), + }; + } + } else { + bail!("invalid format {}", raw_line) + } + } + + Ok(Histogram { + sum, + count, + labels: Some(labels), + buckets, + }) + } +} + +#[typetag::serde] +impl MetricLike for Histogram { + fn metric_type() -> String { + String::from(HISTOGRAM_TYPE) + } +} + +impl MetricFamily { + fn from_raw(raw: &[&str]) -> Result { + let mut raw_iter = raw.iter(); + let help = MetricFamily::metric_help_fron_raw( + raw_iter + .next() + .ok_or(anyhow!("invalid metric help{}", raw.join("\n")))?, + ); + let (metric_name, metric_type) = MetricFamily::metric_name_and_type( + raw_iter + .next() + .ok_or(anyhow!("invalid metric name/type {}", raw.join("\n")))?, + )?; + let mut data: Vec> = Vec::new(); + match metric_type { + MetricType::Gauge => { + for raw_line in raw_iter { + data.push(Box::new(Metric::from_string(raw_line)?)) + } + } + MetricType::Histogram => { + let count_prefix = format!("{}_count", metric_name); + let mut histogram_lines: Vec<&str> = 
Vec::new(); + for raw_line in raw_iter { + histogram_lines.push(raw_line); + if raw_line.starts_with(&count_prefix) { + data.push(Box::new(Histogram::from_raw( + &metric_name, + &histogram_lines, + )?)); + histogram_lines = Vec::new(); + } + } + } + MetricType::Summary => { + let count_prefix = format!("{}_count", metric_name); + let mut summary_lines: Vec<&str> = Vec::new(); + for raw_line in raw_iter { + summary_lines.push(raw_line); + if raw_line.starts_with(&count_prefix) { + data.push(Box::new(Summary::from_raw(&metric_name, &summary_lines)?)); + summary_lines = Vec::new(); + } + } + } + } + Ok(MetricFamily { + metric_type, + metric_name, + help, + data, + }) + } + + fn metric_name_and_type(type_line: &str) -> Result<(String, MetricType)> { + let tags: Vec<&str> = type_line.split_whitespace().collect(); + let (name, type_raw) = (tags[2], tags[3]); + let metric_type = match type_raw { + "gauge" => MetricType::Gauge, + "counter" => MetricType::Gauge, + "histogram" => MetricType::Histogram, + "summary" => MetricType::Summary, + _ => bail!("invalid metric type {}", type_raw), + }; + + Ok((name.to_string(), metric_type)) + } + + fn metric_help_fron_raw(help_line: &str) -> String { + let tags: Vec<&str> = help_line.split_whitespace().collect(); + tags[3..].join(" ").to_string() + } +} + +#[cfg(test)] +mod test { + use super::*; + use maplit::hashmap; + + #[test] + fn parse_metric() { + assert_eq!( + Metric { + labels: None, + value: String::from("205632") + }, + Metric::from_string("go_memstats_mspan_inuse_bytes 205632").unwrap() + ); + assert_eq!( + Metric { + labels: Some(hashmap!{ + "dialer_name".to_string() => "default".to_string(), + "reason".to_string() => "unknown".to_string(), + }), + value: String::from("0") + }, + Metric::from_string("net_conntrack_dialer_conn_failed_total{dialer_name=\"default\",reason=\"unknown\"} 0").unwrap() + ) + } + + #[test] + fn parse_metric_raw_data() { + let raw_data = "# HELP go_goroutines Number of goroutines that currently exist. +# TYPE go_goroutines gauge +go_goroutines 31 +# HELP go_info Information about the Go environment. +# TYPE go_info gauge +go_info{version=\"go1.15.5\"} 1"; + let prom_data = PrometheusData::from_string(raw_data).unwrap(); + assert_eq!(MetricType::Gauge, prom_data.metrics[0].metric_type) + } + + #[test] + fn parse_metric_summary() { + let raw_data = + "prometheus_engine_query_duration_seconds{slice=\"inner_eval\",quantile=\"0.5\"} NaN +prometheus_engine_query_duration_seconds{slice=\"inner_eval\",quantile=\"0.9\"} NaN +prometheus_engine_query_duration_seconds{slice=\"inner_eval\",quantile=\"0.99\"} NaN +prometheus_engine_query_duration_seconds_sum{slice=\"inner_eval\"} 12 +prometheus_engine_query_duration_seconds_count{slice=\"inner_eval\"} 0"; + let summary = Summary::from_raw( + "prometheus_engine_query_duration_seconds", + &raw_data.lines().collect(), + ) + .unwrap(); + assert_eq!(summary.sum, "12".to_string()); + assert_eq!( + summary.labels, + Some(hashmap! 
{"slice".to_string() => "inner_eval".to_string()}) + ); + } + + #[test] + fn parse_metric_histogram() { + let raw_data = r#"prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.1"} 10871 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.2"} 10871 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="0.4"} 10871 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="1"} 10871 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="3"} 10871 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="8"} 10871 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="20"} 10871 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="60"} 10871 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="120"} 10871 +prometheus_http_request_duration_seconds_bucket{handler="/metrics",le="+Inf"} 10871 +prometheus_http_request_duration_seconds_sum{handler="/metrics"} 67.48398663499978 +prometheus_http_request_duration_seconds_count{handler="/metrics"} 10871"#; + let histogram = Histogram::from_raw( + "prometheus_http_request_duration_seconds", + &raw_data.lines().collect(), + ) + .unwrap(); + assert_eq!(histogram.sum, "67.48398663499978"); + assert_eq!( + histogram.labels, + Some(hashmap! {"handler".to_string() => "/metrics".to_string()}) + ); + } + + #[test] + fn parse_metric_collection_to_json() { + let raw_data = r#"# HELP homestar_process_disk_total_read_bytes Total bytes read from disk. +# TYPE homestar_process_disk_total_read_bytes gauge +homestar_process_disk_total_read_bytes 45969408 + +# HELP homestar_process_virtual_memory_bytes The virtual memory size in bytes. +# TYPE homestar_process_virtual_memory_bytes gauge +homestar_process_virtual_memory_bytes 418935930880 + +# HELP homestar_network_received_bytes The bytes received since last refresh. +# TYPE homestar_network_received_bytes gauge +homestar_network_received_bytes 0 + +# HELP homestar_system_available_memory_bytes The amount of available memory. +# TYPE homestar_system_available_memory_bytes gauge +homestar_system_available_memory_bytes 0 + +# HELP homestar_system_disk_available_space_bytes The total amount of available disk space. +# TYPE homestar_system_disk_available_space_bytes gauge +homestar_system_disk_available_space_bytes 0 + +# HELP homestar_system_load_average_percentage The load average over a five minute interval. +# TYPE homestar_system_load_average_percentage gauge +homestar_system_load_average_percentage 6.26611328125"#; + + let prom_data = PrometheusData::from_string(raw_data).unwrap(); + let json_string = serde_json::to_string(&prom_data).unwrap(); + let root: serde_json::Value = serde_json::from_str(&json_string).unwrap(); + + let check = root + .get("metrics") + .and_then(|v| v.get(0)) + .and_then(|v| v.get("data")) + .and_then(|v| v.get(0)) + .and_then(|v| v.get("value")) + .unwrap(); + + assert_eq!(check, &serde_json::Value::String("45969408".to_string())); + } +} diff --git a/homestar-runtime/src/network/webserver/rpc.rs b/homestar-runtime/src/network/webserver/rpc.rs new file mode 100644 index 00000000..51b72eb8 --- /dev/null +++ b/homestar-runtime/src/network/webserver/rpc.rs @@ -0,0 +1,403 @@ +//! JSON-RPC module for registering methods and subscriptions. 
+ +#[cfg(feature = "websocket-notify")] +use super::notifier::{self, Header, Notifier, SubscriptionTyp}; +#[allow(unused_imports)] +use super::{listener, prom::PrometheusData, Message}; +#[cfg(feature = "websocket-notify")] +use crate::channel::AsyncChannel; +use crate::runner::WsSender; +#[cfg(feature = "websocket-notify")] +use anyhow::anyhow; +use anyhow::Result; +#[cfg(feature = "websocket-notify")] +use dashmap::DashMap; +#[cfg(feature = "websocket-notify")] +use faststr::FastStr; +#[cfg(feature = "websocket-notify")] +use futures::StreamExt; +#[cfg(feature = "websocket-notify")] +use homestar_core::ipld::DagCbor; +use jsonrpsee::{ + server::RpcModule, + types::error::{ErrorCode, ErrorObject}, +}; +#[cfg(feature = "websocket-notify")] +use jsonrpsee::{types::SubscriptionId, SubscriptionMessage, SubscriptionSink, TrySendError}; +#[cfg(feature = "websocket-notify")] +use libipld::Cid; +use metrics_exporter_prometheus::PrometheusHandle; +#[cfg(feature = "websocket-notify")] +use std::sync::Arc; +use std::time::Duration; +#[allow(unused_imports)] +use tokio::sync::oneshot; +#[cfg(feature = "websocket-notify")] +use tokio::{runtime::Handle, select}; +#[cfg(feature = "websocket-notify")] +use tokio_stream::wrappers::BroadcastStream; +#[cfg(feature = "websocket-notify")] +use tracing::debug; +#[allow(unused_imports)] +use tracing::{error, warn}; + +/// Health endpoint. +pub(crate) const HEALTH_ENDPOINT: &str = "health"; +/// Metrics endpoint for prometheus / openmetrics polling. +pub(crate) const METRICS_ENDPOINT: &str = "metrics"; +/// Run a workflow and subscribe to that workflow's events. +#[cfg(feature = "websocket-notify")] +pub(crate) const SUBSCRIBE_RUN_WORKFLOW_ENDPOINT: &str = "subscribe_run_workflow"; +/// Unsubscribe from a workflow's events. +#[cfg(feature = "websocket-notify")] +pub(crate) const UNSUBSCRIBE_RUN_WORKFLOW_ENDPOINT: &str = "unsubscribe_run_workflow"; +/// Subscribe to network events. +#[cfg(feature = "websocket-notify")] +pub(crate) const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; +/// Unsubscribe from network events. +#[cfg(feature = "websocket-notify")] +pub(crate) const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; + +/// Context for RPC methods. +#[cfg(feature = "websocket-notify")] +pub(crate) struct Context { + metrics_hdl: PrometheusHandle, + evt_notifier: Notifier, + workflow_msg_notifier: Notifier, + runner_sender: WsSender, + receiver_timeout: Duration, + workflow_listeners: Arc, (Cid, FastStr)>>, +} + +/// Context for RPC methods. +#[allow(dead_code)] +#[cfg(not(feature = "websocket-notify"))] +pub(crate) struct Context { + metrics_hdl: PrometheusHandle, + runner_sender: WsSender, + receiver_timeout: Duration, +} + +impl Context { + /// Create a new [Context] instance. + #[cfg(feature = "websocket-notify")] + #[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] + pub(crate) fn new( + metrics_hdl: PrometheusHandle, + evt_notifier: Notifier, + workflow_msg_notifier: Notifier, + runner_sender: WsSender, + receiver_timeout: Duration, + ) -> Self { + Self { + metrics_hdl, + evt_notifier, + workflow_msg_notifier, + runner_sender, + receiver_timeout, + workflow_listeners: DashMap::new().into(), + } + } + + /// Create a new [Context] instance. 
+ #[cfg(not(feature = "websocket-notify"))] + pub(crate) fn new( + metrics_hdl: PrometheusHandle, + runner_sender: WsSender, + receiver_timeout: Duration, + ) -> Self { + Self { + metrics_hdl, + runner_sender, + receiver_timeout, + } + } +} + +/// [RpcModule] wrapper. +pub(crate) struct JsonRpc(RpcModule); + +impl JsonRpc { + /// Create a new [JsonRpc] instance, registering methods on initialization. + pub(crate) async fn new(ctx: Context) -> Result { + let module = Self::register(ctx).await?; + Ok(Self(module)) + } + + /// Get a reference to the inner [RpcModule]. + #[allow(dead_code)] + pub(crate) fn inner(&self) -> &RpcModule { + &self.0 + } + + /// Get and take ownership of the inner [RpcModule]. + pub(crate) fn into_inner(self) -> RpcModule { + self.0 + } + + async fn register(ctx: Context) -> Result> { + let mut module = RpcModule::new(ctx); + + #[cfg(not(test))] + module.register_async_method(HEALTH_ENDPOINT, |_, ctx| async move { + let (tx, rx) = crate::channel::AsyncChannel::oneshot(); + ctx.runner_sender + .send_async((Message::GetNodeInfo, Some(tx))) + .await + .map_err(|err| internal_err(err.to_string()))?; + + if let Ok(Message::AckNodeInfo((static_info, dyn_info))) = + rx.recv_deadline(std::time::Instant::now() + ctx.receiver_timeout) + { + Ok(serde_json::json!({ "healthy": true, "nodeInfo": { + "static": static_info, "dynamic": dyn_info}})) + } else { + error!( + subject = "call.health", + category = "jsonrpc.call", + sub = HEALTH_ENDPOINT, + "did not acknowledge message in time" + ); + Err(internal_err("failed to get node information".to_string())) + } + })?; + + #[cfg(test)] + module.register_async_method(HEALTH_ENDPOINT, |_, _| async move { + use crate::runner::{DynamicNodeInfo, StaticNodeInfo}; + use std::str::FromStr; + let peer_id = + libp2p::PeerId::from_str("12D3KooWRNw2pJC9748Fmq4WNV27HoSTcX3r37132FLkQMrbKAiC") + .unwrap(); + Ok::>(serde_json::json!({ + "healthy": true, "nodeInfo": {"static": StaticNodeInfo::new(peer_id), "dynamic": DynamicNodeInfo::new(vec![])}, + })) + })?; + + module.register_async_method(METRICS_ENDPOINT, |params, ctx| async move { + let render = ctx.metrics_hdl.render(); + + // TODO: Handle prefix specific metrics in parser. 
+ match params.one::() { + Ok(listener::MetricsPrefix { prefix }) => PrometheusData::from_string(&render) + .map_err(|err| { + internal_err(format!( + "failed to render metrics @prefix {} : {:#?}", + prefix, err + )) + }), + Err(_) => PrometheusData::from_string(&render) + .map_err(|err| internal_err(format!("failed to render metrics: {:#?}", err))), + } + })?; + + #[cfg(feature = "websocket-notify")] + module.register_subscription( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + |_, pending, ctx| async move { + let sink = pending.accept().await?; + let rx = ctx.evt_notifier.inner().subscribe(); + let stream = BroadcastStream::new(rx); + Self::handle_event_subscription( + sink, + stream, + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT.to_string(), + ) + .await?; + Ok(()) + }, + )?; + + #[cfg(feature = "websocket-notify")] + module.register_subscription( + SUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + SUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + UNSUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + |params, pending, ctx| async move { + match params.one::>() { + Ok(listener::Run { name, workflow }) => { + let (tx, rx) = AsyncChannel::oneshot(); + ctx.runner_sender + .send_async(( + Message::RunWorkflow((name.clone(), workflow.clone())), + Some(tx), + )) + .await?; + + if let Ok(Message::AckWorkflow((cid, name))) = + rx.recv_deadline(std::time::Instant::now() + ctx.receiver_timeout) + { + let sink = pending.accept().await?; + ctx.workflow_listeners + .insert(sink.subscription_id(), (cid, name)); + let rx = ctx.workflow_msg_notifier.inner().subscribe(); + let stream = BroadcastStream::new(rx); + Self::handle_workflow_subscription(sink, stream, ctx).await?; + } else { + error!( + subject = "subscription.workflow.err", + category = "jsonrpc.subscription", + sub = SUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + workflow_name = name.to_string(), + "did not acknowledge message in time" + ); + let _ = pending + .reject(busy_err(format!( + "not able to run workflow {}", + workflow.to_cid()? + ))) + .await; + } + } + Err(err) => { + warn!(subject = "subscription.workflow.err", + category = "jsonrpc.subscription", + err=?err, + "failed to parse run workflow params"); + let _ = pending.reject(err).await; + } + } + Ok(()) + }, + )?; + + Ok(module) + } + + #[cfg(feature = "websocket-notify")] + async fn handle_event_subscription( + mut sink: SubscriptionSink, + mut stream: BroadcastStream, + subscription_type: String, + ) -> Result<()> { + let rt_hdl = Handle::current(); + rt_hdl.spawn(async move { + loop { + select! { + _ = sink.closed() => { + break Ok(()); + } + next_msg = stream.next() => { + let msg = match next_msg { + Some(Ok(notifier::Message { + header: Header { + subscription: SubscriptionTyp::EventSub(evt), + .. 
+ }, + payload, + })) if evt == subscription_type => payload, + Some(Ok(_)) => continue, + Some(Err(err)) => { + error!(subject = "subscription.event.err", + category = "jsonrpc.subscription", + err=?err, + "subscription stream error"); + break Err(err.into()); + } + None => break Ok(()), + }; + let sub_msg = SubscriptionMessage::from_json(&msg)?; + match sink.try_send(sub_msg) { + Ok(()) => (), + Err(TrySendError::Closed(_)) => { + break Err(anyhow!("subscription sink closed")); + } + Err(TrySendError::Full(_)) => { + error!(subject = "subscription.event.err", + category = "jsonrpc.subscription", + "subscription sink full"); + } + } + } + } + } + }); + + Ok(()) + } + + #[cfg(feature = "websocket-notify")] + async fn handle_workflow_subscription( + mut sink: SubscriptionSink, + mut stream: BroadcastStream, + ctx: Arc, + ) -> Result<()> { + let rt_hdl = Handle::current(); + rt_hdl.spawn(async move { + loop { + select! { + _ = sink.closed() => { + ctx.workflow_listeners.remove(&sink.subscription_id()); + break Ok(()); + } + next_msg = stream.next() => { + let msg = match next_msg { + Some(Ok(notifier::Message { + header: Header { subscription: SubscriptionTyp::Cid(cid), ident }, + payload, + })) => { + let msg = ctx.workflow_listeners + .get(&sink.subscription_id()) + .and_then(|v| { + let (v_cid, v_name) = v.value(); + if v_cid == &cid && (Some(v_name) == ident.as_ref() || ident.is_none()) { + debug!( + subject = "subscription.workflow", + category = "jsonrpc.subscription", + cid = cid.to_string(), + ident = ident.clone().unwrap_or("undefined".into()).to_string(), "received message"); + Some(payload) + } else { + None + } + }); + msg + } + Some(Ok(notifier::Message { + header: notifier::Header { subscription: _sub, ..}, + .. + })) => { + continue; + } + Some(Err(err)) => { + error!("subscription stream error: {}", err); + ctx.workflow_listeners.remove(&sink.subscription_id()); + break Err(err.into()); + } + None => break Ok(()), + }; + + if let Some(msg) = msg { + let sub_msg = SubscriptionMessage::from_json(&msg)?; + match sink.try_send(sub_msg) { + Ok(()) => (), + Err(TrySendError::Closed(_)) => { + ctx.workflow_listeners.remove(&sink.subscription_id()); + break Err(anyhow!("subscription sink closed")); + } + Err(TrySendError::Full(_)) => { + error!(subject = "subscription.workflow.err", + category = "jsonrpc.subscription", + "subscription sink full"); + } + } + } + } + } + } + }); + + Ok(()) + } +} + +fn internal_err<'a, T: ToString>(msg: T) -> ErrorObject<'a> { + ErrorObject::owned(ErrorCode::InternalError.code(), msg.to_string(), None::<()>) +} + +#[allow(dead_code)] +fn busy_err<'a, T: ToString>(msg: T) -> ErrorObject<'a> { + ErrorObject::owned(ErrorCode::ServerIsBusy.code(), msg.to_string(), None::<()>) +} diff --git a/homestar-runtime/src/network/ws.rs b/homestar-runtime/src/network/ws.rs deleted file mode 100644 index 0493cb10..00000000 --- a/homestar-runtime/src/network/ws.rs +++ /dev/null @@ -1,325 +0,0 @@ -//! Sets up a websocket server for sending and receiving messages from browser -//! clients. 
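The subscription handlers above loop with `select!` over `sink.closed()` and a `BroadcastStream` of notifier messages, pushing matching payloads into the JSON-RPC sink. Stripped of the sink and the header/CID filtering, the forwarding core looks roughly like this sketch (payloads are illustrative):

```rust
// Sketch only: drains a broadcast stream the way the subscription tasks do,
// minus the JSON-RPC sink and subscription bookkeeping.
use futures::StreamExt;
use tokio::sync::broadcast;
use tokio_stream::wrappers::BroadcastStream;

#[tokio::main]
async fn main() {
    let (tx, rx) = broadcast::channel::<Vec<u8>>(8);
    let mut stream = BroadcastStream::new(rx);

    let forwarder = tokio::spawn(async move {
        // Runs until every sender is dropped (the stream then yields None),
        // analogous to the runtime loop that also watches `sink.closed()`.
        while let Some(next) = stream.next().await {
            match next {
                Ok(payload) => println!("forward {} bytes to the sink", payload.len()),
                Err(err) => eprintln!("lagged receiver: {err}"),
            }
        }
    });

    tx.send(b"event".to_vec()).unwrap();
    drop(tx);
    forwarder.await.unwrap();
}
```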
- -use crate::{ - channel::AsyncBoundedChannelReceiver, - runner::{self, WsSender}, - settings, -}; -use anyhow::{anyhow, Result}; -use axum::{ - extract::{ - ws::{self, Message as AxumMsg, WebSocketUpgrade}, - ConnectInfo, State, TypedHeader, - }, - response::IntoResponse, - routing::get, - Router, -}; -use faststr::FastStr; -use futures::{stream::StreamExt, SinkExt}; -use homestar_core::Workflow; -use homestar_wasm::io::Arg; -use std::{ - net::{IpAddr, SocketAddr, TcpListener}, - ops::ControlFlow, - str::FromStr, - time::Duration, -}; -use tokio::{ - runtime::Handle, - select, - sync::{broadcast, oneshot}, - time::{self, Instant}, -}; -use tracing::{debug, error, info, warn}; - -#[cfg(feature = "websocket-notify")] -pub(crate) mod listener; -#[cfg(feature = "websocket-notify")] -pub(crate) mod notifier; -#[cfg(feature = "websocket-notify")] -pub(crate) use notifier::Notifier; - -/// Message type for messages sent back from the -/// websocket server to the [runner] for example. -/// -/// [runner]: crate::Runner -#[allow(dead_code)] -#[derive(Debug)] -pub(crate) enum Message { - /// Notify the listener that the websocket server is shutting down - /// gracefully. - GracefulShutdown(oneshot::Sender<()>), - /// Error attempting to run a [Workflow]. - RunErr(runner::Error), - /// Run a workflow, given a tuple of name, and [Workflow]. - RunWorkflow((FastStr, Workflow<'static, Arg>)), - /// Acknowledgement of a [Workflow] run. - /// - /// TODO: Temporary Ack until we define semantics for JSON-RPC or similar. - RunWorkflowAck, -} - -/// WebSocket server fields. -#[allow(dead_code)] -#[derive(Clone, Debug)] -pub(crate) struct Server { - /// Address of the websocket server. - addr: SocketAddr, - /// Message sender for broadcasting to clients connected to the - /// websocket server. - notifier: Notifier, - /// Receiver timeout for the websocket server. - receiver_timeout: Duration, -} - -/// State used for the websocket server routes. -#[derive(Clone, Debug)] -struct ServerState { - notifier: Notifier, - runner_sender: WsSender, - receiver_timeout: Duration, -} - -impl Server { - /// Setup bounded, MPMC channel for runtime to send and received messages - /// through the websocket connection(s). - fn setup_channel( - capacity: usize, - ) -> (broadcast::Sender>, broadcast::Receiver>) { - broadcast::channel(capacity) - } - - pub(crate) fn new(settings: &settings::Network) -> Result { - let (sender, _receiver) = Self::setup_channel(settings.websocket_capacity); - - let host = IpAddr::from_str(&settings.websocket_host.to_string())?; - let port_setting = settings.websocket_port; - let addr = if port_available(host, port_setting) { - SocketAddr::from((host, port_setting)) - } else { - let port = (port_setting..port_setting + 1000) - .find(|port| port_available(host, *port)) - .ok_or_else(|| anyhow!("no free TCP ports available"))?; - SocketAddr::from((host, port)) - }; - - Ok(Self { - addr, - notifier: Notifier::new(sender), - receiver_timeout: settings.websocket_receiver_timeout, - }) - } - - /// Start the websocket server given settings. 
- pub(crate) async fn start( - &self, - rx: AsyncBoundedChannelReceiver, - runner_sender: WsSender, - ) -> Result<()> { - let addr = self.addr; - info!("websocket server listening on {}", addr); - let app = Router::new().route( - "/", - get(ws_handler).with_state(ServerState { - notifier: self.notifier.clone(), - runner_sender, - receiver_timeout: self.receiver_timeout, - }), - ); - - axum::Server::bind(&addr) - .serve(app.into_make_service_with_connect_info::()) - .with_graceful_shutdown(async { - if let Ok(Message::GracefulShutdown(tx)) = rx.recv_async().await { - info!("websocket server shutting down"); - let _ = tx.send(()); - } - }) - .await?; - - Ok(()) - } - - /// Get websocket message sender for broadcasting messages to websocket - /// clients. - pub(crate) fn notifier(&self) -> Notifier { - self.notifier.clone() - } -} - -async fn ws_handler( - ws: WebSocketUpgrade, - user_agent: Option>, - State(state): State, - ConnectInfo(addr): ConnectInfo, -) -> impl IntoResponse { - let user_agent = if let Some(TypedHeader(user_agent)) = user_agent { - user_agent.to_string() - } else { - String::from("Unknown browser") - }; - info!("`{user_agent}` at {addr} connected."); - - // Finalize the upgrade process by returning upgrade callback. - // We can customize the callback by sending additional info such as address. - ws.on_upgrade(move |socket| handle_socket(socket, addr, state)) -} - -async fn handle_socket(mut socket: ws::WebSocket, addr: SocketAddr, state: ServerState) { - // Send a ping (unsupported by some browsers) just to kick things off and - // get a response. - if socket.send(AxumMsg::Ping(vec![1, 2, 3])).await.is_ok() { - debug!("Pinged {}...", addr); - } else { - info!("Could not send ping {}!", addr); - // no Error here since the only thing we can do is to close the connection. - // If we can not send messages, there is no way to salvage the statemachine anyway. - return; - } - - // Receive single message from a client (we can either receive or send with - // the socket). This will likely be the Pong for our Ping or a processed - // message from client. - // Waiting for message from a client will block this task, but will not - // block other client's connections. - if let Some(msg) = socket.recv().await { - if let Ok(msg) = msg { - if process_message(msg, addr, &state).await.is_break() { - return; - } - } else { - info!("client {} abruptly disconnected", addr); - return; - } - } - - // By splitting socket we can send and receive at the same time. - let (mut socket_sender, mut socket_receiver) = socket.split(); - let mut subscribed_rx = state.notifier.inner().subscribe(); - let handle = Handle::current(); - - let mut send_task = handle.spawn(async move { - while let Ok(msg) = subscribed_rx.recv().await { - // In any websocket error, break loop. - if socket_sender.send(AxumMsg::Binary(msg)).await.is_err() { - break; - } - } - }); - - let mut recv_task = handle.spawn(async move { - let mut cnt = 0; - while let Some(Ok(msg)) = socket_receiver.next().await { - cnt += 1; - if process_message(msg, addr, &state).await.is_break() { - break; - } - } - cnt - }); - - // If any one of the tasks exit, abort the other. - select! { - _ = (&mut send_task) => recv_task.abort(), - _ = (&mut recv_task) => send_task.abort(), - }; - - info!("Websocket context {} destroyed", addr); -} - -/// Process [messages]. 
-/// -/// [messages]: Message -async fn process_message( - msg: AxumMsg, - addr: SocketAddr, - state: &ServerState, -) -> ControlFlow<(), ()> { - match msg { - AxumMsg::Text(t) => { - debug!(">>> {} sent str: {:?}", addr, t); - } - AxumMsg::Binary(bytes) => { - debug!(">>> {} sent {}", addr, bytes.len()); - match serde_json::from_slice::>(&bytes) { - Ok(listener::Run { - action, - name, - workflow, - }) if action.eq("run") => { - let (tx, rx) = oneshot::channel(); - if let Err(err) = state - .runner_sender - .send((Message::RunWorkflow((name, workflow)), Some(tx))) - .await - { - error!(err=?err, "error sending message to runner"); - } - - if (time::timeout_at(Instant::now() + state.receiver_timeout, rx).await) - .is_err() - { - error!("did not acknowledge action=run message in time"); - } - } - Ok(_) => warn!("unknown action or message shape"), - // another message - Err(_err) => debug!( - "{}", - std::str::from_utf8(&bytes).unwrap_or(format!("{:?}", bytes).as_ref()) - ), - } - } - AxumMsg::Close(c) => { - if let Some(cf) = c { - info!( - ">>> {} sent close with code {} and reason `{}`", - addr, cf.code, cf.reason - ); - } else { - info!(">>> {} sent close message without CloseFrame", addr); - } - return ControlFlow::Break(()); - } - - AxumMsg::Pong(v) => { - debug!(">>> {} sent pong with {:?}", addr, v); - } - // You should never need to manually handle AxumMsg::Ping, as axum's websocket library - // will do so for you automagically by replying with Pong and copying the v according to - // spec. But if you need the contents of the pings you can see them here. - AxumMsg::Ping(v) => { - debug!(">>> {} sent ping with {:?}", addr, v); - } - } - ControlFlow::Continue(()) -} - -fn port_available(host: IpAddr, port: u16) -> bool { - TcpListener::bind((host.to_string(), port)).is_ok() -} - -#[cfg(test)] -mod test { - use super::*; - use crate::{channel, settings::Settings}; - use tokio::sync::mpsc; - - #[tokio::test] - async fn ws_connect() { - let settings = Settings::load().unwrap(); - let server = Server::new(settings.node().network()).unwrap(); - let (_ws_tx, ws_rx) = channel::AsyncBoundedChannel::oneshot(); - let (runner_tx, _runner_rx) = mpsc::channel(1); - let _ws_hdl = tokio::spawn({ - let ws_server = server.clone(); - async move { ws_server.start(ws_rx, runner_tx).await } - }); - - tokio_tungstenite::connect_async("ws://localhost:1337".to_string()) - .await - .unwrap(); - } -} diff --git a/homestar-runtime/src/network/ws/notifier.rs b/homestar-runtime/src/network/ws/notifier.rs deleted file mode 100644 index 31e118e0..00000000 --- a/homestar-runtime/src/network/ws/notifier.rs +++ /dev/null @@ -1,72 +0,0 @@ -//! Notifier for broadcasting messages to websocket clients. - -use anyhow::Result; -use homestar_core::{ipld::DagJson, workflow::Receipt}; -use libipld::{ipld, Cid, Ipld}; -use std::sync::Arc; -use tokio::sync::broadcast; - -/// Type-wrapper for websocket sender. -#[derive(Debug)] -pub(crate) struct Notifier(Arc>>); - -impl Clone for Notifier { - fn clone(&self) -> Self { - Self(self.0.clone()) - } -} - -impl Notifier { - /// Create a new [Notifier]. - pub(crate) fn new(sender: broadcast::Sender>) -> Self { - Self(sender.into()) - } - - /// Get a reference to the inner [broadcast::Sender]. - #[allow(dead_code)] - pub(crate) fn inner(&self) -> &Arc>> { - &self.0 - } - - /// Get and take ownership of the inner [broadcast::Sender]. - #[allow(dead_code)] - pub(crate) fn into_inner(self) -> Arc>> { - self.0 - } - - /// Send a message to all connected websocket clients. 
- pub(crate) fn notify(&self, msg: Vec) -> Result<()> { - let _ = self.0.send(msg)?; - Ok(()) - } -} - -/// A [Receipt] that is sent out *just* for websocket notifications. -#[derive(Debug, Clone, PartialEq)] -pub(crate) struct NotifyReceipt(Ipld); - -impl NotifyReceipt { - pub(crate) fn with(receipt: Receipt, cid: Cid, metadata: Option) -> Self { - let receipt: Ipld = receipt.into(); - let data = ipld!({ - "receipt": receipt, - "metadata": metadata.as_ref().map(|m| m.to_owned()).map_or(Ipld::Null, |m| m), - "receipt_cid": cid, - }); - NotifyReceipt(data) - } -} - -impl DagJson for NotifyReceipt where Ipld: From {} - -impl From for Ipld { - fn from(receipt: NotifyReceipt) -> Self { - receipt.0 - } -} - -impl From for NotifyReceipt { - fn from(ipld: Ipld) -> Self { - NotifyReceipt(ipld) - } -} diff --git a/homestar-runtime/src/receipt.rs b/homestar-runtime/src/receipt.rs index bf65fa35..c8dcd1f1 100644 --- a/homestar-runtime/src/receipt.rs +++ b/homestar-runtime/src/receipt.rs @@ -1,4 +1,7 @@ -//! Output of an invocation, referenced by its invocation pointer. +//! Runtime, extended representation of a [Receipt] for [Invocation]s and storage. +//! +//! [Receipt]: homestar_core::workflow::Receipt +//! [Invocation]: homestar_core::workflow::Invocation use anyhow::anyhow; use diesel::{ diff --git a/homestar-runtime/src/runner.rs b/homestar-runtime/src/runner.rs index 0d721a48..7c5470ab 100644 --- a/homestar-runtime/src/runner.rs +++ b/homestar-runtime/src/runner.rs @@ -1,48 +1,51 @@ //! General [Runner] interface for working across multiple workers //! and executing workflows. -#[cfg(feature = "websocket-server")] -use crate::network::ws; #[cfg(feature = "ipfs")] use crate::network::IpfsCli; use crate::{ - channel::{AsyncBoundedChannel, AsyncBoundedChannelReceiver, AsyncBoundedChannelSender}, + channel::{AsyncChannel, AsyncChannelReceiver, AsyncChannelSender}, db::Database, event_handler::{Event, EventHandler}, - metrics, - network::{rpc, swarm}, + network::{rpc, swarm, webserver}, + tasks::Fetch, worker::WorkerMessage, - workflow, Settings, Worker, + workflow::{self, Resource}, + Settings, Worker, }; use anyhow::{anyhow, Context, Result}; use atomic_refcell::AtomicRefCell; use chrono::NaiveDateTime; use dashmap::DashMap; use faststr::FastStr; -use futures::future::poll_fn; +use fnv::FnvHashSet; +use futures::{future::poll_fn, FutureExt}; use homestar_core::Workflow; use homestar_wasm::io::Arg; +use jsonrpsee::server::ServerHandle; use libipld::Cid; +use metrics_exporter_prometheus::PrometheusHandle; #[cfg(not(test))] use std::sync::atomic::{AtomicUsize, Ordering}; -use std::{ops::ControlFlow, rc::Rc, sync::Arc, task::Poll}; +use std::{ops::ControlFlow, rc::Rc, sync::Arc, task::Poll, time::Instant}; #[cfg(not(windows))] use tokio::signal::unix::{signal, SignalKind}; #[cfg(windows)] use tokio::signal::windows; use tokio::{ runtime, select, - sync::{mpsc, oneshot}, task::{AbortHandle, JoinHandle}, time, }; use tokio_util::time::{delay_queue, DelayQueue}; -use tracing::{error, info, warn}; +use tracing::{debug, error, info, warn}; mod error; pub(crate) mod file; +mod nodeinfo; pub(crate) mod response; pub(crate) use error::Error; +pub(crate) use nodeinfo::{DynamicNodeInfo, StaticNodeInfo}; #[cfg(not(test))] const HOMESTAR_THREAD: &str = "homestar-runtime"; @@ -59,25 +62,29 @@ pub(crate) trait ModifiedSet { fn append_or_insert(&self, cid: Cid, handles: Vec); } -/// [mpsc::Sender] for RPC server messages. -pub(crate) type RpcSender = mpsc::Sender<( +/// [AsyncChannelSender] for RPC server messages. 
+pub(crate) type RpcSender = AsyncChannelSender<(
     rpc::ServerMessage,
-    Option>,
+    Option>,
 )>;
 
-/// [mpsc::Receiver] for RPC server messages.
-pub(crate) type RpcReceiver = mpsc::Receiver<(
+/// [AsyncChannelReceiver] for RPC server messages.
+pub(crate) type RpcReceiver = AsyncChannelReceiver<(
     rpc::ServerMessage,
-    Option>,
+    Option>,
 )>;
 
-/// [mpsc::Sender] for sending messages websocket server clients.
-#[cfg(feature = "websocket-server")]
-pub(crate) type WsSender = mpsc::Sender<(ws::Message, Option>)>;
+/// [AsyncChannelSender] for sending messages to WebSocket server clients.
+pub(crate) type WsSender = AsyncChannelSender<(
+    webserver::Message,
+    Option>,
+)>;
 
-/// [mpsc::Receiver] for receiving messages from websocket server clients.
-#[cfg(feature = "websocket-server")]
-pub(crate) type WsReceiver = mpsc::Receiver<(ws::Message, Option>)>;
+/// [AsyncChannelReceiver] for receiving messages from WebSocket server clients.
+pub(crate) type WsReceiver = AsyncChannelReceiver<(
+    webserver::Message,
+    Option>,
+)>;
 
 impl ModifiedSet for RunningTaskSet {
     fn append_or_insert(&self, cid: Cid, mut handles: Vec) {
@@ -93,65 +100,38 @@ impl ModifiedSet for RunningTaskSet {
 /// Used to manage workers and execute/run [Workflows].
 ///
 /// [Workflows]: homestar_core::Workflow
-#[cfg(feature = "websocket-server")]
-#[cfg_attr(docsrs, doc(cfg(feature = "websocket-server")))]
-#[allow(dead_code)]
-#[derive(Debug)]
-pub struct Runner {
-    event_sender: Arc>,
-    expiration_queue: Rc>>,
-    running_tasks: Arc,
-    running_workers: RunningWorkerSet,
-    runtime: tokio::runtime::Runtime,
-    settings: Arc,
-    ws_server: Arc,
-}
-
-/// Runner interface.
-/// Used to manage workers and execute/run [Workflows].
-///
-/// [Workflows]: homestar_core::Workflow
-#[cfg(not(feature = "websocket-server"))]
-#[allow(dead_code)]
 #[derive(Debug)]
 pub struct Runner {
-    event_sender: Arc>,
+    event_sender: Arc>,
     expiration_queue: Rc>>,
+    node_info: StaticNodeInfo,
     running_tasks: Arc,
     running_workers: RunningWorkerSet,
-    runtime: tokio::runtime::Runtime,
-    settings: Arc,
+    pub(crate) runtime: tokio::runtime::Runtime,
+    pub(crate) settings: Arc,
+    webserver: Arc,
 }
 
 impl Runner {
     /// Setup bounded, MPSC channel for top-level RPC communication.
     pub(crate) fn setup_rpc_channel(capacity: usize) -> (RpcSender, RpcReceiver) {
-        mpsc::channel(capacity)
+        AsyncChannel::with(capacity)
     }
 
     /// Setup bounded, MPSC channel for top-level Worker communication.
     pub(crate) fn setup_worker_channel(
         capacity: usize,
-    ) -> (mpsc::Sender, mpsc::Receiver) {
-        mpsc::channel(capacity)
-    }
-
-    /// Oneshot channel for sending direct messages to the websocket server,
-    /// e.g. for shutdown.
-    #[cfg(feature = "websocket-server")]
-    pub(crate) fn setup_ws_oneshot_channel() -> (
-        AsyncBoundedChannelSender,
-        AsyncBoundedChannelReceiver,
+    ) -> (
+        AsyncChannelSender,
+        AsyncChannelReceiver,
     ) {
-        let (tx, rx) = AsyncBoundedChannel::oneshot();
-        (tx, rx)
+        AsyncChannel::with(capacity)
     }
 
     /// MPSC channel for sending and receiving messages through to/from
-    /// websocket server clients.
-    #[cfg(feature = "websocket-server")]
+    /// WebSocket server clients.
     pub(crate) fn setup_ws_mpsc_channel(capacity: usize) -> (WsSender, WsReceiver) {
-        mpsc::channel(capacity)
+        AsyncChannel::with(capacity)
     }
 
     /// Initialize and start the Homestar [Runner] / runtime.
@@ -185,59 +165,46 @@ impl Runner { db: impl Database + 'static, runtime: tokio::runtime::Runtime, ) -> Result { - let swarm = runtime.block_on(swarm::new(settings.node()))?; - - #[cfg(feature = "websocket-server")] - { - let ws_server = ws::Server::new(settings.node().network())?; - let ws_msg_tx = ws_server.notifier(); - - let event_handler = EventHandler::new(swarm, db, settings.node(), ws_msg_tx); - let event_sender = event_handler.sender(); - - #[cfg(feature = "ipfs")] - let _event_handler_hdl = runtime.spawn({ - let ipfs = IpfsCli::default(); - event_handler.start(ipfs) - }); - - #[cfg(not(feature = "ipfs"))] - let _event_handler_hdl = runtime.spawn(event_handler.start()); - - Ok(Self { - event_sender, - expiration_queue: Rc::new(AtomicRefCell::new(DelayQueue::new())), - running_tasks: DashMap::new().into(), - running_workers: DashMap::new(), - runtime, - settings: settings.into(), - ws_server: ws_server.into(), - }) - } + let swarm = runtime.block_on(swarm::new(settings.node().network()))?; + let peer_id = *swarm.local_peer_id(); - #[cfg(not(feature = "websocket-server"))] - { - let event_handler = EventHandler::new(swarm, db, settings.node()); - let event_sender = event_handler.sender(); - - #[cfg(feature = "ipfs")] - let _event_handler_hdl = runtime.spawn({ - let ipfs = IpfsCli::default(); - event_handler.start(ipfs) - }); - - #[cfg(not(feature = "ipfs"))] - let _event_handler_hdl = runtime.spawn(event_handler.start()); - - Ok(Self { - event_sender, - expiration_queue: Rc::new(AtomicRefCell::new(DelayQueue::new())), - running_tasks: DashMap::new().into(), - running_workers: DashMap::new(), - runtime, - settings: settings.into(), - }) - } + let webserver = webserver::Server::new(settings.node().network().webserver())?; + + #[cfg(feature = "websocket-notify")] + let (ws_msg_tx, ws_evt_tx) = { + let ws_msg_tx = webserver.workflow_msg_notifier(); + let ws_evt_tx = webserver.evt_notifier(); + + (ws_msg_tx, ws_evt_tx) + }; + + #[cfg(feature = "websocket-notify")] + let event_handler = + EventHandler::new(swarm, db, settings.node().network(), ws_evt_tx, ws_msg_tx); + #[cfg(not(feature = "websocket-notify"))] + let event_handler = EventHandler::new(swarm, db, settings.node().network()); + + let event_sender = event_handler.sender(); + + #[cfg(feature = "ipfs")] + let _event_handler_hdl = runtime.spawn({ + let ipfs = IpfsCli::new(settings.node.network.ipfs())?; + event_handler.start(ipfs) + }); + + #[cfg(not(feature = "ipfs"))] + let _event_handler_hdl = runtime.spawn(event_handler.start()); + + Ok(Self { + event_sender, + expiration_queue: Rc::new(AtomicRefCell::new(DelayQueue::new())), + node_info: StaticNodeInfo::new(peer_id), + running_tasks: DashMap::new().into(), + running_workers: DashMap::new(), + runtime, + settings: settings.into(), + webserver: webserver.into(), + }) } /// Listen loop for [Runner] signals and messages. 
@@ -245,24 +212,27 @@ impl Runner { fn serve(self, db: impl Database + 'static) -> Result<()> { let message_buffer_len = self.settings.node.network.events_buffer_len; - #[cfg(feature = "websocket-server")] - let (ws_sender, mut ws_receiver) = { - let (oneshot_ws_tx, oneshot_ws_rx) = Self::setup_ws_oneshot_channel(); + #[cfg(feature = "monitoring")] + let metrics_hdl: PrometheusHandle = self.runtime.block_on(crate::metrics::start( + self.settings.node.monitoring(), + self.settings.node.network(), + ))?; + + #[cfg(not(feature = "monitoring"))] + let metrics_hdl: PrometheusHandle = self + .runtime + .block_on(crate::metrics::start(self.settings.node.network()))?; + + let (ws_receiver, ws_hdl) = { let (mpsc_ws_tx, mpsc_ws_rx) = Self::setup_ws_mpsc_channel(message_buffer_len); - let _ws_hdl = self.runtime.spawn({ - let ws_server = self.ws_server.clone(); - async move { ws_server.start(oneshot_ws_rx, mpsc_ws_tx).await } - }); - (oneshot_ws_tx, mpsc_ws_rx) + let ws_hdl = self + .runtime + .block_on(self.webserver.start(mpsc_ws_tx, metrics_hdl))?; + (mpsc_ws_rx, ws_hdl) }; - let (rpc_tx, mut rpc_rx) = Self::setup_rpc_channel(message_buffer_len); - let (runner_worker_tx, mut runner_worker_rx) = - Self::setup_worker_channel(message_buffer_len); - - #[cfg(feature = "metrics")] - self.runtime - .block_on(metrics::start(&self.settings.monitoring))?; + let (rpc_tx, rpc_rx) = Self::setup_rpc_channel(message_buffer_len); + let (runner_worker_tx, runner_worker_rx) = Self::setup_worker_channel(message_buffer_len); let shutdown_timeout = self.settings.node.shutdown_timeout; let rpc_server = rpc::Server::new(self.settings.node.network(), rpc_tx.into()); @@ -272,77 +242,91 @@ impl Runner { let shutdown_time_left = self.runtime.block_on(async { let mut gc_interval = tokio::time::interval(self.settings.node.gc_interval); loop { - // Sadness to get around https://github.com/tokio-rs/tokio/issues/3974. - #[cfg(feature = "websocket-server")] - let ws_receiver_wait = ws_receiver.recv(); - #[cfg(not(feature = "websocket-server"))] - let ws_receiver_wait: future::Pending> = std::future::pending(); - select! { - biased; // Handle RPC messages. 
- Some((rpc_message, Some(oneshot_tx))) = rpc_rx.recv() => { + Ok((rpc_message, Some(oneshot_tx))) = rpc_rx.recv_async() => { let now = time::Instant::now(); - #[cfg(feature = "websocket-server")] let handle = self.handle_command_message( rpc_message, Channels { rpc: rpc_sender.clone(), runner: runner_worker_tx.clone(), - ws: ws_sender.clone(), }, + ws_hdl.clone(), db.clone(), now ).await; - #[cfg(not(feature = "websocket-server"))] - let handle = self.handle_command_message( - rpc_message, - Channels { - rpc: rpc_sender.clone(), - runner: runner_worker_tx.clone(), - }, - db.clone(), - now - ).await; match handle { Ok(ControlFlow::Break(())) => break now.elapsed(), Ok(ControlFlow::Continue(rpc::ServerMessage::Skip)) => {}, Ok(ControlFlow::Continue(msg @ rpc::ServerMessage::RunAck(_))) => { - info!("sending message to rpc server"); - let _ = oneshot_tx.send(msg); + debug!(subject = "rpc.ack", + category = "rpc", + "sending message to rpc server"); + let _ = oneshot_tx.send_async(msg).await; }, Err(err) => { - error!(err=?err, "error handling rpc message"); - let _ = oneshot_tx.send(rpc::ServerMessage::RunErr(err.into())); + error!(subject = "rpc.err", + category = "rpc", + err=?err, + "error handling rpc message"); + let _ = oneshot_tx.send_async(rpc::ServerMessage::RunErr(err.into())).await; }, _ => {} } } - Some((ws::Message::RunWorkflow((name, workflow)), Some(oneshot_tx))) = ws_receiver_wait => { - // TODO: Parse this from the workflow data itself. - info!("running workflow: {}", name); - let workflow_settings = workflow::Settings::default(); - match self.run_worker( - workflow, - workflow_settings, - Some(name), - runner_worker_tx.clone(), - db.clone(), - ).await { - Ok(_) => { - info!("sending message to rpc server"); - let _ = oneshot_tx.send(ws::Message::RunWorkflowAck); + Ok(msg) = ws_receiver.recv_async() => { + match msg { + (webserver::Message::RunWorkflow((name, workflow)), Some(oneshot_tx)) => { + info!(subject = "workflow", + category = "workflow.run", + "running workflow: {}", name); + // TODO: Parse this from the workflow data itself. + let workflow_settings = workflow::Settings::default(); + match self.run_worker( + workflow, + workflow_settings, + Some(name), + runner_worker_tx.clone(), + db.clone(), + ).await { + Ok(data) => { + debug!(subject = "jsonrpc.ack", + category = "jsonrpc", + "sending message to jsonrpc server"); + let _ = oneshot_tx.send_async(webserver::Message::AckWorkflow((data.info.cid, data.name))).await; + } + Err(err) => { + error!(subject = "jsonrpc.err", + category = "jsonrpc", + err=?err, + "error handling ws message"); + let _ = oneshot_tx.send_async(webserver::Message::RunErr(err.into())).await; + } + } + } - Err(err) => { - error!(err=?err, "error handling ws message"); - let _ = oneshot_tx.send(ws::Message::RunErr(err.into())); + (webserver::Message::GetNodeInfo, Some(oneshot_tx)) => { + debug!(subject = "jsonrpc.nodeinfo", + category = "jsonrpc", + "getting node info"); + let (tx, rx) = AsyncChannel::oneshot(); + let _ = self.event_sender.send_async(Event::GetListeners(tx)).await; + let dyn_node_info = if let Ok(listeners) = rx.recv_deadline(Instant::now() + self.settings.node.network.webserver.timeout) { + DynamicNodeInfo::new(listeners) + } else { + DynamicNodeInfo::new(vec![]) + }; + let _ = oneshot_tx.send_async(webserver::Message::AckNodeInfo((self.node_info.clone(), dyn_node_info))).await; } + _ => () } } + // Handle messages from the worker. 
- Some(msg) = runner_worker_rx.recv() => { + Ok(msg) = runner_worker_rx.recv_async() => { match msg { WorkerMessage::Dropped(cid) => { let _ = self.abort_worker(cid); @@ -360,42 +344,33 @@ impl Runner { Err(_) => Poll::Pending, } ) => { - info!("worker expired, aborting"); + info!(subject = "worker.expired", + category = "worker", + "worker expired, aborting"); let _ = self.abort_worker(*expired.get_ref()); }, // Handle shutdown signal. _ = Self::shutdown_signal() => { - info!("gracefully shutting down runner"); + info!(subject = "shutdown", + category = "homestar.shutdown", + "gracefully shutting down runner"); let now = time::Instant::now(); let drain_timeout = now + shutdown_timeout; - // Sub-select handling of runner `shutdown`. - #[cfg(feature = "websocket-server")] { - select! { - // Graceful shutdown. - Ok(()) = self.shutdown(rpc_sender, ws_sender) => { - break now.elapsed(); - }, - // Force shutdown upon drain timeout. - _ = time::sleep_until(drain_timeout) => { - info!("shutdown timeout reached, shutting down runner anyway"); - break now.elapsed(); - } - } - } - #[cfg(not(feature = "websocket-server"))] { - select! { - // Graceful shutdown. - Ok(()) = self.shutdown(rpc_sender) => { - break now.elapsed(); - }, - // Force shutdown upon drain timeout. - _ = time::sleep_until(drain_timeout) => { - info!("shutdown timeout reached, shutting down runner anyway"); - break now.elapsed(); - } + select! { + // Graceful shutdown. + Ok(()) = self.shutdown(rpc_sender, ws_hdl) => { + break now.elapsed(); + }, + // Force shutdown upon drain timeout. + _ = time::sleep_until(drain_timeout) => { + info!(subject = "shutdown", + category = "homestar.shutdown", + "shutdown timeout reached, shutting down runner anyway"); + break now.elapsed(); } } + } } } @@ -404,16 +379,20 @@ impl Runner { if shutdown_time_left < shutdown_timeout { self.runtime .shutdown_timeout(shutdown_timeout - shutdown_time_left); - info!("runner shutdown complete"); + info!( + subject = "shutdown", + category = "homestar.shutdown", + "runner shutdown complete" + ); } Ok(()) } - /// [mpsc::Sender] of the event-handler. + /// [AsyncChannelSender] of the event-handler. /// /// [EventHandler]: crate::EventHandler - pub(crate) fn event_sender(&self) -> Arc> { + pub(crate) fn event_sender(&self) -> Arc> { self.event_sender.clone() } @@ -532,9 +511,18 @@ impl Runner { let mut sigterm = signal(SignalKind::terminate())?; select! { - _ = tokio::signal::ctrl_c() => info!("CTRL-C received, shutting down"), - _ = sigint.recv() => info!("SIGINT received, shutting down"), - _ = sigterm.recv() => info!("SIGTERM received, shutting down"), + _ = tokio::signal::ctrl_c() => + info!(subject = "shutdown", + category = "homestar.shutdown", + "CTRL-C received, shutting down"), + _ = sigint.recv() => + info!(subject = "shutdown", + category = "homestar.shutdown", + "SIGINT received, shutting down"), + _ = sigterm.recv() => + info!(subject = "shutdown", + category = "homestar.shutdown", + "SIGTERM received, shutting down"), } Ok(()) } @@ -547,10 +535,22 @@ impl Runner { let mut sighup = windows::ctrl_break()?; select! 
{ - _ = tokio::signal::ctrl_c() => info!("CTRL-C received, shutting down"), - _ = sigint.recv() => info!("SIGINT received, shutting down"), - _ = sigterm.recv() => info!("SIGTERM received, shutting down"), - _ = sighup.recv() => info!("SIGHUP received, shutting down") + _ = tokio::signal::ctrl_c() => + info!(subject = "shutdown", + category = "homestar.shutdown", + "CTRL-C received, shutting down"), + _ = sigint.recv() => + info!(subject = "shutdown", + category = "homestar.shutdown", + "SIGINT received, shutting down"), + _ = sigterm.recv() => + info!(subject = "shutdown", + category = "homestar.shutdown", + "SIGTERM received, shutting down"), + _ = sighup.recv() => + info!(subject = "shutdown", + category = "homestar.shutdown", + "SIGHUP received, shutting down") } Ok(()) } @@ -559,54 +559,31 @@ impl Runner { /// a) RPC and runner-related channels. /// b) Event-handler channels. /// c) Running workers - #[cfg(feature = "websocket-server")] async fn shutdown( &self, - rpc_sender: Arc>, - ws_sender: AsyncBoundedChannelSender, + rpc_sender: Arc>, + ws_hdl: ServerHandle, ) -> Result<()> { - let (shutdown_sender, shutdown_receiver) = oneshot::channel(); - let _ = rpc_sender.try_send(rpc::ServerMessage::GracefulShutdown(shutdown_sender)); - let _ = shutdown_receiver.await; - - let (shutdown_sender, shutdown_receiver) = oneshot::channel(); - let _ = self - .event_sender - .send_async(Event::Shutdown(shutdown_sender)) + let (shutdown_sender, shutdown_receiver) = AsyncChannel::oneshot(); + let _ = rpc_sender + .send_async(rpc::ServerMessage::GracefulShutdown(shutdown_sender)) .await; - let _ = shutdown_receiver.await; - - let (shutdown_sender, shutdown_receiver) = oneshot::channel(); - let _ = ws_sender - .send_async(ws::Message::GracefulShutdown(shutdown_sender)) - .await; - let _ = shutdown_receiver.await; - - // abort all workers - self.abort_workers(); - - Ok(()) - } + let _ = shutdown_receiver; - /// Sequence for shutting down a [Runner], including: - /// a) RPC and runner-related channels. - /// b) Event-handler channels. - /// c) Running workers - #[cfg(not(feature = "websocket-server"))] - async fn shutdown( - &self, - rpc_sender: Arc>, - ) -> Result<()> { - let (shutdown_sender, shutdown_receiver) = oneshot::channel(); - let _ = rpc_sender.try_send(rpc::ServerMessage::GracefulShutdown(shutdown_sender)); - let _ = shutdown_receiver.await; + info!( + subject = "shutdown", + category = "homestar.shutdown", + "shutting down webserver" + ); + let _ = ws_hdl.stop(); + ws_hdl.clone().stopped().await; - let (shutdown_sender, shutdown_receiver) = oneshot::channel(); + let (shutdown_sender, shutdown_receiver) = AsyncChannel::oneshot(); let _ = self .event_sender .send_async(Event::Shutdown(shutdown_sender)) .await; - let _ = shutdown_receiver.await; + let _ = shutdown_receiver; // abort all workers self.abort_workers(); @@ -618,38 +595,29 @@ impl Runner { &self, msg: rpc::ServerMessage, channels: Channels, + ws_hdl: ServerHandle, db: impl Database + 'static, now: time::Instant, ) -> Result> { - info!("received message: {:?}", msg); match msg { rpc::ServerMessage::ShutdownCmd => { - info!("RPC shutdown signal received, shutting down runner"); + info!( + subject = "rpc.command", + category = "rpc", + "RPC shutdown signal received, shutting down runner" + ); let drain_timeout = now + self.settings.node.shutdown_timeout; - #[cfg(feature = "websocket-server")] - { - select! { - // we can unwrap here b/c we know we have a sender based - // on the feature flag. 
-                        Ok(()) = self.shutdown(channels.rpc, channels.ws) => {
-                            Ok(ControlFlow::Break(()))
-                        },
-                        _ = time::sleep_until(drain_timeout) => {
-                            info!("shutdown timeout reached, shutting down runner anyway");
-                            Ok(ControlFlow::Break(()))
-                        }
-                    }
-                }
-                #[cfg(not(feature = "websocket-server"))]
-                {
-                    select! {
-                        Ok(()) = self.shutdown(rpc_sender) => {
-                            Ok(ControlFlow::Break(()))
-                        },
-                        _ = time::sleep_until(drain_timeout) => {
-                            info!("shutdown timeout reached, shutting down runner anyway");
-                            Ok(ControlFlow::Break(()))
-                        }
+                select! {
+                    // Attempt a graceful shutdown first; fall back to the
+                    // drain-timeout branch below if it does not complete in time.
+                    Ok(()) = self.shutdown(channels.rpc, ws_hdl) => {
+                        Ok(ControlFlow::Break(()))
+                    },
+                    _ = time::sleep_until(drain_timeout) => {
+                        info!(subject = "shutdown",
+                              category = "homestar.shutdown",
+                              "shutdown timeout reached, shutting down runner anyway");
+                        Ok(ControlFlow::Break(()))
                     }
                 }
             }
@@ -668,7 +636,12 @@ impl Runner {
                 ))))
             }
             msg => {
-                warn!("received unexpected message: {:?}", msg);
+                warn!(
+                    subject = "rpc.command",
+                    category = "rpc",
+                    "received unexpected message: {:?}",
+                    msg
+                );
                 Ok(ControlFlow::Continue(rpc::ServerMessage::Skip))
             }
         }
@@ -679,7 +652,7 @@ impl Runner {
         workflow: Workflow<'static, Arg>,
         workflow_settings: workflow::Settings,
         name: Option,
-        runner_sender: mpsc::Sender,
+        runner_sender: AsyncChannelSender,
         db: impl Database + 'static,
     ) -> Result {
         let worker = {
@@ -698,14 +671,18 @@ impl Runner {
         // `clone`, as the underlying type is an `Arc`.
         let initial_info = Arc::clone(&worker.workflow_info);
         let workflow_timeout = worker.workflow_settings.timeout;
-        let workflow_name = worker.workflow_name.to_string();
+        let workflow_name = worker.workflow_name.clone();
+        let workflow_settings = worker.workflow_settings.clone();
         let timestamp = worker.workflow_started;
 
         // Spawn worker, which initializes the scheduler and runs
         // the workflow.
         info!(
+            subject = "workflow.run",
+            category = "workflow",
             cid = worker.workflow_info.cid.to_string(),
-            "running workflow with settings: {:#?}", worker.workflow_settings
+            "running workflow with settings: {:#?}",
+            worker.workflow_settings
         );
 
         // Provide workflow to network.
@@ -717,7 +694,23 @@ impl Runner { )) .await?; - let handle = self.runtime.spawn(worker.run(self.running_tasks())); + #[cfg(feature = "ipfs")] + let fetch_fn = { + let settings = Arc::clone(&self.settings); + let ipfs = IpfsCli::new(settings.node.network.ipfs())?; + move |rscs: FnvHashSet| { + async move { Fetch::get_resources(rscs, workflow_settings, ipfs).await }.boxed() + } + }; + + #[cfg(not(feature = "ipfs"))] + let fetch_fn = |rscs: FnvHashSet| { + async move { Fetch::get_resources(rscs, workflow_settings).await }.boxed() + }; + + let handle = self + .runtime + .spawn(worker.run(self.running_tasks(), fetch_fn)); // Add Cid to expirations timing wheel let delay_key = self @@ -740,24 +733,14 @@ impl Runner { struct WorkflowData { info: Arc, - name: String, + name: FastStr, timestamp: NaiveDateTime, } -#[cfg(feature = "websocket-server")] -#[derive(Debug)] -struct Channels { - rpc: Arc>, - runner: mpsc::Sender, - ws: AsyncBoundedChannelSender, -} - -#[cfg(not(feature = "websocket-server"))] #[derive(Debug)] struct Channels { - rpc: Arc>, - runner: mpsc::Sender, - runner: Arc>, + rpc: Arc>, + runner: AsyncChannelSender, } #[cfg(test)] @@ -773,34 +756,41 @@ mod test { #[homestar_runtime_proc_macro::runner_test] fn shutdown() { let TestRunner { runner, settings } = TestRunner::start(); - let (tx, _rx) = Runner::setup_rpc_channel(1); - let (ws_oneshot_tx, ws_oneshot_rx) = Runner::setup_ws_oneshot_channel(); - let (ws_tx, _ws_rx) = Runner::setup_ws_mpsc_channel(1); + let (runner_tx, _runner_rx) = Runner::setup_ws_mpsc_channel(1); let rpc_server = rpc::Server::new(settings.node.network(), Arc::new(tx)); let rpc_sender = rpc_server.sender(); let addr = SocketAddr::new( - settings.node.network.rpc_host, - settings.node.network.rpc_port, + settings.node.network.rpc.host, + settings.node.network.rpc.port, ); - runner.runtime.block_on(async { + let ws_hdl = runner.runtime.block_on(async { rpc_server.spawn().await.unwrap(); - - #[cfg(feature = "websocket-server")] - runner.runtime.spawn({ - let ws_server = runner.ws_server.clone(); - async move { ws_server.start(ws_oneshot_rx, ws_tx).await } - }); - + #[cfg(feature = "monitoring")] + let metrics_hdl = + crate::metrics::start(settings.node.monitoring(), settings.node.network()) + .await + .unwrap(); + #[cfg(not(feature = "monitoring"))] + let metrics_hdl = crate::metrics::start(settings.node.network()) + .await + .unwrap(); + + let ws_hdl = runner + .webserver + .start(runner_tx, metrics_hdl) + .await + .unwrap(); let _stream = TcpStream::connect(addr).await.expect("Connection error"); let _another_stream = TcpStream::connect(addr).await.expect("Connection error"); + + ws_hdl }); runner.runtime.block_on(async { - #[cfg(feature = "websocket-server")] - match runner.shutdown(rpc_sender, ws_oneshot_tx).await { + match runner.shutdown(rpc_sender, ws_hdl).await { Ok(()) => { // with shutdown, we should not be able to connect to the server(s) let stream_error = TcpStream::connect(addr).await; @@ -816,20 +806,9 @@ mod test { } _ => panic!("Shutdown failed."), } - #[cfg(not(feature = "websocket-server"))] - match runner.shutdown(rpc_sender).await { - Ok(()) => { - // with shutdown, we should not be able to connect to the server(s) - let stream_error = TcpStream::connect(addr).await; - assert!(stream_error.is_err()); - assert!(matches!( - stream_error.unwrap_err().kind(), - std::io::ErrorKind::ConnectionRefused - )); - } - _ => panic!("Shutdown failed."), - } }); + + unsafe { metrics::clear_recorder() } } #[homestar_runtime_proc_macro::runner_test] @@ -843,8 
+822,8 @@ mod test { runner.runtime.spawn(async move { let addr = SocketAddr::new( - settings.node.network.rpc_host, - settings.node.network.rpc_port, + settings.node.network.rpc.host, + settings.node.network.rpc.port, ); let client = Client::new(addr, context::current()).await.unwrap(); @@ -858,10 +837,14 @@ mod test { let TestRunner { runner, settings } = TestRunner::start(); runner.runtime.block_on(async { - let worker = WorkerBuilder::new(settings.node).build().await; + let builder = WorkerBuilder::new(settings.node); + let fetch_fn = builder.fetch_fn(); + let worker = builder.build().await; let workflow_cid = worker.workflow_info.cid; let workflow_timeout = worker.workflow_settings.timeout; - let handle = runner.runtime.spawn(worker.run(runner.running_tasks())); + let handle = runner + .runtime + .spawn(worker.run(runner.running_tasks(), fetch_fn)); let delay_key = runner .expiration_queue .try_borrow_mut() @@ -891,10 +874,14 @@ mod test { let TestRunner { runner, settings } = TestRunner::start(); runner.runtime.block_on(async { - let worker = WorkerBuilder::new(settings.node).build().await; + let builder = WorkerBuilder::new(settings.node); + let fetch_fn = builder.fetch_fn(); + let worker = builder.build().await; let workflow_cid = worker.workflow_info.cid; let workflow_timeout = worker.workflow_settings.timeout; - let handle = runner.runtime.spawn(worker.run(runner.running_tasks())); + let handle = runner + .runtime + .spawn(worker.run(runner.running_tasks(), fetch_fn)); let delay_key = runner .expiration_queue .try_borrow_mut() @@ -915,10 +902,14 @@ mod test { let TestRunner { runner, settings } = TestRunner::start(); runner.runtime.block_on(async { - let worker = WorkerBuilder::new(settings.node).build().await; + let builder = WorkerBuilder::new(settings.node); + let fetch_fn = builder.fetch_fn(); + let worker = builder.build().await; let workflow_cid = worker.workflow_info.cid; let workflow_timeout = worker.workflow_settings.timeout; - let handle = runner.runtime.spawn(worker.run(runner.running_tasks())); + let handle = runner + .runtime + .spawn(worker.run(runner.running_tasks(), fetch_fn)); let delay_key = runner .expiration_queue .try_borrow_mut() @@ -953,10 +944,11 @@ mod test { #[homestar_runtime_proc_macro::runner_test] fn gc_while_workers_finished() { let TestRunner { runner, settings } = TestRunner::start(); - runner.runtime.block_on(async { - let worker = WorkerBuilder::new(settings.node).build().await; - let _ = worker.run(runner.running_tasks()).await; + let builder = WorkerBuilder::new(settings.node); + let fetch_fn = builder.fetch_fn(); + let worker = builder.build().await; + let _ = worker.run(runner.running_tasks(), fetch_fn).await; }); runner.running_tasks.iter().for_each(|handles| { diff --git a/homestar-runtime/src/runner/nodeinfo.rs b/homestar-runtime/src/runner/nodeinfo.rs new file mode 100644 index 00000000..aa14be84 --- /dev/null +++ b/homestar-runtime/src/runner/nodeinfo.rs @@ -0,0 +1,39 @@ +//! Node information. + +use libp2p::{Multiaddr, PeerId}; +use serde::{Deserialize, Serialize}; + +/// Static node information available at startup. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct StaticNodeInfo { + /// The [PeerId] of a node. + pub(crate) peer_id: PeerId, +} + +impl StaticNodeInfo { + /// Create an instance of [StaticNodeInfo]. + pub(crate) fn new(peer_id: PeerId) -> Self { + Self { peer_id } + } + + /// Get a reference to the [PeerId] of a node. 
+ #[allow(dead_code)] + pub(crate) fn peer_id(&self) -> &PeerId { + &self.peer_id + } +} + +/// Dynamic node information available through events +/// at runtime. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub(crate) struct DynamicNodeInfo { + /// Listeners for the node. + pub(crate) listeners: Vec, +} + +impl DynamicNodeInfo { + /// Create an instance of [DynamicNodeInfo]. + pub(crate) fn new(listeners: Vec) -> Self { + Self { listeners } + } +} diff --git a/homestar-runtime/src/runner/response.rs b/homestar-runtime/src/runner/response.rs index 7f656456..8a5928c4 100644 --- a/homestar-runtime/src/runner/response.rs +++ b/homestar-runtime/src/runner/response.rs @@ -1,11 +1,12 @@ //! Responses for display/return to the user for -//! Client requests. +//! client requests. use crate::{ cli::show::{self, ApplyStyle}, workflow::{self, IndexedResources}, }; use chrono::NaiveDateTime; +use faststr::FastStr; use libipld::Cid; use serde::{Deserialize, Serialize}; use std::{fmt, net::SocketAddr, sync::Arc}; @@ -20,7 +21,7 @@ use tabled::{ #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Tabled)] pub struct AckWorkflow { pub(crate) cid: Cid, - pub(crate) name: String, + pub(crate) name: FastStr, pub(crate) num_tasks: u32, #[tabled(skip)] pub(crate) progress: Vec, @@ -44,7 +45,7 @@ impl AckWorkflow { /// Workflow information for response / display. pub(crate) fn new( workflow_info: Arc, - name: String, + name: FastStr, timestamp: NaiveDateTime, ) -> Self { Self { @@ -66,6 +67,7 @@ impl show::ConsoleTable for AckWorkflow { fn echo_table(&self) -> Result<(), std::io::Error> { let table = self.table(); + let mut resource_table = Table::new( self.resources .iter() diff --git a/homestar-runtime/src/scheduler.rs b/homestar-runtime/src/scheduler.rs index 4ca8ca07..de364ee1 100644 --- a/homestar-runtime/src/scheduler.rs +++ b/homestar-runtime/src/scheduler.rs @@ -19,7 +19,7 @@ use indexmap::IndexMap; use libipld::Cid; use std::{ops::ControlFlow, str::FromStr, sync::Arc}; use tokio::sync::RwLock; -use tracing::info; +use tracing::debug; /// Type alias for a [Dag] set of batched nodes. 
 ///
@@ -106,7 +106,8 @@ impl<'a> TaskScheduler<'a> {
         let mut_graph = Arc::make_mut(&mut graph);
         let schedule: &mut Schedule<'a> = mut_graph.schedule.as_mut();
         let schedule_length = schedule.len();
-        let mut resources_to_fetch: FnvHashSet = FnvHashSet::default();
+        let mut resources_to_fetch = vec![];
+        let linkmap = LinkMap::>::new();
 
         let resume = 'resume: {
             for (idx, vec) in schedule.iter().enumerate().rev() {
@@ -117,7 +118,7 @@ impl<'a> TaskScheduler<'a> {
                     .get(&cid)
                     .map(|resource| {
                         resource.iter().for_each(|rsc| {
-                            resources_to_fetch.insert(rsc.to_owned());
+                            resources_to_fetch.push((cid, rsc));
                         });
                         ptrs.push(Pointer::new(cid));
                     })
@@ -128,28 +129,32 @@ impl<'a> TaskScheduler<'a> {
                 if let Ok(pointers) = folded_pointers {
                     match Db::find_instruction_pointers(&pointers, conn) {
                         Ok(found) => {
-                            let linkmap = found.iter().fold(
-                                LinkMap::>::new(),
-                                |mut map, receipt| {
-                                    let _ = map.insert(
-                                        receipt.instruction().cid(),
-                                        receipt.output_as_arg(),
-                                    );
-
-                                    map
-                                },
-                            );
+                            let linkmap = found.iter().fold(linkmap.clone(), |mut map, receipt| {
+                                if let Some(idx) = resources_to_fetch
+                                    .iter()
+                                    .position(|(cid, _rsc)| cid == &receipt.instruction().cid())
+                                {
+                                    resources_to_fetch.swap_remove(idx);
+                                }
+
+                                let _ = map
+                                    .insert(receipt.instruction().cid(), receipt.output_as_arg());
+
+                                map
+                            });
 
                             if found.len() == vec.len() {
                                 break 'resume ControlFlow::Break((idx + 1, linkmap));
-                            } else if !found.is_empty() && found.len() < vec.len() {
-                                break 'resume ControlFlow::Break((idx, linkmap));
                             } else {
                                 continue;
                             }
                         }
                         Err(_) => {
-                            info!("receipt not available in the database");
+                            debug!(
+                                subject = "receipt.db.check",
+                                category = "scheduler.run",
+                                "receipt not available in the database"
+                            );
                             continue;
                         }
                     }
@@ -160,6 +165,11 @@ impl<'a> TaskScheduler<'a> {
             ControlFlow::Continue(())
         };
 
+        let resources_to_fetch: FnvHashSet = resources_to_fetch
+            .into_iter()
+            .map(|(_, rsc)| rsc.to_owned())
+            .collect();
+
         let fetched = fetch_fn(resources_to_fetch).await?;
 
         match resume {
@@ -183,7 +193,7 @@ impl<'a> TaskScheduler<'a> {
             }
             _ => Ok(SchedulerContext {
                 scheduler: Self {
-                    linkmap: Arc::new(LinkMap::>::new().into()),
+                    linkmap: Arc::new(linkmap.into()),
                     ran: None,
                     run: schedule.to_vec(),
                     resume_step: None,
@@ -192,6 +202,25 @@ impl<'a> TaskScheduler<'a> {
             }),
         }
     }
+
+    /// Get the number of tasks that have already run in the [Workflow].
+    ///
+    /// [Workflow]: homestar_core::Workflow
+    #[allow(dead_code)]
+    pub(crate) fn ran_length(&self) -> usize {
+        self.ran
+            .as_ref()
+            .map(|ran| ran.iter().flatten().collect::>().len())
+            .unwrap_or_default()
+    }
+
+    /// Get the number of tasks left to run in the [Workflow].
+    ///
+    /// [Workflow]: homestar_core::Workflow
+    #[allow(dead_code)]
+    pub(crate) fn run_length(&self) -> usize {
+        self.run.iter().flatten().collect::>().len()
+    }
 }
 
 #[cfg(test)]
@@ -292,7 +321,7 @@ mod test {
         let mut conn = db.conn().unwrap();
 
         let stored_receipt = MemoryDb::store_receipt(receipt.clone(), &mut conn).unwrap();
 
-        assert_eq!(receipt, stored_receipt);
+        assert_eq!(receipt, stored_receipt.unwrap());
 
         let workflow = Workflow::new(vec![task1.clone(), task2.clone()]);
         let fetch_fn = |_rscs: FnvHashSet| {
diff --git a/homestar-runtime/src/settings.rs b/homestar-runtime/src/settings.rs
index 2b286276..fb06d9a7 100644
--- a/homestar-runtime/src/settings.rs
+++ b/homestar-runtime/src/settings.rs
@@ -1,53 +1,50 @@
-//! Settings / Configuration.
+//! General runtime settings / configuration.
use config::{Config, ConfigError, Environment, File}; use http::Uri; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, DisplayFromStr, DurationMilliSeconds, DurationSeconds}; +#[cfg(feature = "ipfs")] +use std::net::Ipv4Addr; use std::{ + env, net::{IpAddr, Ipv6Addr}, path::PathBuf, time::Duration, }; +mod libp2p_config; mod pubkey_config; +pub(crate) use libp2p_config::{Libp2p, Pubsub}; pub(crate) use pubkey_config::PubkeyConfig; +#[cfg(target_os = "windows")] +const HOME_VAR: &str = "USERPROFILE"; +#[cfg(not(target_os = "windows"))] +const HOME_VAR: &str = "HOME"; + /// Application settings. #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct Settings { #[serde(default)] - pub(crate) monitoring: Monitoring, pub(crate) node: Node, } impl Settings { - /// Monitoring settings getter. - pub fn monitoring(&self) -> &Monitoring { - &self.monitoring - } - /// Node settings getter. pub fn node(&self) -> &Node { &self.node } } -/// Process monitoring settings. -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -pub struct Monitoring { - /// Metrics port for prometheus scraping. - pub metrics_port: u16, - /// Monitoring collection interval in milliseconds. - pub process_collector_interval: u64, - /// Tokio console port. - pub console_subscriber_port: u16, -} - /// Server settings. #[serde_as] -#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] pub struct Node { + /// Monitoring settings. + #[serde(default)] + pub(crate) monitoring: Monitoring, /// Network settings. #[serde(default)] pub(crate) network: Network, @@ -56,135 +53,155 @@ pub struct Node { pub(crate) db: Database, /// Garbage collection interval. #[serde_as(as = "DurationSeconds")] - #[serde(default = "default_gc_interval")] pub(crate) gc_interval: Duration, /// Shutdown timeout. #[serde_as(as = "DurationSeconds")] - #[serde(default = "default_shutdown_timeout")] pub(crate) shutdown_timeout: Duration, } -/// Network-related settings for a homestar node. +/// Database-related settings for a homestar node. +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Database { + /// Database Url provided within the configuration file. + /// + /// Note: This is not used if the `DATABASE_URL` environment variable + /// is set. + #[serde_as(as = "Option")] + pub(crate) url: Option, + /// Maximum number of connections managed by the [pool]. + /// + /// [pool]: crate::db::Pool + pub(crate) max_pool_size: u32, +} + +/// Monitoring settings. +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub struct Monitoring { + /// Tokio console port. + pub console_subscriber_port: u16, + /// Monitoring collection interval in milliseconds. + #[cfg(feature = "monitoring")] + #[serde_as(as = "DurationMilliSeconds")] + pub process_collector_interval: Duration, +} + +/// Network settings for a homestar node. #[serde_as] #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(default)] pub struct Network { + /// libp2p Settings. + pub(crate) libp2p: Libp2p, + /// Metrics Settings. + pub(crate) metrics: Metrics, /// Buffer-length for event(s) / command(s) channels. pub(crate) events_buffer_len: usize, - /// Address for [Swarm] to listen on. - /// - /// [Swarm]: libp2p::swarm::Swarm - #[serde(with = "http_serde::uri")] - pub(crate) listen_address: Uri, - /// Enable Rendezvous protocol client. 
- pub(crate) enable_rendezvous_client: bool, - /// Enable Rendezvous protocol server. - pub(crate) enable_rendezvous_server: bool, - /// Rendezvous registration TTL. - #[serde_as(as = "DurationSeconds")] - pub(crate) rendezvous_registration_ttl: Duration, - /// Rendezvous discovery interval. - #[serde_as(as = "DurationSeconds")] - pub(crate) rendezvous_discovery_interval: Duration, - /// Enable mDNS. - pub(crate) enable_mdns: bool, - /// mDNS IPv6 enable flag - pub(crate) mdns_enable_ipv6: bool, - /// mDNS query interval. - #[serde_as(as = "DurationSeconds")] - pub(crate) mdns_query_interval: Duration, - /// mDNS TTL. - #[serde_as(as = "DurationSeconds")] - pub(crate) mdns_ttl: Duration, - /// Timeout for p2p requests for a provided record. - #[serde_as(as = "DurationSeconds")] - pub(crate) p2p_provider_timeout: Duration, - /// Enable pub/sub. - pub(crate) enable_pubsub: bool, - /// Pub/sub duplicate cache time. - #[serde_as(as = "DurationSeconds")] - pub(crate) pubsub_duplication_cache_time: Duration, - /// Pub/sub hearbeat interval for mesh configuration. - #[serde_as(as = "DurationSeconds")] - pub(crate) pubsub_heartbeat: Duration, - /// Pub/sub idle timeout - #[serde_as(as = "DurationSeconds")] - pub(crate) pubsub_idle_timeout: Duration, - /// Quorum for receipt records on the DHT. - pub(crate) receipt_quorum: usize, + /// RPC server settings. + pub(crate) rpc: Rpc, + /// Pubkey setup configuration. + pub(crate) keypair_config: PubkeyConfig, + /// Event handler poll cache interval in milliseconds. + #[serde_as(as = "DurationMilliSeconds")] + pub(crate) poll_cache_interval: Duration, + /// IPFS settings. + #[cfg(feature = "ipfs")] + pub(crate) ipfs: Ipfs, + /// Webserver settings + pub(crate) webserver: Webserver, +} + +/// IPFS Settings +#[cfg(feature = "ipfs")] +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Ipfs { + /// The host where Homestar expects IPFS. + pub(crate) host: String, + /// The port where Homestar expects IPFS. + pub(crate) port: u16, +} + +/// Metrics settings. +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Metrics { + /// Metrics port for prometheus scraping. + pub(crate) port: u16, +} + +/// RPC server settings. +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Rpc { /// RPC-server port. #[serde_as(as = "DisplayFromStr")] - pub(crate) rpc_host: IpAddr, + pub(crate) host: IpAddr, /// RPC-server max-concurrent connections. - pub(crate) rpc_max_connections: usize, + pub(crate) max_connections: usize, /// RPC-server port. - pub(crate) rpc_port: u16, + pub(crate) port: u16, #[serde_as(as = "DurationSeconds")] /// RPC-server timeout. - pub(crate) rpc_server_timeout: Duration, - /// Transport connection timeout. - #[serde_as(as = "DurationSeconds")] - pub(crate) transport_connection_timeout: Duration, - /// Websocket-server host address. + pub(crate) server_timeout: Duration, +} + +/// Webserver settings +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Webserver { + /// Webserver host address. #[serde(with = "http_serde::uri")] - pub(crate) websocket_host: Uri, - /// Websocket-server port. - pub(crate) websocket_port: u16, + pub(crate) host: Uri, + /// Webserver-server port. + pub(crate) port: u16, + /// Webserver timeout. 
+ #[serde_as(as = "DurationSeconds")] + pub(crate) timeout: Duration, /// Number of *bounded* clients to send messages to, used for a /// [tokio::sync::broadcast::channel] pub(crate) websocket_capacity: usize, /// Websocket-server timeout for receiving messages from the runner. #[serde_as(as = "DurationMilliSeconds")] pub(crate) websocket_receiver_timeout: Duration, - /// Quorum for [workflow::Info] records on the DHT. - /// - /// [workflow::Info]: crate::workflow::Info - pub(crate) workflow_quorum: usize, - /// Pubkey setup configuration. - pub(crate) keypair_config: PubkeyConfig, - /// Multiaddrs of the trusted nodes to connect to on startup. - #[serde_as(as = "Vec")] - pub(crate) node_addresses: Vec, - /// Multiaddrs of the external addresses this node will announce to the - /// network. - #[serde_as(as = "Vec")] - pub(crate) announce_addresses: Vec, - /// Maximum number of peers we will dial. - pub(crate) max_connected_peers: u32, - /// Limit on the number of external addresses we announce to other peers. - pub(crate) max_announce_addresses: u32, - /// Event handler poll cache interval in milliseconds. - #[serde_as(as = "DurationMilliSeconds")] - pub(crate) poll_cache_interval: Duration, -} - -/// Database-related settings for a homestar node. -#[serde_as] -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] -#[serde(default)] -pub(crate) struct Database { - /// Database Url provided within the configuration file. - /// - /// Note: This is not used if the `DATABASE_URL` environment variable - /// is set. - #[serde_as(as = "Option")] - pub(crate) url: Option, - /// Maximum number of connections managed by the [pool]. - /// - /// [pool]: crate::db::Pool - pub(crate) max_pool_size: u32, } -impl Default for Monitoring { +impl Default for Node { fn default() -> Self { Self { - metrics_port: 4000, - process_collector_interval: 5000, - console_subscriber_port: 5555, + gc_interval: Duration::from_secs(1800), + shutdown_timeout: Duration::from_secs(20), + monitoring: Default::default(), + network: Default::default(), + db: Default::default(), } } } +impl Node { + /// Monitoring settings getter. + pub fn monitoring(&self) -> &Monitoring { + &self.monitoring + } + + /// Network settings. + pub fn network(&self) -> &Network { + &self.network + } + + /// Node shutdown timeout. + pub fn shutdown_timeout(&self) -> Duration { + self.shutdown_timeout + } +} + impl Default for Database { fn default() -> Self { Self { @@ -194,63 +211,96 @@ impl Default for Database { } } +#[cfg(feature = "monitoring")] +impl Default for Monitoring { + fn default() -> Self { + Self { + process_collector_interval: Duration::from_millis(5000), + console_subscriber_port: 6669, + } + } +} + +#[cfg(not(feature = "monitoring"))] +impl Default for Monitoring { + fn default() -> Self { + Self { + console_subscriber_port: 6669, + } + } +} + impl Default for Network { fn default() -> Self { Self { + libp2p: Libp2p::default(), + metrics: Metrics::default(), events_buffer_len: 1024, - listen_address: Uri::from_static("/ip4/0.0.0.0/tcp/0"), - enable_rendezvous_client: true, - enable_rendezvous_server: false, - rendezvous_registration_ttl: Duration::from_secs(2 * 60 * 60), - rendezvous_discovery_interval: Duration::from_secs(10 * 60), - // TODO: we would like to enable this by default, however this breaks mdns on at least some linux distros. Requires further investigation. 
- enable_mdns: true, - mdns_enable_ipv6: false, - mdns_query_interval: Duration::from_secs(5 * 60), - mdns_ttl: Duration::from_secs(60 * 9), - p2p_provider_timeout: Duration::new(30, 0), - enable_pubsub: true, - pubsub_duplication_cache_time: Duration::new(1, 0), - pubsub_heartbeat: Duration::new(60, 0), - pubsub_idle_timeout: Duration::new(60 * 60 * 24, 0), - receipt_quorum: 2, - rpc_host: IpAddr::V6(Ipv6Addr::LOCALHOST), - rpc_max_connections: 10, - rpc_port: 3030, - rpc_server_timeout: Duration::new(120, 0), - transport_connection_timeout: Duration::new(20, 0), - websocket_host: Uri::from_static("127.0.0.1"), - websocket_port: 1337, - websocket_capacity: 1024, - websocket_receiver_timeout: Duration::from_millis(200), - workflow_quorum: 3, + rpc: Rpc::default(), keypair_config: PubkeyConfig::Random, - node_addresses: Vec::new(), - announce_addresses: Vec::new(), - max_connected_peers: 32, - max_announce_addresses: 10, poll_cache_interval: Duration::from_millis(1000), + #[cfg(feature = "ipfs")] + ipfs: Default::default(), + webserver: Webserver::default(), } } } -impl Node { - /// Network settings. - pub fn network(&self) -> &Network { - &self.network +impl Network { + /// IPFS settings. + #[cfg(feature = "ipfs")] + pub(crate) fn ipfs(&self) -> &Ipfs { + &self.ipfs } - /// Node shutdown timeout. - pub fn shutdown_timeout(&self) -> Duration { - self.shutdown_timeout + + /// libp2p settings. + pub(crate) fn libp2p(&self) -> &Libp2p { + &self.libp2p + } + + /// Webserver settings. + pub(crate) fn webserver(&self) -> &Webserver { + &self.webserver + } +} + +#[cfg(feature = "ipfs")] +impl Default for Ipfs { + fn default() -> Self { + Self { + host: Ipv4Addr::LOCALHOST.to_string(), + port: 5001, + } } } -fn default_shutdown_timeout() -> Duration { - Duration::new(20, 0) +impl Default for Metrics { + fn default() -> Self { + Self { port: 4000 } + } } -fn default_gc_interval() -> Duration { - Duration::new(1800, 0) +impl Default for Rpc { + fn default() -> Self { + Self { + host: IpAddr::V6(Ipv6Addr::LOCALHOST), + max_connections: 10, + port: 3030, + server_timeout: Duration::new(120, 0), + } + } +} + +impl Default for Webserver { + fn default() -> Self { + Self { + host: Uri::from_static("127.0.0.1"), + port: 1337, + timeout: Duration::new(120, 0), + websocket_capacity: 2048, + websocket_receiver_timeout: Duration::from_millis(30_000), + } + } } impl Settings { @@ -262,34 +312,52 @@ impl Settings { /// Use two underscores as defined by the separator below pub fn load() -> Result { #[cfg(test)] - let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("config/settings.toml"); + { + let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("config/settings.toml"); + Self::build(Some(path)) + } #[cfg(not(test))] - let path = PathBuf::from("config/settings.toml"); - - Self::build(path) + Self::build(None) } /// Load settings from file string that must conform to a [PathBuf]. pub fn load_from_file(file: PathBuf) -> Result { - Self::build(file) + Self::build(Some(file)) } - fn build(path: PathBuf) -> Result { - let s = Config::builder() - .add_source(File::with_name( - &path - .canonicalize() + fn build(path: Option) -> Result { + let builder = if let Some(p) = path { + Config::builder().add_source(File::with_name( + &p.canonicalize() .map_err(|e| ConfigError::NotFound(e.to_string()))? 
.as_path() .display() .to_string(), )) + } else { + Config::builder() + }; + + let s = builder .add_source(Environment::with_prefix("HOMESTAR").separator("__")) .build()?; s.try_deserialize() } } +#[allow(dead_code)] +fn config_dir() -> PathBuf { + let config_dir = + env::var("XDG_CONFIG_HOME").map_or_else(|_| home_dir().join(".config"), PathBuf::from); + config_dir.join("homestar") +} + +#[allow(dead_code)] +fn home_dir() -> PathBuf { + let home = env::var(HOME_VAR).unwrap_or_else(|_| panic!("{} not found", HOME_VAR)); + PathBuf::from(home) +} + #[cfg(test)] mod test { use super::*; @@ -311,30 +379,38 @@ mod test { #[test] fn defaults_with_modification() { - let settings = Settings::build("fixtures/settings.toml".into()).unwrap(); + let settings = Settings::build(Some("fixtures/settings.toml".into())).unwrap(); let mut default_modded_settings = Node::default(); default_modded_settings.network.events_buffer_len = 1000; - default_modded_settings.network.websocket_port = 9999; + default_modded_settings.network.webserver.port = 9999; default_modded_settings.gc_interval = Duration::from_secs(1800); default_modded_settings.shutdown_timeout = Duration::from_secs(20); - default_modded_settings.network.node_addresses = + default_modded_settings.network.libp2p.node_addresses = vec!["/ip4/127.0.0.1/tcp/9998/ws".to_string().try_into().unwrap()]; assert_eq!(settings.node(), &default_modded_settings); } + #[test] + fn default_config() { + let settings = Settings::load().unwrap(); + let default_config = Settings::build(Some("fixtures/defaults.toml".into())) + .expect("default settings file in test fixtures"); + assert_eq!(settings, default_config); + } + #[test] fn overriding_env() { - std::env::set_var("HOMESTAR__NODE__NETWORK__RPC_PORT", "2046"); + std::env::set_var("HOMESTAR__NODE__NETWORK__RPC__PORT", "2046"); std::env::set_var("HOMESTAR__NODE__DB__MAX_POOL_SIZE", "1"); - let settings = Settings::build("fixtures/settings.toml".into()).unwrap(); - assert_eq!(settings.node.network.rpc_port, 2046); + let settings = Settings::build(Some("fixtures/settings.toml".into())).unwrap(); + assert_eq!(settings.node.network.rpc.port, 2046); assert_eq!(settings.node.db.max_pool_size, 1); } #[test] fn import_existing_key() { - let settings = Settings::build("fixtures/settings-import-ed25519.toml".into()) + let settings = Settings::build(Some("fixtures/settings-import-ed25519.toml".into())) .expect("setting file in test fixtures"); let msg = b"foo bar"; @@ -355,7 +431,7 @@ mod test { #[test] fn import_secp256k1_key() { - let settings = Settings::build("fixtures/settings-import-secp256k1.toml".into()) + let settings = Settings::build(Some("fixtures/settings-import-secp256k1.toml".into())) .expect("setting file in test fixtures"); settings @@ -368,7 +444,7 @@ mod test { #[test] fn seeded_secp256k1_key() { - let settings = Settings::build("fixtures/settings-random-secp256k1.toml".into()) + let settings = Settings::build(Some("fixtures/settings-random-secp256k1.toml".into())) .expect("setting file in test fixtures"); settings @@ -378,4 +454,34 @@ mod test { .keypair() .expect("generate a seeded secp256k1 key"); } + + #[test] + fn test_config_dir_xdg() { + env::remove_var("HOME"); + env::set_var("XDG_CONFIG_HOME", "/home/user/custom_config"); + assert_eq!( + config_dir(), + PathBuf::from("/home/user/custom_config/homestar") + ); + env::remove_var("XDG_CONFIG_HOME"); + } + + #[cfg(not(target_os = "windows"))] + #[test] + fn test_config_dir() { + env::set_var("HOME", "/home/user"); + env::remove_var("XDG_CONFIG_HOME"); + 
assert_eq!(config_dir(), PathBuf::from("/home/user/.config/homestar")); + env::remove_var("HOME"); + } + + #[cfg(target_os = "windows")] + #[test] + fn test_config_dir() { + env::remove_var("XDG_CONFIG_HOME"); + assert_eq!( + config_dir(), + PathBuf::from(format!(r"{}\.config\homestar", env!("USERPROFILE"))) + ); + } } diff --git a/homestar-runtime/src/settings/libp2p_config.rs b/homestar-runtime/src/settings/libp2p_config.rs new file mode 100644 index 00000000..3df3aba9 --- /dev/null +++ b/homestar-runtime/src/settings/libp2p_config.rs @@ -0,0 +1,190 @@ +//! [libp2p] configuration. + +use http::Uri; +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DurationSeconds}; +use std::time::Duration; + +/// libp2p settings. +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Libp2p { + /// Multiaddrs of the external addresses this node will announce to the + /// network. + #[serde_as(as = "Vec")] + pub(crate) announce_addresses: Vec, + /// Kademlia DHT Settings + pub(crate) dht: Dht, + /// Address for [Swarm] to listen on. + /// + /// [Swarm]: libp2p::swarm::Swarm + #[serde(with = "http_serde::uri")] + pub(crate) listen_address: Uri, + /// Maximum number of peers we will dial. + pub(crate) max_connected_peers: u32, + /// Limit on the number of external addresses we announce to other peers. + pub(crate) max_announce_addresses: u32, + /// Multiaddrs of the trusted nodes to connect to on startup. + #[serde_as(as = "Vec")] + pub(crate) node_addresses: Vec, + /// mDNS Settings. + pub(crate) mdns: Mdns, + /// Pubsub Settings. + pub(crate) pubsub: Pubsub, + /// Rendezvous Settings. + pub(crate) rendezvous: Rendezvous, + /// Transport connection timeout. + #[serde_as(as = "DurationSeconds")] + pub(crate) transport_connection_timeout: Duration, +} + +/// DHT settings. +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Dht { + /// Timeout for p2p requests for a provided record. + #[serde_as(as = "DurationSeconds")] + pub(crate) p2p_provider_timeout: Duration, + /// Quorum for receipt records on the DHT. + pub(crate) receipt_quorum: usize, + /// Quorum for [workflow::Info] records on the DHT. + /// + /// [workflow::Info]: crate::workflow::Info + pub(crate) workflow_quorum: usize, +} + +/// mDNS settings. +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Mdns { + /// Enable mDNS. + pub(crate) enable: bool, + /// mDNS IPv6 enable flag + pub(crate) enable_ipv6: bool, + /// mDNS query interval. + #[serde_as(as = "DurationSeconds")] + pub(crate) query_interval: Duration, + /// mDNS TTL. + #[serde_as(as = "DurationSeconds")] + pub(crate) ttl: Duration, +} + +/// Pubsub settings. +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Pubsub { + /// Enable pub/sub. + pub(crate) enable: bool, + /// Pub/sub duplicate cache time. + #[serde_as(as = "DurationSeconds")] + pub(crate) duplication_cache_time: Duration, + /// Pub/sub hearbeat interval for mesh configuration. + #[serde_as(as = "DurationSeconds")] + pub(crate) heartbeat: Duration, + /// Pub/sub idle timeout + #[serde_as(as = "DurationSeconds")] + pub(crate) idle_timeout: Duration, + /// Maximum byte size of pub/sub messages. + pub(crate) max_transmit_size: usize, + /// Minimum number of pub/sub peers. + pub(crate) mesh_n_low: usize, + /// Maximum number of pub/sub peers. 
+ pub(crate) mesh_n_high: usize, + /// Target number of pub/sub peers. + pub(crate) mesh_n: usize, + /// Minimum outbound pub/sub peers before adding more peers. + pub(crate) mesh_outbound_min: usize, +} + +/// Rendezvous settings. +#[serde_as] +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[serde(default)] +pub(crate) struct Rendezvous { + /// Enable Rendezvous protocol client. + pub(crate) enable_client: bool, + /// Enable Rendezvous protocol server. + pub(crate) enable_server: bool, + /// Rendezvous registration TTL. + #[serde_as(as = "DurationSeconds")] + pub(crate) registration_ttl: Duration, + /// Rendezvous discovery interval. + #[serde_as(as = "DurationSeconds")] + pub(crate) discovery_interval: Duration, +} + +impl Default for Libp2p { + fn default() -> Self { + Self { + announce_addresses: Vec::new(), + dht: Dht::default(), + listen_address: Uri::from_static("/ip4/0.0.0.0/tcp/0"), + max_connected_peers: 32, + max_announce_addresses: 10, + mdns: Mdns::default(), + node_addresses: Vec::new(), + pubsub: Pubsub::default(), + rendezvous: Rendezvous::default(), + transport_connection_timeout: Duration::new(60, 0), + } + } +} + +impl Libp2p { + /// Pub/sub settings getter. + pub(crate) fn pubsub(&self) -> &Pubsub { + &self.pubsub + } +} + +impl Default for Dht { + fn default() -> Self { + Self { + p2p_provider_timeout: Duration::new(30, 0), + receipt_quorum: 2, + workflow_quorum: 3, + } + } +} + +impl Default for Mdns { + fn default() -> Self { + Self { + enable: true, + enable_ipv6: false, + query_interval: Duration::from_secs(5 * 60), + ttl: Duration::from_secs(60 * 9), + } + } +} + +impl Default for Pubsub { + fn default() -> Self { + Self { + enable: true, + duplication_cache_time: Duration::new(1, 0), + heartbeat: Duration::new(60, 0), + idle_timeout: Duration::new(60 * 60 * 24, 0), + max_transmit_size: 10 * 1024 * 1024, + mesh_n_low: 1, + mesh_n_high: 10, + mesh_n: 2, + mesh_outbound_min: 1, + } + } +} + +impl Default for Rendezvous { + fn default() -> Self { + Self { + enable_client: true, + enable_server: false, + registration_ttl: Duration::from_secs(2 * 60 * 60), + discovery_interval: Duration::from_secs(10 * 60), + } + } +} diff --git a/homestar-runtime/src/settings/pubkey_config.rs b/homestar-runtime/src/settings/pubkey_config.rs index 171dcb87..a8c87ec6 100644 --- a/homestar-runtime/src/settings/pubkey_config.rs +++ b/homestar-runtime/src/settings/pubkey_config.rs @@ -1,3 +1,5 @@ +//! Pubkey configuration. 
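+//!
+//! For illustration only (not part of this change; a minimal sketch assuming the
+//! crate-internal API shown below), the runtime derives its libp2p identity from
+//! this configuration roughly as follows:
+//!
+//! ```ignore
+//! // `Random` generates a fresh ed25519 keypair on startup; the seeded and
+//! // file-import variants are resolved by the same `keypair()` call.
+//! let keypair: libp2p::identity::Keypair =
+//!     PubkeyConfig::Random.keypair().expect("keypair generation");
+//! ```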
+ use anyhow::{anyhow, Context}; use libp2p::{identity, identity::secp256k1}; use rand::{Rng, SeedableRng}; @@ -54,7 +56,11 @@ impl PubkeyConfig { pub(crate) fn keypair(&self) -> anyhow::Result { match self { PubkeyConfig::Random => { - info!("generating random ed25519 key"); + info!( + subject = "pubkey_config.random", + category = "pubkey_config", + "generating random ed25519 key" + ); Ok(identity::Keypair::generate_ed25519()) } PubkeyConfig::GenerateFromSeed(RNGSeed { key_type, seed }) => { @@ -64,14 +70,22 @@ impl PubkeyConfig { match key_type { KeyType::Ed25519 => { - info!("generating random ed25519 key from seed"); + info!( + subject = "pubkey_config.random_seed.ed25519", + category = "pubkey_config", + "generating random ed25519 key from seed" + ); identity::Keypair::ed25519_from_bytes(new_key).map_err(|e| { anyhow!("failed to generate ed25519 key from random: {:?}", e) }) } KeyType::Secp256k1 => { - info!("generating random secp256k1 key from seed"); + info!( + subject = "pubkey_config.random_seed.secp256k1", + category = "pubkey_config", + "generating random secp256k1 key from seed" + ); let sk = secp256k1::SecretKey::try_from_bytes(&mut new_key).map_err(|e| { @@ -94,7 +108,12 @@ impl PubkeyConfig { KeyType::Ed25519 => { const PEM_HEADER: &str = "PRIVATE KEY"; - info!("importing ed25519 key from: {}", path.display()); + info!( + subject = "pubkey_config.path.ed25519", + category = "pubkey_config", + "importing ed25519 key from: {}", + path.display() + ); let (tag, mut key) = sec1::der::pem::decode_vec(&buf) .map_err(|e| anyhow!("key file must be PEM formatted: {:#?}", e))?; @@ -107,7 +126,12 @@ impl PubkeyConfig { .with_context(|| "imported key material was invalid for ed25519") } KeyType::Secp256k1 => { - info!("importing secp256k1 key from: {}", path.display()); + info!( + subject = "pubkey_config.path.secp256k1", + category = "pubkey_config", + "importing secp256k1 key from: {}", + path.display() + ); let sk = match path.extension().and_then(|ext| ext.to_str()) { Some("der") => sec1::EcPrivateKey::from_der(buf.as_slice()).map_err(|e| anyhow!("failed to parse DER encoded secp256k1 key: {e:#?}")), diff --git a/homestar-runtime/src/tasks.rs b/homestar-runtime/src/tasks.rs index caf8ca77..c054c877 100644 --- a/homestar-runtime/src/tasks.rs +++ b/homestar-runtime/src/tasks.rs @@ -1,5 +1,3 @@ -#![allow(missing_docs)] - //! Module for working with task-types and task-specific functionality. 
use anyhow::{anyhow, Result}; diff --git a/homestar-runtime/src/tasks/fetch.rs b/homestar-runtime/src/tasks/fetch.rs index cc752a33..d56df0f5 100644 --- a/homestar-runtime/src/tasks/fetch.rs +++ b/homestar-runtime/src/tasks/fetch.rs @@ -5,24 +5,19 @@ #[cfg(feature = "ipfs")] use crate::network::IpfsCli; -#[cfg(any(test, feature = "test-utils"))] -use crate::tasks::WasmContext; use crate::workflow::{self, Resource}; use anyhow::Result; use fnv::FnvHashSet; -#[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] -use futures::{stream::FuturesUnordered, TryStreamExt}; use indexmap::IndexMap; -#[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] -use libipld::Cid; use std::sync::Arc; -#[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] -use tracing::info; -#[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] -use tryhard::RetryFutureConfig; pub(crate) struct Fetch; +#[cfg(any(test, feature = "test-utils"))] +const WASM_CID: &str = "bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a"; +#[cfg(any(test, feature = "test-utils"))] +const CAT_CID: &str = "bafybeiejevluvtoevgk66plh5t6xiy3ikyuuxg3vgofuvpeckb6eadresm"; + impl Fetch { /// Gather resources from IPFS or elsewhere, leveraging an exponential backoff. #[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] @@ -32,32 +27,68 @@ impl Fetch { settings: Arc, ipfs: IpfsCli, ) -> Result>> { + use futures::{stream::FuturesUnordered, TryStreamExt}; let settings = settings.as_ref(); + let retries = settings.retries; let tasks = FuturesUnordered::new(); for rsc in resources.iter() { - info!(rsc = rsc.to_string(), "Fetching resource"); - let task = tryhard::retry_fn(|| async { Self::fetch(rsc.clone(), ipfs.clone()).await }) - .with_config( - RetryFutureConfig::new(settings.retries) - .exponential_backoff(settings.retry_initial_delay) - .max_delay(settings.retry_max_delay), + let task = tryhard::retry_fn(|| async { + tracing::info!( + subject = "fetch_rsc", + category = "fetch", + rsc = rsc.to_string(), + "attempting to fetch resource from IPFS" ); - + Self::fetch(rsc.clone(), ipfs.clone()).await + }) + .retries(retries) + .exponential_backoff(settings.retry_initial_delay) + .max_delay(settings.retry_max_delay) + .on_retry(|attempts, next_delay, error| { + let err = error.to_string(); + async move { + if attempts < retries { + tracing::warn!( + subject = "fetch_rsc.err", + category = "fetch", + err = err, + attempts = attempts, + "retrying fetch after error @ {}ms", + next_delay.map(|d| d.as_millis()).unwrap_or(0) + ); + } else { + tracing::warn!( + subject = "fetch_rsc.err", + category = "fetch", + err = err, + attempts = attempts, + "maxed out # of retries" + ); + } + } + }); tasks.push(task); } - tasks.try_collect::>().await?.into_iter().try_fold( - IndexMap::default(), - |mut acc, res| { - let answer = res.1?; - acc.insert(res.0, answer); - Ok::<_, anyhow::Error>(acc) - }, - ) + tracing::info!( + subject = "fetch_rscs", + category = "fetch", + "fetching necessary resources from IPFS" + ); + if let Ok(vec) = tasks.try_collect::>().await { + vec.into_iter() + .try_fold(IndexMap::default(), |mut acc, res| { + let answer = res.1?; + acc.insert(res.0, answer); + + Ok::<_, anyhow::Error>(acc) + }) + } else { + Err(anyhow::anyhow!("Failed to fetch resources from IPFS")) + } } /// Gather resources via URLs, leveraging an exponential backoff. - /// TODO: Client calls (only) over http(s). 
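+    // Orientation note (illustrative, not part of this change): with the non-test
+    // workflow defaults introduced further below (retries = 3, retry_initial_delay
+    // = 500ms, retry_max_delay = 60s), and assuming tryhard doubles the delay per
+    // attempt, the IPFS fetch above backs off for roughly 500ms, 1s, and 2s
+    // between attempts before giving up, well under the 60s cap.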
#[cfg(all(not(feature = "ipfs"), not(test), not(feature = "test-utils")))] #[allow(dead_code)] pub(crate) async fn get_resources( @@ -76,14 +107,23 @@ impl Fetch { ) -> Result>> { println!("Running in test mode"); use crate::tasks::FileLoad; - let path = std::path::PathBuf::from(format!( + let wasm_path = std::path::PathBuf::from(format!( "{}/../homestar-wasm/fixtures/example_test.wasm", env!("CARGO_MANIFEST_DIR") )); - let bytes = WasmContext::load(path).await?; + let img_path = std::path::PathBuf::from(format!( + "{}/../examples/websocket-relay/synthcat.png", + env!("CARGO_MANIFEST_DIR") + )); + + let bytes = crate::tasks::WasmContext::load(wasm_path).await.unwrap(); + let buf = crate::tasks::WasmContext::load(img_path).await.unwrap(); let mut map = IndexMap::default(); - let rsc = "ipfs://bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy"; - map.insert(Resource::Url(url::Url::parse(rsc)?), bytes); + map.insert( + Resource::Url(url::Url::parse(format!("ipfs://{WASM_CID}").as_str()).unwrap()), + bytes, + ); + map.insert(Resource::Cid(libipld::Cid::try_from(CAT_CID).unwrap()), buf); Ok(map) } @@ -97,26 +137,34 @@ impl Fetch { ) -> Result>> { println!("Running in test mode"); use crate::tasks::FileLoad; - let path = std::path::PathBuf::from(format!( + let wasm_path = std::path::PathBuf::from(format!( "{}/../homestar-wasm/fixtures/example_test.wasm", env!("CARGO_MANIFEST_DIR") )); - let bytes = WasmContext::load(path).await?; + let img_path = std::path::PathBuf::from(format!( + "{}/../examples/websocket-relay/synthcat.png", + env!("CARGO_MANIFEST_DIR") + )); + + let bytes = crate::tasks::WasmContext::load(wasm_path).await.unwrap(); + let buf = crate::tasks::WasmContext::load(img_path).await.unwrap(); let mut map = IndexMap::default(); - let rsc = "ipfs://bafybeihzvrlcfqf6ffbp2juhuakspxj2bdsc54cabxnuxfvuqy5lvfxapy"; - map.insert(Resource::Url(url::Url::parse(rsc)?), bytes); + map.insert( + Resource::Url(url::Url::parse(format!("ipfs://{WASM_CID}").as_str()).unwrap()), + bytes, + ); + map.insert(Resource::Cid(libipld::Cid::try_from(CAT_CID).unwrap()), buf); Ok(map) } #[cfg(all(feature = "ipfs", not(test), not(feature = "test-utils")))] - #[cfg_attr(docsrs, doc(cfg(feature = "ipfs")))] async fn fetch(rsc: Resource, client: IpfsCli) -> Result<(Resource, Result>)> { match rsc { Resource::Url(url) => { let bytes = match (url.scheme(), url.domain(), url.path()) { ("ipfs", Some(cid), _) => { - let cid = Cid::try_from(cid)?; - client.get_cid(cid).await + let parsed_cid = libipld::Cid::try_from(cid)?; + client.get_cid(parsed_cid).await } (_, Some("ipfs.io"), _) => client.get_resource(&url).await, (_, _, path) if path.contains("/ipfs/") || path.contains("/ipns/") => { @@ -126,14 +174,14 @@ impl Fetch { let split: Vec<&str> = domain.splitn(3, '.').collect(); // subdomain-gateway case: // - if let (Ok(_cid), "ipfs") = (Cid::try_from(split[0]), split[1]) { + if let (Ok(_cid), "ipfs") = (libipld::Cid::try_from(split[0]), split[1]) { client.get_resource(&url).await } else { - // TODO: reqwest call + // TODO: reqwest call or error todo!() } } - // TODO: reqwest call + // TODO: reqwest call or error (_, _, _) => todo!(), }; diff --git a/homestar-runtime/src/tasks/wasm.rs b/homestar-runtime/src/tasks/wasm.rs index b55f3a37..7ba5fd70 100644 --- a/homestar-runtime/src/tasks/wasm.rs +++ b/homestar-runtime/src/tasks/wasm.rs @@ -1,3 +1,7 @@ +//! Functionality around Wasm-based [tasks]. +//! +//! 
[tasks]: homestar_core::workflow::Task + use super::FileLoad; use async_trait::async_trait; use homestar_core::workflow::input::Args; diff --git a/homestar-runtime/src/test_utils/event.rs b/homestar-runtime/src/test_utils/event.rs index ec1c3b11..4e8660ad 100644 --- a/homestar-runtime/src/test_utils/event.rs +++ b/homestar-runtime/src/test_utils/event.rs @@ -1,24 +1,23 @@ use crate::{ - channel::{AsyncBoundedChannel, AsyncBoundedChannelReceiver, AsyncBoundedChannelSender}, + channel::{AsyncChannel, AsyncChannelReceiver, AsyncChannelSender}, event_handler::Event, settings, worker::WorkerMessage, }; -use tokio::sync::mpsc; -/// Create an [mpsc::Sender], [mpsc::Receiver] pair for [Event]s. +/// Create an [AsynBoundedChannelSender], [AsyncChannelReceiver] pair for [Event]s. pub(crate) fn setup_event_channel( settings: settings::Node, -) -> ( - AsyncBoundedChannelSender, - AsyncBoundedChannelReceiver, -) { - AsyncBoundedChannel::with(settings.network.events_buffer_len) +) -> (AsyncChannelSender, AsyncChannelReceiver) { + AsyncChannel::with(settings.network.events_buffer_len) } -/// Create an [mpsc::Sender], [mpsc::Receiver] pair for worker messages. +/// Create an [AsyncChannelSender], [AsyncChannelReceiver] pair for worker messages. pub(crate) fn setup_worker_channel( settings: settings::Node, -) -> (mpsc::Sender, mpsc::Receiver) { - mpsc::channel(settings.network.events_buffer_len) +) -> ( + AsyncChannelSender, + AsyncChannelReceiver, +) { + AsyncChannel::with(settings.network.events_buffer_len) } diff --git a/homestar-runtime/src/test_utils/proc_macro/src/lib.rs b/homestar-runtime/src/test_utils/proc_macro/src/lib.rs index fb2386fb..5ffbdcc9 100644 --- a/homestar-runtime/src/test_utils/proc_macro/src/lib.rs +++ b/homestar-runtime/src/test_utils/proc_macro/src/lib.rs @@ -70,8 +70,8 @@ pub fn db_async_test(_attr: TokenStream, item: TokenStream) -> TokenStream { /// runner.runtime.block_on(rpc_server.spawn()).unwrap(); /// runner.runtime.spawn(async move { /// let addr = SocketAddr::new( -/// settings.node.network.rpc_host, -/// settings.node.network.rpc_port, +/// settings.node.network.rpc.host, +/// settings.node.network.rpc.port, /// ); /// let client = Client::new(addr, context::current()).await.unwrap(); /// let response = client.ping().await.unwrap(); @@ -98,9 +98,11 @@ pub fn runner_test(_attr: TokenStream, item: TokenStream) -> TokenStream { impl TestRunner { fn start() -> TestRunner { let mut settings = crate::Settings::load().unwrap(); - settings.node.network.websocket_port = ::homestar_core::test_utils::ports::get_port() as u16; - settings.node.network.rpc_port = ::homestar_core::test_utils::ports::get_port() as u16; + settings.node.network.webserver.port = ::homestar_core::test_utils::ports::get_port() as u16; + settings.node.network.rpc.port = ::homestar_core::test_utils::ports::get_port() as u16; + settings.node.network.metrics.port = ::homestar_core::test_utils::ports::get_port() as u16; settings.node.db.url = Some(format!("{}.db", #func_name_as_string)); + settings.node.network.webserver.websocket_receiver_timeout = std::time::Duration::from_millis(500); let db = crate::test_utils::db::MemoryDb::setup_connection_pool(&settings.node, None).unwrap(); let runner = crate::Runner::start(settings.clone(), db).unwrap(); TestRunner { runner, settings } diff --git a/homestar-runtime/src/test_utils/worker_builder.rs b/homestar-runtime/src/test_utils/worker_builder.rs index 4546fdb6..14e0d255 100644 --- a/homestar-runtime/src/test_utils/worker_builder.rs +++ 
b/homestar-runtime/src/test_utils/worker_builder.rs @@ -1,10 +1,20 @@ //! Module for building out [Worker]s for testing purposes. use super::{db::MemoryDb, event}; +#[cfg(feature = "ipfs")] +use crate::network::IpfsCli; use crate::{ - channel::AsyncBoundedChannelSender, db::Database, event_handler::Event, settings, - worker::WorkerMessage, workflow, Settings, Worker, + channel::AsyncChannelSender, + db::Database, + event_handler::Event, + settings, + tasks::Fetch, + worker::WorkerMessage, + workflow::{self, Resource}, + Settings, Worker, }; +use fnv::FnvHashSet; +use futures::{future::BoxFuture, FutureExt}; use homestar_core::{ ipld::DagCbor, test_utils::workflow as workflow_test_utils, @@ -12,15 +22,48 @@ use homestar_core::{ Workflow, }; use homestar_wasm::io::Arg; +use indexmap::IndexMap; use libipld::Cid; -use tokio::sync::mpsc; +/// Utility structure for building out [Worker]s for testing purposes. +/// +/// [Worker]: crate::Worker +#[cfg(feature = "ipfs")] pub(crate) struct WorkerBuilder<'a> { + /// In-memory database for testing. db: MemoryDb, - event_sender: AsyncBoundedChannelSender, - runner_sender: mpsc::Sender, + /// Event channel sender. + event_sender: AsyncChannelSender, + /// [IPFS client]. + /// + /// [IPFS client]: crate::network::IpfsCli + ipfs: IpfsCli, + /// Runner channel sender. + runner_sender: AsyncChannelSender, + /// Name of the workflow. name: Option, + /// [Workflow] to run. workflow: Workflow<'a, Arg>, + /// [Workflow] settings. + workflow_settings: workflow::Settings, +} + +/// Utility structure for building out [Worker]s for testing purposes. +/// +/// [Worker]: crate::Worker +#[cfg(not(feature = "ipfs"))] +pub(crate) struct WorkerBuilder<'a> { + /// In-memory database for testing. + db: MemoryDb, + /// Event channel sender. + event_sender: AsyncChannelSender, + /// Runner channel sender. + runner_sender: AsyncChannelSender, + /// Name of the workflow. + name: Option, + /// [Workflow] to run. + workflow: Workflow<'a, Arg>, + /// [Workflow] settings. 
workflow_settings: workflow::Settings, } @@ -31,12 +74,12 @@ impl<'a> WorkerBuilder<'a> { let (instruction1, instruction2, _) = workflow_test_utils::related_wasm_instructions::(); let task1 = Task::new( - RunInstruction::Expanded(instruction1), + RunInstruction::Expanded(instruction1.clone()), config.clone().into(), UcanPrf::default(), ); let task2 = Task::new( - RunInstruction::Expanded(instruction2), + RunInstruction::Expanded(instruction2.clone()), config.into(), UcanPrf::default(), ); @@ -46,13 +89,31 @@ impl<'a> WorkerBuilder<'a> { let workflow = Workflow::new(vec![task1, task2]); let workflow_cid = workflow.clone().to_cid().unwrap(); - Self { - db: MemoryDb::setup_connection_pool(&settings, None).unwrap(), - event_sender: evt_tx, - runner_sender: wk_tx, - name: Some(workflow_cid.to_string()), - workflow, - workflow_settings: workflow::Settings::default(), + + #[cfg(feature = "ipfs")] + { + let ipfs = IpfsCli::new(settings.network.ipfs()).unwrap(); + Self { + db: MemoryDb::setup_connection_pool(&settings, None).unwrap(), + event_sender: evt_tx, + ipfs: ipfs.clone(), + runner_sender: wk_tx, + name: Some(workflow_cid.to_string()), + workflow, + workflow_settings: workflow::Settings::default(), + } + } + + #[cfg(not(feature = "ipfs"))] + { + Self { + db: MemoryDb::setup_connection_pool(&settings, None).unwrap(), + event_sender: evt_tx, + runner_sender: wk_tx, + name: Some(workflow_cid.to_string()), + workflow, + workflow_settings: workflow::Settings::default(), + } } } @@ -71,6 +132,43 @@ impl<'a> WorkerBuilder<'a> { .unwrap() } + /// Fetch-function closure for the [Worker]/[Scheduler] to use. + /// + /// [Worker]: crate::Worker + /// [Scheduler]: crate::TaskScheduler + #[cfg(feature = "ipfs")] + #[allow(dead_code)] + pub(crate) fn fetch_fn( + &self, + ) -> impl FnOnce(FnvHashSet) -> BoxFuture<'a, anyhow::Result>>> + { + let fetch_settings = self.workflow_settings.clone().into(); + let ipfs = self.ipfs.clone(); + let fetch_fn = move |rscs: FnvHashSet| { + async move { Fetch::get_resources(rscs, fetch_settings, ipfs).await }.boxed() + }; + + fetch_fn + } + + /// Fetch-function closure for the [Worker]/[Scheduler] to use. + /// + /// [Worker]: crate::Worker + /// [Scheduler]: crate::TaskScheduler + #[cfg(not(feature = "ipfs"))] + #[allow(dead_code)] + pub(crate) fn fetch_fn( + &self, + ) -> impl FnOnce(FnvHashSet) -> BoxFuture<'a, anyhow::Result>>> + { + let fetch_settings = self.workflow_settings.clone().into(); + let fetch_fn = |rscs: FnvHashSet| { + async move { Fetch::get_resources(rscs, fetch_settings).await }.boxed() + }; + + fetch_fn + } + /// Get the [Cid] of the workflow from the builder state. #[allow(dead_code)] pub(crate) fn workflow_cid(&self) -> Cid { @@ -100,13 +198,10 @@ impl<'a> WorkerBuilder<'a> { self } - /// Build a [Worker] with a specific Event [mpsc::Sender]. + /// Build a [Worker] with a specific Event [AsyncChannelSender]. #[allow(dead_code)] - pub(crate) fn with_event_sender( - mut self, - event_sender: AsyncBoundedChannelSender, - ) -> Self { - self.event_sender = event_sender.into(); + pub(crate) fn with_event_sender(mut self, event_sender: AsyncChannelSender) -> Self { + self.event_sender = event_sender; self } diff --git a/homestar-runtime/src/worker.rs b/homestar-runtime/src/worker.rs index 0a122e36..50bbbe71 100644 --- a/homestar-runtime/src/worker.rs +++ b/homestar-runtime/src/worker.rs @@ -2,30 +2,30 @@ //! sends [Event]'s to the [EventHandler]. //! //! [Workflow]: homestar_core::Workflow -//! [EventHandler]: crate::event_handler::EventHandler +//! 
[EventHandler]: crate::EventHandler -#[cfg(feature = "ipfs")] -use crate::network::IpfsCli; +#[cfg(feature = "websocket-notify")] +use crate::event_handler::event::Replay; use crate::{ - channel::{AsyncBoundedChannel, AsyncBoundedChannelSender}, + channel::{AsyncChannel, AsyncChannelSender}, db::Database, event_handler::{ - event::{Captured, QueryRecord, Replay}, + event::{Captured, QueryRecord}, swarm_event::{FoundEvent, ResponseEvent}, Event, }, network::swarm::CapsuleTag, runner::{ModifiedSet, RunningTaskSet}, - scheduler::{ExecutionGraph, TaskScheduler}, - tasks::{Fetch, RegisteredTasks, WasmContext}, + scheduler::ExecutionGraph, + tasks::{RegisteredTasks, WasmContext}, workflow::{self, Resource}, - Db, Receipt, + Db, Receipt, TaskScheduler, }; -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use chrono::NaiveDateTime; use faststr::FastStr; use fnv::FnvHashSet; -use futures::FutureExt; +use futures::{future::BoxFuture, FutureExt}; use homestar_core::{ bail, ipld::DagCbor, @@ -43,20 +43,20 @@ use homestar_wasm::{ }; use indexmap::IndexMap; use libipld::{Cid, Ipld}; -use std::{collections::BTreeMap, sync::Arc}; -use tokio::{ - sync::{mpsc, RwLock}, - task::JoinSet, - time::{self, Instant}, -}; +use std::{collections::BTreeMap, sync::Arc, time::Instant}; +use tokio::{sync::RwLock, task::JoinSet}; use tracing::{debug, error, info}; /// [JoinSet] of tasks run by a [Worker]. #[allow(dead_code)] pub(crate) type TaskSet = JoinSet>; +/// Messages sent to [Worker] from [Runner]. +/// +/// [Runner]: crate::Runner #[derive(Debug, Clone, PartialEq)] pub(crate) enum WorkerMessage { + /// Signal that the [Worker] has been dropped for a workflow run. Dropped(Cid), } @@ -64,13 +64,25 @@ pub(crate) enum WorkerMessage { #[allow(dead_code)] #[allow(missing_debug_implementations)] pub(crate) struct Worker<'a, DB: Database> { + /// [ExecutionGraph] of the [Workflow] to run. pub(crate) graph: Arc>, - pub(crate) event_sender: Arc>, - pub(crate) runner_sender: mpsc::Sender, + /// [EventHandler] channel to send [Event]s to. + /// + /// [EventHandler]: crate::EventHandler + pub(crate) event_sender: Arc>, + /// [Runner] channel to send [WorkerMessage]s to. + /// + /// [Runner]: crate::Runner + pub(crate) runner_sender: AsyncChannelSender, + /// [Database] pool to pull connections from for the [Worker] run. pub(crate) db: DB, + /// Local name of the [Workflow] being run. pub(crate) workflow_name: FastStr, + /// [Workflow] information. pub(crate) workflow_info: Arc, + /// [Workflow] settings. pub(crate) workflow_settings: Arc, + /// [NaiveDateTime] of when the [Workflow] was started. pub(crate) workflow_started: NaiveDateTime, } @@ -87,8 +99,8 @@ where settings: workflow::Settings, // Name would be runner specific, separated from core workflow spec. 
name: Option, - event_sender: Arc>, - runner_sender: mpsc::Sender, + event_sender: Arc>, + runner_sender: AsyncChannelSender, db: DB, ) -> Result> { let p2p_timeout = settings.p2p_timeout; @@ -143,33 +155,29 @@ where /// /// [Instruction]: homestar_core::workflow::Instruction /// [Swarm]: crate::network::swarm - pub(crate) async fn run(self, running_tasks: Arc) -> Result<()> { - let workflow_settings_fetch = self.workflow_settings.clone(); - #[cfg(feature = "ipfs")] - let fetch_fn = { - let ipfs = IpfsCli::default(); - - move |rscs: FnvHashSet| { - async move { Fetch::get_resources(rscs, workflow_settings_fetch, ipfs).await } - .boxed() - } - }; - - #[cfg(not(feature = "ipfs"))] - let fetch_fn = |rscs: Vec| { - async move { fetch::get_resources(rscs, workflow_settings_fetch).await }.boxed() - }; - - let scheduler_ctx = TaskScheduler::init( + pub(crate) async fn run(self, running_tasks: Arc, fetch_fn: F) -> Result<()> + where + F: FnOnce(FnvHashSet) -> BoxFuture<'a, Result>>>, + { + match TaskScheduler::init( self.graph.clone(), // Arc'ed &mut self.db.conn()?, fetch_fn, ) - .await?; - - self.run_queue(scheduler_ctx.scheduler, running_tasks).await + .await + { + Ok(ctx) => self.run_queue(ctx.scheduler, running_tasks).await, + Err(err) => { + error!(subject = "worker.init.err", + category = "worker.run", + err=?err, + "error initializing scheduler"); + Err(anyhow!("error initializing scheduler")) + } + } } + #[allow(unused_mut)] async fn run_queue( mut self, mut scheduler: TaskScheduler<'a>, @@ -192,18 +200,31 @@ where linkmap: Arc>>>, resources: Arc>>>, db: impl Database, - event_sender: Arc>, + event_sender: Arc>, ) -> Result, ResolveError> { info!( + subject = "worker.resolve_cid", + category = "worker.run", workflow_cid = workflow_cid.to_string(), cid = cid.to_string(), - "resolving cid" + "attempting to resolve cid in workflow" ); if let Some(result) = linkmap.read().await.get(&cid) { - info!(cid = cid.to_string(), "found in in-memory linkmap"); + debug!( + subject = "worker.resolve_cid", + category = "worker.run", + cid = cid.to_string(), + "found CID in in-memory linkmap" + ); Ok(result.to_owned()) } else if let Some(bytes) = resources.read().await.get(&Resource::Cid(cid)) { + debug!( + subject = "worker.resolve_cid", + category = "worker.run", + cid = cid.to_string(), + "found CID in map of resources" + ); Ok(InstructionResult::Ok(Arg::Ipld(Ipld::Bytes( bytes.to_vec(), )))) @@ -212,8 +233,12 @@ where match Db::find_instruction_by_cid(cid, conn) { Ok(found) => Ok(found.output_as_arg()), Err(_) => { - debug!("no related instruction receipt found in the DB"); - let (tx, rx) = AsyncBoundedChannel::oneshot(); + debug!( + subject = "worker.resolve_cid", + category = "worker.run", + "no related instruction receipt found in the DB" + ); + let (tx, rx) = AsyncChannel::oneshot(); let _ = event_sender .send_async(Event::FindRecord(QueryRecord::with( cid, @@ -222,24 +247,23 @@ where ))) .await; - let found = match time::timeout_at( - Instant::now() + workflow_settings.p2p_timeout, - rx.recv_async(), - ) - .await + let found = match rx + .recv_deadline(Instant::now() + workflow_settings.p2p_timeout) { - Ok(Ok(ResponseEvent::Found(Ok(FoundEvent::Receipt(found))))) => found, - Ok(Ok(ResponseEvent::Found(Err(err)))) => { + Ok(ResponseEvent::Found(Ok(FoundEvent::Receipt(found)))) => found, + Ok(ResponseEvent::Found(Err(err))) => { bail!(ResolveError::UnresolvedCid(format!( "failure in attempting to find event: {err}" ))) } - Ok(Ok(_)) => bail!(ResolveError::UnresolvedCid( + 
Ok(ResponseEvent::NoPeersAvailable) => { + bail!(ResolveError::UnresolvedCid( + "no peers available to communicate with".to_string() + )) + } + Ok(_) => bail!(ResolveError::UnresolvedCid( "wrong or unexpected event message received".to_string(), )), - Ok(Err(err)) => bail!(ResolveError::UnresolvedCid(format!( - "failure in attempting to find receipt: {err}" - ))), Err(err) => bail!(ResolveError::UnresolvedCid(format!( "timeout deadline reached for invocation receipt @ {cid}: {err}", ))), @@ -257,14 +281,16 @@ where } } - // Always replay previous receipts. + // Replay previous receipts if subscriptions are on. #[cfg(feature = "websocket-notify")] { - if scheduler.ran.as_ref().is_some_and(|ran| !ran.is_empty()) { + if scheduler.ran_length() > 0 { info!( + subject = "worker.replay", + category = "worker.run", workflow_cid = self.workflow_info.cid.to_string(), "{} tasks left to run, sending last batch for workflow", - scheduler.ran.as_ref().unwrap().len() + scheduler.run_length() ); let mut pointers = Vec::new(); for batch in scheduler @@ -358,21 +384,38 @@ where }); let handle = task_set.spawn(async move { - let resolved = resolved.await?; + let resolved = match resolved.await { + Ok(inst_result) => inst_result, + Err(err) => { + error!(subject = "worker.resolve_cid.err", + category = "worker.run", + err=?err, + "error resolving cid"); + return Err(anyhow!("error resolving cid: {err}")) + .with_context(|| { + format!("could not spawn task for cid: {workflow_cid}") + }); + } + }; match wasm_ctx.run(wasm, &fun, resolved).await { Ok(output) => Ok(( output, instruction_ptr, invocation_ptr, receipt_meta, - additional_meta, - )), - Err(e) => Err(anyhow!("cannot execute wasm module: {e}")), + additional_meta)), + Err(err) => Err( + anyhow!("cannot execute wasm module: {err}")) + .with_context(|| { + format!("not able to run fn {fun} for cid: {instruction_ptr}, in workflow {workflow_cid}") + }), } }); handles.push(handle); } None => error!( + subject = "worker.run.task.err", + category = "worker.run", "no valid task/instruction-type referenced by operation: {}", instruction.op() ), @@ -381,11 +424,27 @@ where // Concurrently add handles to Runner's running set. 
running_tasks.append_or_insert(self.workflow_info.cid(), handles); - while let Some(res) = task_set.join_next().await { - let (executed, instruction_ptr, invocation_ptr, receipt_meta, add_meta) = res??; - let output_to_store = Ipld::try_from(executed)?; + let (executed, instruction_ptr, invocation_ptr, receipt_meta, add_meta) = match res + { + Ok(Ok(data)) => data, + Ok(Err(err)) => { + error!(subject = "worker.run.task.err", + category = "worker.run", + err=?err, + "error in running task"); + break; + } + Err(err) => { + error!(subject = "worker.run.task.err", + category = "worker.run", + err=?err, + "error in running task"); + break; + } + }; + let output_to_store = Ipld::try_from(executed)?; let invocation_receipt = InvocationReceipt::new( invocation_ptr, InstructionResult::Ok(output_to_store), @@ -414,6 +473,13 @@ where let stored_receipt = Db::commit_receipt(self.workflow_info.cid, receipt, &mut self.db.conn()?)?; + debug!( + subject = "db.commit_receipt", + category = "worker.run", + cid = self.workflow_info.cid.to_string(), + "commited to database" + ); + let _ = self .event_sender .send_async(Event::CapturedReceipt(Captured::with( @@ -462,6 +528,7 @@ mod test { let (tx, rx) = test_utils::event::setup_event_channel(settings.clone().node); let builder = WorkerBuilder::new(settings.node).with_event_sender(tx); + let fetch_fn = builder.fetch_fn(); let db = builder.db(); let worker = builder.build().await; let workflow_cid = worker.workflow_info.cid; @@ -481,7 +548,7 @@ mod test { let running_tasks = Arc::new(RunningTaskSet::new()); let worker_workflow_cid = worker.workflow_info.cid; - worker.run(running_tasks.clone()).await.unwrap(); + worker.run(running_tasks.clone(), fetch_fn).await.unwrap(); assert_eq!(running_tasks.len(), 1); assert!(running_tasks.contains_key(&worker_workflow_cid)); assert_eq!(running_tasks.get(&worker_workflow_cid).unwrap().len(), 2); @@ -579,6 +646,7 @@ mod test { let builder = WorkerBuilder::new(settings.node) .with_event_sender(tx) .with_tasks(vec![task1, task2]); + let fetch_fn = builder.fetch_fn(); let db = builder.db(); let workflow_cid = builder.workflow_cid(); @@ -623,15 +691,17 @@ mod test { let running_tasks = Arc::new(RunningTaskSet::new()); let worker_workflow_cid = worker.workflow_info.cid; - worker.run(running_tasks.clone()).await.unwrap(); + worker.run(running_tasks.clone(), fetch_fn).await.unwrap(); assert_eq!(running_tasks.len(), 1); assert!(running_tasks.contains_key(&worker_workflow_cid)); assert_eq!(running_tasks.get(&worker_workflow_cid).unwrap().len(), 1); // First receipt is a replay receipt. 
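+        // (Replay events are only emitted when the `websocket-notify` feature is
+        // enabled, matching the feature-gated replay block in the worker, so the
+        // assertion below is gated on the same feature.)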
- let replay_msg = rx.recv_async().await.unwrap(); - - assert!(matches!(replay_msg, Event::ReplayReceipts(_))); + #[cfg(feature = "websocket-notify")] + { + let replay_msg = rx.recv_async().await.unwrap(); + assert!(matches!(replay_msg, Event::ReplayReceipts(_))); + } // we should have received 1 receipt let next_run_receipt = rx.recv_async().await.unwrap(); diff --git a/homestar-runtime/src/workflow.rs b/homestar-runtime/src/workflow.rs index fd2b40c7..b4669a9a 100644 --- a/homestar-runtime/src/workflow.rs +++ b/homestar-runtime/src/workflow.rs @@ -29,6 +29,7 @@ use itertools::Itertools; use libipld::{cbor::DagCborCodec, cid::Cid, prelude::Codec, serde::from_ipld, Ipld}; use serde::{Deserialize, Serialize}; use std::{collections::BTreeMap, path::Path}; +use tracing::debug; use url::Url; mod info; @@ -150,53 +151,74 @@ impl<'a> Builder<'a> { fn aot(self) -> anyhow::Result> { let lookup_table = self.lookup_table()?; + let (mut dag, unawaits, awaited, resources) = + self.into_inner().tasks().into_iter().enumerate().try_fold( + (Dag::default(), vec![], vec![], IndexMap::new()), + |(mut dag, mut unawaits, mut awaited, mut resources), (i, task)| { + let instr_cid = task.instruction_cid()?; + debug!( + subject = "task.instruction", + category = "aot.information", + "instruction cid of task: {}", + instr_cid + ); + + // Clone as we're owning the struct going backward. + let ptr: Pointer = Invocation::::from(task.clone()).try_into()?; + + let RunInstruction::Expanded(instr) = task.into_instruction() else { + bail!("workflow tasks/instructions must be expanded / inlined") + }; - let (dag, resources) = self.into_inner().tasks().into_iter().enumerate().try_fold( - (Dag::default(), IndexMap::new()), - |(mut dag, mut resources), (i, task)| { - let instr_cid = task.instruction_cid()?; - - // Clone as we're owning the struct going backward. - let ptr: Pointer = Invocation::::from(task.clone()).try_into()?; - - let RunInstruction::Expanded(instr) = task.into_instruction() else { - bail!("workflow tasks/instructions must be expanded / inlined") - }; - - resources - .entry(instr_cid) - .or_insert_with(|| vec![Resource::Url(instr.resource().to_owned())]); - - let parsed = instr.input().parse()?; - let reads = parsed - .args() - .deferreds() - .fold(vec![], |mut in_flow_reads, cid| { - if let Some(v) = lookup_table.get(&cid) { - in_flow_reads.push(*v) - } - // TODO: else, it's a Promise from another task outside - // of the workflow. - in_flow_reads - }); - - parsed.args().links().for_each(|cid| { resources .entry(instr_cid) - .and_modify(|prev_rscs| { - prev_rscs.push(Resource::Cid(cid.to_owned())); - }) - .or_insert_with(|| vec![Resource::Cid(cid.to_owned())]); - }); + .or_insert_with(|| vec![Resource::Url(instr.resource().to_owned())]); + let parsed = instr.input().parse()?; + let reads = parsed + .args() + .deferreds() + .fold(vec![], |mut in_flow_reads, cid| { + if let Some(v) = lookup_table.get(&cid) { + in_flow_reads.push(*v) + } + // TODO: else, it's a Promise from another task outside + // of the workflow. 
+ in_flow_reads + }); + + parsed.args().links().for_each(|cid| { + resources + .entry(instr_cid) + .and_modify(|prev_rscs| { + prev_rscs.push(Resource::Cid(cid.to_owned())); + }) + .or_insert_with(|| vec![Resource::Cid(cid.to_owned())]); + }); - let node = Node::new(Vertex::new(instr.to_owned(), parsed, ptr)) - .with_name(instr_cid.to_string()) - .with_result(i); + let node = Node::new(Vertex::new(instr.to_owned(), parsed, ptr)) + .with_name(instr_cid.to_string()) + .with_result(i); - dag.add_node(node.with_reads(reads)); - Ok::<_, anyhow::Error>((dag, resources)) - }, - )?; + if !reads.is_empty() { + dag.add_node(node.with_reads(reads.clone())); + awaited.extend(reads); + } else { + unawaits.push(node); + } + + Ok::<_, anyhow::Error>((dag, unawaits, awaited, resources)) + }, + )?; + + for mut node in unawaits.clone().into_iter() { + if node.get_results().any(|r| awaited.contains(r)) { + dag.add_node(node); + } else { + // set barrier for non-awaited nodes + node.set_barrier(1); + dag.add_node(node); + } + } Ok(AOTContext { dag, @@ -266,13 +288,13 @@ impl IndexedResources { /// Iterate over all [Resource]s as references. #[allow(dead_code)] pub(crate) fn iter(&self) -> impl Iterator { - self.0.values().flatten().dedup() + self.0.values().flatten().unique() } /// Iterate over all [Resource]s. #[allow(dead_code)] pub(crate) fn into_iter(self) -> impl Iterator { - self.0.into_values().flatten().dedup() + self.0.into_values().flatten().unique() } } @@ -365,8 +387,15 @@ where mod test { use super::*; use homestar_core::{ + ipld::DagCbor, test_utils, - workflow::{config::Resources, instruction::RunInstruction, prf::UcanPrf, Task}, + workflow::{ + config::Resources, + instruction::RunInstruction, + pointer::{Await, AwaitResult}, + prf::UcanPrf, + Ability, Input, Task, + }, }; use std::path::Path; @@ -457,7 +486,7 @@ mod test { let (instruction1, instruction2, instruction3) = test_utils::workflow::related_wasm_instructions::(); let task1 = Task::new( - RunInstruction::Expanded(instruction1), + RunInstruction::Expanded(instruction1.clone()), config.clone().into(), UcanPrf::default(), ); @@ -475,17 +504,56 @@ mod test { let (instruction4, _) = test_utils::workflow::wasm_instruction_with_nonce::(); let task4 = Task::new( RunInstruction::Expanded(instruction4), + config.clone().into(), + UcanPrf::default(), + ); + + let (instruction5, _) = test_utils::workflow::wasm_instruction_with_nonce::(); + let task5 = Task::new( + RunInstruction::Expanded(instruction5), + config.clone().into(), + UcanPrf::default(), + ); + + let promise1 = Await::new( + Pointer::new(instruction1.clone().to_cid().unwrap()), + AwaitResult::Ok, + ); + + let dep_instr = Instruction::new( + instruction1.resource().to_owned(), + Ability::from("wasm/run"), + Input::::Ipld(Ipld::Map(BTreeMap::from([ + ("func".into(), Ipld::String("add_two".to_string())), + ( + "args".into(), + Ipld::List(vec![Ipld::try_from(promise1.clone()).unwrap()]), + ), + ]))), + ); + + let task6 = Task::new( + RunInstruction::Expanded(dep_instr), config.into(), UcanPrf::default(), ); - let tasks = vec![task1.clone(), task2.clone(), task3.clone(), task4.clone()]; + let tasks = vec![ + task6.clone(), + task1.clone(), + task2.clone(), + task3.clone(), + task4.clone(), + task5.clone(), + ]; let workflow = Workflow::new(tasks); let instr1 = task1.instruction_cid().unwrap().to_string(); let instr2 = task2.instruction_cid().unwrap().to_string(); let instr3 = task3.instruction_cid().unwrap().to_string(); let instr4 = task4.instruction_cid().unwrap().to_string(); + let 
instr5 = task5.instruction_cid().unwrap().to_string(); + let instr6 = task6.instruction_cid().unwrap().to_string(); let builder = Builder::new(workflow); let schedule = builder.graph().unwrap().schedule; @@ -508,11 +576,32 @@ mod test { assert!( nodes == vec![ - format!("{instr1}, {instr4}"), - instr2.clone(), - instr3.clone() + format!("{instr1}"), + format!("{instr6}, {instr2}"), + format!("{instr3}"), + format!("{instr4}, {instr5}") ] - || nodes == vec![format!("{instr4}, {instr1}"), instr2, instr3] + || nodes + == vec![ + format!("{instr1}"), + format!("{instr6}, {instr2}"), + format!("{instr3}"), + format!("{instr5}, {instr4}") + ] + || nodes + == vec![ + format!("{instr1}"), + format!("{instr2}, {instr6}"), + format!("{instr3}"), + format!("{instr4}, {instr5}") + ] + || nodes + == vec![ + format!("{instr1}"), + format!("{instr2}, {instr6}"), + format!("{instr3}"), + format!("{instr5}, {instr4}") + ] ); } } diff --git a/homestar-runtime/src/workflow/info.rs b/homestar-runtime/src/workflow/info.rs index a82f6e99..e4b7e351 100644 --- a/homestar-runtime/src/workflow/info.rs +++ b/homestar-runtime/src/workflow/info.rs @@ -1,6 +1,6 @@ use super::IndexedResources; use crate::{ - channel::{AsyncBoundedChannel, AsyncBoundedChannelSender}, + channel::{AsyncChannel, AsyncChannelSender}, db::{Connection, Database}, event_handler::{ event::QueryRecord, @@ -17,11 +17,13 @@ use faststr::FastStr; use homestar_core::{ipld::DagJson, workflow::Pointer}; use libipld::{cbor::DagCborCodec, prelude::Codec, serde::from_ipld, Cid, Ipld}; use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, fmt, sync::Arc, time::Duration}; -use tokio::{ - runtime::Handle, - time::{self, Instant}, +use std::{ + collections::BTreeMap, + fmt, + sync::Arc, + time::{Duration, Instant}, }; +use tokio::runtime::Handle; use tracing::info; /// [Workflow] header tag, for sharing workflow information over libp2p. @@ -30,6 +32,7 @@ use tracing::info; pub const WORKFLOW_TAG: &str = "ipvm/workflow"; const CID_KEY: &str = "cid"; +const NAME_KEY: &str = "name"; const NUM_TASKS_KEY: &str = "num_tasks"; const PROGRESS_KEY: &str = "progress"; const PROGRESS_COUNT_KEY: &str = "progress_count"; @@ -41,11 +44,29 @@ const RESOURCES_KEY: &str = "resources"; #[derive(Debug, Clone, PartialEq, Queryable, Insertable, Identifiable, Selectable)] #[diesel(table_name = crate::db::schema::workflows, primary_key(cid))] pub struct Stored { + /// Wrapped-[Cid] of [Workflow]. + /// + /// [Workflow]: homestar_core::Workflow pub(crate) cid: Pointer, + /// Local name of [Workflow]. + /// + /// [Workflow]: homestar_core::Workflow pub(crate) name: Option, + /// Number of tasks in [Workflow]. + /// + /// [Workflow]: homestar_core::Workflow pub(crate) num_tasks: i32, + /// Map of [Instruction] [Cid]s to resources. + /// + /// [Instruction]: homestar_core::workflow::Instruction pub(crate) resources: IndexedResources, + /// Local timestamp of [Workflow] creation. + /// + /// [Workflow]: homestar_core::Workflow pub(crate) created_at: NaiveDateTime, + /// Local timestamp of [Workflow] completion. 
+ /// + /// [Workflow]: homestar_core::Workflow pub(crate) completed_at: Option, } @@ -137,6 +158,7 @@ impl StoredReceipt { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Info { pub(crate) cid: Cid, + pub(crate) name: Option, pub(crate) num_tasks: u32, pub(crate) progress: Vec, pub(crate) progress_count: u32, @@ -147,8 +169,11 @@ impl fmt::Display for Info { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "cid: {}, progress: {}/{}", - self.cid, self.progress_count, self.num_tasks + "cid: {}, local_name: {}, name, progress: {}/{}", + self.cid, + self.name.clone().unwrap_or(self.cid.to_string().into()), + self.progress_count, + self.num_tasks ) } } @@ -161,6 +186,7 @@ impl Info { let cid = stored.cid.cid(); Self { cid, + name: stored.name.map(|name| name.into()), num_tasks: stored.num_tasks as u32, progress, progress_count, @@ -173,6 +199,7 @@ impl Info { let cid = stored.cid.cid(); Self { cid, + name: stored.name.map(|name| name.into()), num_tasks: stored.num_tasks as u32, progress: vec![], progress_count: 0, @@ -254,7 +281,7 @@ impl Info { name: FastStr, resources: IndexedResources, p2p_timeout: Duration, - event_sender: Arc>, + event_sender: Arc>, mut conn: Connection, ) -> Result<(Self, NaiveDateTime)> { let timestamp = Utc::now().naive_utc(); @@ -266,21 +293,21 @@ impl Info { Ok((_, info)) => Ok((info, timestamp)), Err(_err) => { info!( + subject = "workflow.init.db.check", + category = "workflow", cid = workflow_cid.to_string(), "workflow information not available in the database" ); - let result = Db::store_workflow( - Stored::new( - Pointer::new(workflow_cid), - Some(name.into_string()), - workflow_len as i32, - resources, - timestamp, - ), - &mut conn, - )?; + let stored = Stored::new( + Pointer::new(workflow_cid), + Some(name.into_string()), + workflow_len as i32, + resources, + timestamp, + ); + let result = Db::store_workflow(stored.clone(), &mut conn)?; let workflow_info = Self::default(result); // spawn a task to retrieve the workflow info from the @@ -304,7 +331,7 @@ impl Info { pub(crate) async fn gather<'a>( workflow_cid: Cid, p2p_timeout: Duration, - event_sender: Arc>, + event_sender: Arc>, mut conn: Option, handle_timeout_fn: Option) -> Result>, ) -> Result { @@ -315,6 +342,8 @@ impl Info { Some((_name, workflow_info)) => Ok(workflow_info), None => { info!( + subject = "workflow.gather.db.check", + category = "workflow", cid = workflow_cid.to_string(), "workflow information not available in the database" ); @@ -336,11 +365,11 @@ impl Info { async fn retrieve_from_query<'a>( workflow_cid: Cid, p2p_timeout: Duration, - event_sender: Arc>, + event_sender: Arc>, conn: Option, handle_timeout_fn: Option) -> Result>, ) -> Result { - let (tx, rx) = AsyncBoundedChannel::oneshot(); + let (tx, rx) = AsyncChannel::oneshot(); event_sender .send_async(Event::FindRecord(QueryRecord::with( workflow_cid, @@ -349,8 +378,8 @@ impl Info { ))) .await?; - match time::timeout_at(Instant::now() + p2p_timeout, rx.recv_async()).await { - Ok(Ok(ResponseEvent::Found(Ok(FoundEvent::Workflow(workflow_info))))) => { + match rx.recv_deadline(Instant::now() + p2p_timeout) { + Ok(ResponseEvent::Found(Ok(FoundEvent::Workflow(workflow_info)))) => { // store workflow receipts from info, as we've already stored // the static information. 
if let Some(mut conn) = conn { @@ -359,13 +388,12 @@ impl Info { Ok(workflow_info) } - Ok(Ok(ResponseEvent::Found(Err(err)))) => { + Ok(ResponseEvent::Found(Err(err))) => { bail!("failure in attempting to find event: {err}") } - Ok(Ok(event)) => { + Ok(event) => { bail!("received unexpected event {event:?} for workflow {workflow_cid}") } - Ok(Err(err)) => bail!("failure in attempting to find workflow: {err}"), Err(err) => handle_timeout_fn .map(|f| f(workflow_cid, conn).context(err)) .unwrap_or(Err(anyhow!( @@ -379,6 +407,14 @@ impl From for Ipld { fn from(workflow: Info) -> Self { Ipld::Map(BTreeMap::from([ (CID_KEY.into(), Ipld::Link(workflow.cid)), + ( + NAME_KEY.into(), + workflow + .name + .as_ref() + .map(|name| name.to_string().into()) + .unwrap_or(Ipld::Null), + ), ( NUM_TASKS_KEY.into(), Ipld::Integer(workflow.num_tasks as i128), @@ -406,6 +442,13 @@ impl TryFrom for Info { .ok_or_else(|| anyhow!("no `cid` set"))? .to_owned(), )?; + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); let num_tasks = from_ipld( map.get(NUM_TASKS_KEY) .ok_or_else(|| anyhow!("no `num_tasks` set"))? @@ -429,6 +472,7 @@ impl TryFrom for Info { Ok(Self { cid, + name, num_tasks, progress, progress_count, diff --git a/homestar-runtime/src/workflow/settings.rs b/homestar-runtime/src/workflow/settings.rs index dc5c0313..b5eae6de 100644 --- a/homestar-runtime/src/workflow/settings.rs +++ b/homestar-runtime/src/workflow/settings.rs @@ -7,10 +7,15 @@ use std::time::Duration; /// Workflow settings. #[derive(Debug, Clone, PartialEq)] pub struct Settings { + /// Number of retries for a given workflow. pub(crate) retries: u32, + /// Maximum delay between retries. pub(crate) retry_max_delay: Duration, + /// Initial delay between retries. pub(crate) retry_initial_delay: Duration, + /// Timeout for P2P/DHT operations. pub(crate) p2p_timeout: Duration, + /// Timeout for a given workflow. 
pub(crate) timeout: Duration, } @@ -18,10 +23,10 @@ pub struct Settings { impl Default for Settings { fn default() -> Self { Self { - retries: 10, + retries: 3, retry_max_delay: Duration::new(60, 0), retry_initial_delay: Duration::from_millis(500), - p2p_timeout: Duration::new(60, 0), + p2p_timeout: Duration::from_millis(500), timeout: Duration::new(3600, 0), } } @@ -31,11 +36,11 @@ impl Default for Settings { impl Default for Settings { fn default() -> Self { Self { - retries: 1, + retries: 0, retry_max_delay: Duration::new(1, 0), retry_initial_delay: Duration::from_millis(50), p2p_timeout: Duration::from_millis(10), - timeout: Duration::from_secs(120), + timeout: Duration::from_secs(3600), } } } diff --git a/homestar-runtime/tests/cli.rs b/homestar-runtime/tests/cli.rs index 0687aac5..e7fce91c 100644 --- a/homestar-runtime/tests/cli.rs +++ b/homestar-runtime/tests/cli.rs @@ -1,14 +1,15 @@ #[cfg(not(windows))] use crate::utils::kill_homestar_daemon; -use crate::utils::{kill_homestar, startup_ipfs, stop_all_bins, BIN_NAME}; +use crate::utils::{ + kill_homestar, remove_db, stop_homestar, wait_for_socket_connection, + wait_for_socket_connection_v6, BIN_NAME, +}; use anyhow::Result; use assert_cmd::prelude::*; use once_cell::sync::Lazy; use predicates::prelude::*; -use retry::{delay::Exponential, retry}; use serial_test::file_serial; use std::{ - net::{IpAddr, Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr, TcpStream}, path::PathBuf, process::{Command, Stdio}, }; @@ -18,10 +19,7 @@ static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)) #[test] #[file_serial] fn test_help_serial() -> Result<()> { - let _ = stop_all_bins(); - - #[cfg(feature = "ipfs")] - let _ = startup_ipfs(); + let _ = stop_homestar(); Command::new(BIN.as_os_str()) .arg("help") @@ -45,7 +43,7 @@ fn test_help_serial() -> Result<()> { .stdout(predicate::str::contains("help")) .stdout(predicate::str::contains("version")); - let _ = stop_all_bins(); + let _ = stop_homestar(); Ok(()) } @@ -53,10 +51,7 @@ fn test_help_serial() -> Result<()> { #[test] #[file_serial] fn test_version_serial() -> Result<()> { - let _ = stop_all_bins(); - - #[cfg(feature = "ipfs")] - let _ = startup_ipfs(); + let _ = stop_homestar(); Command::new(BIN.as_os_str()) .arg("--version") @@ -68,7 +63,7 @@ fn test_version_serial() -> Result<()> { env!("CARGO_PKG_VERSION") ))); - let _ = stop_all_bins(); + let _ = stop_homestar(); Ok(()) } @@ -76,10 +71,7 @@ fn test_version_serial() -> Result<()> { #[test] #[file_serial] fn test_server_not_running_serial() -> Result<()> { - let _ = stop_all_bins(); - - #[cfg(feature = "ipfs")] - let _ = startup_ipfs(); + let _ = stop_homestar(); Command::new(BIN.as_os_str()) .arg("ping") @@ -111,7 +103,7 @@ fn test_server_not_running_serial() -> Result<()> { .or(predicate::str::contains("No connection could be made"))), ); - let _ = stop_all_bins(); + let _ = stop_homestar(); Ok(()) } @@ -119,35 +111,28 @@ fn test_server_not_running_serial() -> Result<()> { #[test] #[file_serial] fn test_server_serial() -> Result<()> { - let _ = stop_all_bins(); - - #[cfg(feature = "ipfs")] - let _ = startup_ipfs(); + const DB: &str = "test_server_serial.db"; + let _ = stop_homestar(); Command::new(BIN.as_os_str()) .arg("start") .arg("-db") - .arg("homestar.db") + .arg(DB) .assert() .failure(); - let mut homestar_proc = Command::new(BIN.as_os_str()) + let homestar_proc = Command::new(BIN.as_os_str()) .arg("start") .arg("-c") .arg("tests/fixtures/test_v6.toml") .arg("--db") - .arg("homestar.db") + .arg(DB) .stdout(Stdio::piped()) 
.spawn() .unwrap(); - let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 9837); - let result = retry(Exponential::from_millis(1000).take(10), || { - TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) - }); - - if result.is_err() { - homestar_proc.kill().unwrap(); + if wait_for_socket_connection_v6(9837, 1000).is_err() { + let _ = kill_homestar(homestar_proc, None); panic!("Homestar server/runtime failed to start in time"); } @@ -178,37 +163,31 @@ fn test_server_serial() -> Result<()> { let _ = Command::new(BIN.as_os_str()).arg("stop").output(); let _ = kill_homestar(homestar_proc, None); - let _ = stop_all_bins(); + let _ = stop_homestar(); + remove_db(DB); Ok(()) } -#[cfg(feature = "test-utils")] #[test] #[file_serial] fn test_workflow_run_serial() -> Result<()> { - let _ = stop_all_bins(); + const DB: &str = "test_workflow_run_serial.db"; - #[cfg(feature = "ipfs")] - let _ = startup_ipfs(); + let _ = stop_homestar(); - let mut homestar_proc = Command::new(BIN.as_os_str()) + let homestar_proc = Command::new(BIN.as_os_str()) .arg("start") .arg("-c") - .arg("tests/fixtures/test_workflow.toml") + .arg("tests/fixtures/test_workflow1.toml") .arg("--db") - .arg("homestar.db") + .arg(DB) .stdout(Stdio::piped()) .spawn() .unwrap(); - let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 9840); - let result = retry(Exponential::from_millis(1000).take(10), || { - TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) - }); - - if result.is_err() { - homestar_proc.kill().unwrap(); + if wait_for_socket_connection_v6(9840, 1000).is_err() { + let _ = kill_homestar(homestar_proc, None); panic!("Homestar server/runtime failed to start in time"); } @@ -221,10 +200,7 @@ fn test_workflow_run_serial() -> Result<()> { .assert() .success() .stdout(predicate::str::contains( - "bafyrmibcfltf6vhtfdson5z4av4r4wg3rccpt4hxajt54msacojeecazqy", - )) - .stdout(predicate::str::contains( - "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a", )) .stdout(predicate::str::contains("num_tasks")) .stdout(predicate::str::contains("progress_count")); @@ -239,18 +215,15 @@ fn test_workflow_run_serial() -> Result<()> { .assert() .success() .stdout(predicate::str::contains( - "bafyrmibcfltf6vhtfdson5z4av4r4wg3rccpt4hxajt54msacojeecazqy", - )) - .stdout(predicate::str::contains( - "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4", + "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a", )) .stdout(predicate::str::contains("num_tasks")) .stdout(predicate::str::contains("progress_count")); let _ = Command::new(BIN.as_os_str()).arg("stop").output(); - let _ = kill_homestar(homestar_proc, None); - let _ = stop_all_bins(); + let _ = stop_homestar(); + remove_db(DB); Ok(()) } @@ -259,27 +232,20 @@ fn test_workflow_run_serial() -> Result<()> { #[file_serial] #[cfg(not(windows))] fn test_daemon_serial() -> Result<()> { - let _ = stop_all_bins(); - - #[cfg(feature = "ipfs")] - let _ = startup_ipfs(); + const DB: &str = "test_daemon_serial.db"; + let _ = stop_homestar(); Command::new(BIN.as_os_str()) .arg("start") .arg("-c") - .arg("tests/fixtures/test_v4_alt.toml") + .arg("tests/fixtures/test_v4.toml") .arg("-d") - .env("DATABASE_URL", "homestar.db") + .env("DATABASE_URL", DB) .stdout(Stdio::piped()) .assert() .success(); - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9836); - let result = retry(Exponential::from_millis(1000).take(10), || { - 
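The wait_for_socket_connection and wait_for_socket_connection_v6 helpers used throughout these tests replace the inline retry/TcpStream loops being removed here. Their implementations live in tests/utils.rs and are not part of this diff; a plausible sketch built from the removed code, reading the second argument as the base retry delay in milliseconds:

use retry::{delay::Exponential, retry};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr, TcpStream};

// Assumed shape of the tests/utils.rs helpers: retry a TCP connect against
// localhost with exponential backoff, succeeding once the port accepts.
fn wait_for_socket_connection(port: u16, base_delay_ms: u64) -> Result<(), ()> {
    let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port);
    retry(Exponential::from_millis(base_delay_ms).take(10), || {
        TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both))
    })
    .map(|_| ())
    .map_err(|_| ())
}

fn wait_for_socket_connection_v6(port: u16, base_delay_ms: u64) -> Result<(), ()> {
    let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), port);
    retry(Exponential::from_millis(base_delay_ms).take(10), || {
        TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both))
    })
    .map(|_| ())
    .map_err(|_| ())
}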
TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) - }); - - if result.is_err() { + if wait_for_socket_connection(9000, 1000).is_err() { panic!("Homestar server/runtime failed to start in time"); } @@ -288,87 +254,37 @@ fn test_daemon_serial() -> Result<()> { .arg("--host") .arg("127.0.0.1") .arg("-p") - .arg("9836") + .arg("9000") .assert() .success() .stdout(predicate::str::contains("127.0.0.1")) .stdout(predicate::str::contains("pong")); - let _ = stop_all_bins(); + let _ = stop_homestar(); let _ = kill_homestar_daemon(); + remove_db(DB); Ok(()) } #[test] #[file_serial] -#[cfg(windows)] -fn test_signal_kill_serial() -> Result<()> { - let _ = stop_all_bins(); - - #[cfg(feature = "ipfs")] - let _ = startup_ipfs(); - - let mut homestar_proc = Command::new(BIN.as_os_str()) - .arg("start") - .arg("--db") - .arg("homestar.db") - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - - let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 3030); - let result = retry(Exponential::from_millis(1000).take(10), || { - TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) - }); - - if result.is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - Command::new(BIN.as_os_str()) - .arg("ping") - .assert() - .success() - .stdout(predicate::str::contains("::1")) - .stdout(predicate::str::contains("pong")); - - let _ = Command::new(BIN.as_os_str()).arg("stop").output(); - let _ = kill_homestar(homestar_proc, None); - - Command::new(BIN.as_os_str()).arg("ping").assert().failure(); - - let _ = stop_all_bins(); - - Ok(()) -} - -#[test] -#[file_serial] -#[cfg(windows)] fn test_server_v4_serial() -> Result<()> { - let _ = stop_all_bins(); + const DB: &str = "test_server_v4_serial.db"; + let _ = stop_homestar(); - #[cfg(feature = "ipfs")] - let _ = startup_ipfs(); - - let mut homestar_proc = Command::new(BIN.as_os_str()) + let homestar_proc = Command::new(BIN.as_os_str()) .arg("start") .arg("-c") .arg("tests/fixtures/test_v4.toml") .arg("--db") - .arg("homestar.db") + .arg(DB) .stdout(Stdio::piped()) .spawn() .unwrap(); - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9835); - let result = retry(Exponential::from_millis(1000).take(10), || { - TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) - }); - - if result.is_err() { - homestar_proc.kill().unwrap(); + if wait_for_socket_connection(9000, 1000).is_err() { + let _ = kill_homestar(homestar_proc, None); panic!("Homestar server/runtime failed to start in time"); } @@ -377,61 +293,16 @@ fn test_server_v4_serial() -> Result<()> { .arg("--host") .arg("127.0.0.1") .arg("-p") - .arg("9835") + .arg("9000") .assert() .success() .stdout(predicate::str::contains("127.0.0.1")) .stdout(predicate::str::contains("pong")); let _ = Command::new(BIN.as_os_str()).arg("stop").output(); - let _ = kill_homestar(homestar_proc, None); - let _ = stop_all_bins(); - - Ok(()) -} - -#[test] -#[file_serial] -#[cfg(not(windows))] -fn test_daemon_v4_serial() -> Result<()> { - let _ = stop_all_bins(); - - #[cfg(feature = "ipfs")] - let _ = startup_ipfs(); - - Command::new(BIN.as_os_str()) - .arg("start") - .arg("-c") - .arg("tests/fixtures/test_v4.toml") - .arg("-d") - .env("DATABASE_URL", "homestar.db") - .stdout(Stdio::piped()) - .assert() - .success(); - - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 9835); - let result = retry(Exponential::from_millis(1000).take(10), || { - TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) - }); - - if 
result.is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - Command::new(BIN.as_os_str()) - .arg("ping") - .arg("--host") - .arg("127.0.0.1") - .arg("-p") - .arg("9835") - .assert() - .success() - .stdout(predicate::str::contains("127.0.0.1")) - .stdout(predicate::str::contains("pong")); - - let _ = stop_all_bins(); - let _ = kill_homestar_daemon(); + let _ = stop_homestar(); + remove_db(DB); Ok(()) } diff --git a/homestar-runtime/tests/fixtures/test-workflow-add-one.json b/homestar-runtime/tests/fixtures/test-workflow-add-one.json index 42dc847d..5b83a711 100644 --- a/homestar-runtime/tests/fixtures/test-workflow-add-one.json +++ b/homestar-runtime/tests/fixtures/test-workflow-add-one.json @@ -17,7 +17,7 @@ }, "nnc": "", "op": "wasm/run", - "rsc": "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4" + "rsc": "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a" } }, { @@ -33,7 +33,7 @@ "args": [ { "await/ok": { - "/": "bafyrmig5jivpubiljl26w5qc4om2rxbya6h43ljanotrvp2b2opux6gtbe" + "/": "bafyrmie3vqqilbt6ghxqkvzieials254hwqr23fl6ecktt4kdmftbxejfu" } } ], @@ -41,7 +41,7 @@ }, "nnc": "", "op": "wasm/run", - "rsc": "ipfs://bafybeiabbxwf2vn4j3zm7bbojr6rt6k7o6cg6xcbhqkllubmsnvocpv7y4" + "rsc": "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a" } } ] diff --git a/homestar-runtime/tests/fixtures/test-workflow-image-pipeline.json b/homestar-runtime/tests/fixtures/test-workflow-image-pipeline.json new file mode 100644 index 00000000..a9362b96 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test-workflow-image-pipeline.json @@ -0,0 +1,75 @@ +{ + "tasks": [ + { + "cause": null, + "meta": { + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": [ + { + "/": "bafybeiejevluvtoevgk66plh5t6xiy3ikyuuxg3vgofuvpeckb6eadresm" + }, + 150, + 350, + 500, + 500 + ], + "func": "crop" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a" + } + }, + { + "cause": null, + "meta": { + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": [ + { + "await/ok": { + "/": "bafyrmiaadxb2oauwkak5ugyvgmi4jtw5bck2e3rvoznlegahniv654b3l4" + } + } + ], + "func": "rotate90" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a" + } + }, + { + "cause": null, + "meta": { + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": [ + { + "await/ok": { + "/": "bafyrmigat3k3pbwavivjg3fldsnlwaxpznechmuibnhhldfhfiwmbnbyrq" + } + } + ], + "func": "grayscale" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a" + } + } + ] +} diff --git a/homestar-runtime/tests/fixtures/test-workflow-jco.json b/homestar-runtime/tests/fixtures/test-workflow-jco.json new file mode 100644 index 00000000..abc2d206 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test-workflow-jco.json @@ -0,0 +1,44 @@ +{ + "tasks": [ + { + "cause": null, + "meta": { + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": ["helloworld", 10], + "func": "sum" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeibawnb3pytqmky4ph37hj7y7qosqcneofjextqq55zhxfniiletfu" + } + }, + { + "cause": null, + "meta": { + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": [ + { + "await/ok": { + "/": 
"bafyrmigltroc4nrdaskjselusvfcuooc52k2rl7nmrf72edbauyptmx5bq" + } + } + ], + "func": "hashbytes" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeifrgndv3blrxucig2pei6uaoyiyipk5vvzds7ps47ec6pzf4ax2d4" + } + } + ] +} diff --git a/homestar-runtime/tests/fixtures/test-workflow-no-awaits1.json b/homestar-runtime/tests/fixtures/test-workflow-no-awaits1.json new file mode 100644 index 00000000..b8569a09 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test-workflow-no-awaits1.json @@ -0,0 +1,29 @@ +{ + "tasks": [ + { + "cause": null, + "meta": { + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": [ + { + "/": "bafybeiejevluvtoevgk66plh5t6xiy3ikyuuxg3vgofuvpeckb6eadresm" + }, + 150, + 150, + 100, + 100 + ], + "func": "crop" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a" + } + } + ] +} diff --git a/homestar-runtime/tests/fixtures/test-workflow-no-awaits2.json b/homestar-runtime/tests/fixtures/test-workflow-no-awaits2.json new file mode 100644 index 00000000..e82e7f63 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test-workflow-no-awaits2.json @@ -0,0 +1,54 @@ +{ + "tasks": [ + { + "cause": null, + "meta": { + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": [ + { + "/": "bafybeiejevluvtoevgk66plh5t6xiy3ikyuuxg3vgofuvpeckb6eadresm" + }, + 10, + 10, + 101, + 100 + ], + "func": "crop" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a" + } + }, + { + "cause": null, + "meta": { + "memory": 4294967296, + "time": 100000 + }, + "prf": [], + "run": { + "input": { + "args": [ + { + "/": "bafybeiejevluvtoevgk66plh5t6xiy3ikyuuxg3vgofuvpeckb6eadresm" + }, + 150, + 150, + 100, + 100 + ], + "func": "crop" + }, + "nnc": "", + "op": "wasm/run", + "rsc": "ipfs://bafybeig6u35v6t3f4j3zgz2jvj4erd45fbkeolioaddu3lmu6uxm3ilb7a" + } + } + ] +} diff --git a/homestar-runtime/tests/fixtures/test_gossip1.toml b/homestar-runtime/tests/fixtures/test_gossip1.toml new file mode 100644 index 00000000..a8500685 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test_gossip1.toml @@ -0,0 +1,30 @@ +[node] + +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5550 + +# Peer ID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN +[node.network.keypair_config] +existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" } + +[node.network.libp2p] +listen_address = "/ip4/127.0.0.1/tcp/7020" +node_addresses = [ + "/ip4/127.0.0.1/tcp/7021/p2p/16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc", +] + +[node.network.libp2p.mdns] +enable = false + +[node.network.libp2p.rendezvous] +enable_client = false + +[node.network.metrics] +port = 3990 + +[node.network.rpc] +port = 9790 + +[node.network.webserver] +port = 7990 diff --git a/homestar-runtime/tests/fixtures/test_gossip2.toml b/homestar-runtime/tests/fixtures/test_gossip2.toml new file mode 100644 index 00000000..ccc9e9c3 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test_gossip2.toml @@ -0,0 +1,30 @@ +[node] + +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5551 + +# Peer ID 16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc +[node.network.keypair_config] +existing = { key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" } + +[node.network.libp2p] +listen_address = "/ip4/127.0.0.1/tcp/7021" +node_addresses = [ + 
"/ip4/127.0.0.1/tcp/7020/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", +] + +[node.network.libp2p.mdns] +enable = false + +[node.network.metrics] +port = 3991 + +[node.network.libp2p.rendezvous] +enable_client = false + +[node.network.rpc] +port = 9791 + +[node.network.webserver] +port = 7991 diff --git a/homestar-runtime/tests/fixtures/test_mdns1.toml b/homestar-runtime/tests/fixtures/test_mdns1.toml index 30e9a742..4a0ad334 100644 --- a/homestar-runtime/tests/fixtures/test_mdns1.toml +++ b/homestar-runtime/tests/fixtures/test_mdns1.toml @@ -1,16 +1,24 @@ -[monitoring] -process_collector_interval = 500 -metrics_port = 4001 -console_subscriber_port = 5560 - [node] -[node.network] -rpc_port = 9800 -websocket_port = 8000 -listen_address = "/ip4/0.0.0.0/tcp/0" -enable_rendezvous_client = false +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5560 # Peer ID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN [node.network.keypair_config] existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" } + +[node.network.libp2p] +listen_address = "/ip4/0.0.0.0/tcp/0" + +[node.network.libp2p.rendezvous] +enable_client = false + +[node.network.metrics] +port = 4001 + +[node.network.rpc] +port = 9800 + +[node.network.webserver] +port = 8000 diff --git a/homestar-runtime/tests/fixtures/test_mdns2.toml b/homestar-runtime/tests/fixtures/test_mdns2.toml index 1eb3a72b..0aaea51a 100644 --- a/homestar-runtime/tests/fixtures/test_mdns2.toml +++ b/homestar-runtime/tests/fixtures/test_mdns2.toml @@ -1,16 +1,24 @@ -[monitoring] -process_collector_interval = 500 -metrics_port = 4002 -console_subscriber_port = 5561 - [node] -[node.network] -rpc_port = 9801 -websocket_port = 8001 -listen_address = "/ip4/0.0.0.0/tcp/0" -enable_rendezvous_client = false +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5561 # Peer ID 16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc [node.network.keypair_config] existing = { key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" } + +[node.network.libp2p] +listen_address = "/ip4/0.0.0.0/tcp/0" + +[node.network.libp2p.rendezvous] +enable_client = false + +[node.network.metrics] +port = 4002 + +[node.network.rpc] +port = 9801 + +[node.network.webserver] +port = 8001 diff --git a/homestar-runtime/tests/fixtures/test_metrics.toml b/homestar-runtime/tests/fixtures/test_metrics.toml index 9c066443..718d8511 100644 --- a/homestar-runtime/tests/fixtures/test_metrics.toml +++ b/homestar-runtime/tests/fixtures/test_metrics.toml @@ -1,14 +1,18 @@ -[monitoring] -process_collector_interval = 500 -metrics_port = 4020 -console_subscriber_port = 5570 - [node] -[node.network] -rpc_port = 9810 -websocket_port = 8010 +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5570 # Peer ID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN [node.network.keypair_config] existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" } + +[node.network.metrics] +port = 4020 + +[node.network.rpc] +port = 9810 + +[node.network.webserver] +port = 8010 diff --git a/homestar-runtime/tests/fixtures/test_network1.toml b/homestar-runtime/tests/fixtures/test_network1.toml index f3db1601..6c59eaaa 100644 --- a/homestar-runtime/tests/fixtures/test_network1.toml +++ b/homestar-runtime/tests/fixtures/test_network1.toml @@ -1,20 +1,30 @@ -[monitoring] +[node] + +[node.monitoring] process_collector_interval = 500 -metrics_port = 4030 console_subscriber_port = 5580 -[node] +# 
Peer ID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN +[node.network.keypair_config] +existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" } -[node.network] -rpc_port = 9820 -websocket_port = 8020 +[node.network.libp2p] listen_address = "/ip4/127.0.0.1/tcp/7000" node_addresses = [ "/ip4/127.0.0.1/tcp/7001/p2p/16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc", ] -enable_mdns = false -enable_rendezvous_client = false -# Peer ID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN -[node.network.keypair_config] -existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" } +[node.network.libp2p.mdns] +enable = false + +[node.network.libp2p.rendezvous] +enable_client = false + +[node.network.metrics] +port = 4030 + +[node.network.rpc] +port = 9820 + +[node.network.webserver] +port = 8020 diff --git a/homestar-runtime/tests/fixtures/test_network2.toml b/homestar-runtime/tests/fixtures/test_network2.toml index f2081fbf..489142c2 100644 --- a/homestar-runtime/tests/fixtures/test_network2.toml +++ b/homestar-runtime/tests/fixtures/test_network2.toml @@ -1,20 +1,30 @@ -[monitoring] +[node] + +[node.monitoring] process_collector_interval = 500 -metrics_port = 4031 console_subscriber_port = 5581 -[node] +# Peer ID 16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc +[node.network.keypair_config] +existing = { key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" } -[node.network] -rpc_port = 9821 -websocket_port = 8021 +[node.network.libp2p] listen_address = "/ip4/127.0.0.1/tcp/7001" node_addresses = [ "/ip4/127.0.0.1/tcp/7000/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", ] -enable_mdns = false -enable_rendezvous_client = false -# Peer ID 16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc -[node.network.keypair_config] -existing = { key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" } +[node.network.libp2p.mdns] +enable = false + +[node.network.libp2p.rendezvous] +enable_client = false + +[node.network.metrics] +port = 4031 + +[node.network.rpc] +port = 9821 + +[node.network.webserver] +port = 8021 diff --git a/homestar-runtime/tests/fixtures/test_notification1.toml b/homestar-runtime/tests/fixtures/test_notification1.toml new file mode 100644 index 00000000..212b37d8 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test_notification1.toml @@ -0,0 +1,30 @@ +[node] + +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5582 + +# Peer ID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN +[node.network.keypair_config] +existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" } + +[node.network.libp2p] +listen_address = "/ip4/127.0.0.1/tcp/7010" +node_addresses = [ + "/ip4/127.0.0.1/tcp/7011/p2p/16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc", +] + +[node.network.libp2p.mdns] +enable = false + +[node.network.rendezvous] +enable_client = false + +[node.network.metrics] +port = 4032 + +[node.network.rpc] +port = 9822 + +[node.network.webserver] +port = 8022 diff --git a/homestar-runtime/tests/fixtures/test_notification2.toml b/homestar-runtime/tests/fixtures/test_notification2.toml new file mode 100644 index 00000000..362ecdc2 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test_notification2.toml @@ -0,0 +1,30 @@ +[node] + +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5583 + +# Peer ID 16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc +[node.network.keypair_config] +existing = { key_type = "secp256k1", 
path = "./fixtures/__testkey_secp256k1.der" } + +[node.network.libp2p] +listen_address = "/ip4/127.0.0.1/tcp/7011" +node_addresses = [ + "/ip4/127.0.0.1/tcp/7010/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", +] + +[node.network.libp2p.mdns] +enable = false + +[node.network.libp2p.rendezvous] +enable_client = false + +[node.network.metrics] +port = 4033 + +[node.network.rpc] +port = 9823 + +[node.network.webserver] +port = 8023 diff --git a/homestar-runtime/tests/fixtures/test_rendezvous1.toml b/homestar-runtime/tests/fixtures/test_rendezvous1.toml index 6070f8dd..d80d6359 100644 --- a/homestar-runtime/tests/fixtures/test_rendezvous1.toml +++ b/homestar-runtime/tests/fixtures/test_rendezvous1.toml @@ -1,17 +1,27 @@ -[monitoring] -process_collector_interval = 500 -metrics_port = 4035 -console_subscriber_port = 5585 - [node] -[node.network] -rpc_port = 9825 -websocket_port = 8025 -listen_address = "/ip4/127.0.0.1/tcp/7000" -enable_rendezvous_server = true -enable_mdns = false +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5585 # Peer ID 12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN [node.network.keypair_config] existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" } + +[node.network.libp2p] +listen_address = "/ip4/127.0.0.1/tcp/7000" + +[node.network.libp2p.mdns] +enable = false + +[node.network.libp2p.rendezvous] +enable_server = true + +[node.network.metrics] +port = 4034 + +[node.network.rpc] +port = 9824 + +[node.network.webserver] +port = 8024 diff --git a/homestar-runtime/tests/fixtures/test_rendezvous2.toml b/homestar-runtime/tests/fixtures/test_rendezvous2.toml index 94fa3109..0f96954d 100644 --- a/homestar-runtime/tests/fixtures/test_rendezvous2.toml +++ b/homestar-runtime/tests/fixtures/test_rendezvous2.toml @@ -1,13 +1,14 @@ -[monitoring] +[node] + +[node.monitoring] process_collector_interval = 500 -metrics_port = 4036 console_subscriber_port = 5586 -[node] +# Peer ID 16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc +[node.network.keypair_config] +existing = { key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" } -[node.network] -rpc_port = 9826 -websocket_port = 8026 +[node.network.libp2p] listen_address = "/ip4/127.0.0.1/tcp/7001" announce_addresses = [ "/ip4/127.0.0.1/tcp/7001/p2p/16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc", @@ -15,8 +16,15 @@ announce_addresses = [ node_addresses = [ "/ip4/127.0.0.1/tcp/7000/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", ] -enable_mdns = false -# Peer ID 16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc -[node.network.keypair_config] -existing = { key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" } +[node.network.libp2p.mdns] +enable = false + +[node.network.metrics] +port = 4036 + +[node.network.rpc] +port = 9826 + +[node.network.webserver] +port = 8026 diff --git a/homestar-runtime/tests/fixtures/test_rendezvous3.toml b/homestar-runtime/tests/fixtures/test_rendezvous3.toml index 31553836..dc2c2bb0 100644 --- a/homestar-runtime/tests/fixtures/test_rendezvous3.toml +++ b/homestar-runtime/tests/fixtures/test_rendezvous3.toml @@ -1,19 +1,27 @@ -[monitoring] +[node] + +[node.monitoring] process_collector_interval = 500 -metrics_port = 4037 console_subscriber_port = 5587 -[node] +# Peer ID 12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5 +[node.network.keypair_config] +existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" } -[node.network] -rpc_port = 9827 -websocket_port = 8027 
+[node.network.libp2p] listen_address = "/ip4/127.0.0.1/tcp/7002" node_addresses = [ "/ip4/127.0.0.1/tcp/7000/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", ] -enable_mdns = false -# Peer ID 12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5 -[node.network.keypair_config] -existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" } +[node.network.libp2p.mdns] +enable = false + +[node.network.metrics] +port = 4037 + +[node.network.rpc] +port = 9827 + +[node.network.webserver] +port = 8027 diff --git a/homestar-runtime/tests/fixtures/test_rendezvous4.toml b/homestar-runtime/tests/fixtures/test_rendezvous4.toml index c09f5741..af354bfe 100644 --- a/homestar-runtime/tests/fixtures/test_rendezvous4.toml +++ b/homestar-runtime/tests/fixtures/test_rendezvous4.toml @@ -1,13 +1,14 @@ -[monitoring] +[node] + +[node.monitoring] process_collector_interval = 500 -metrics_port = 4038 console_subscriber_port = 5588 -[node] +# Peer ID 12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq +[node.network.keypair_config] +existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519_3.pem" } -[node.network] -rpc_port = 9828 -websocket_port = 8028 +[node.network.libp2p] listen_address = "/ip4/127.0.0.1/tcp/7003" announce_addresses = [ "/ip4/127.0.0.1/tcp/7003/p2p/12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq", @@ -15,9 +16,18 @@ announce_addresses = [ node_addresses = [ "/ip4/127.0.0.1/tcp/7000/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", ] -rendezvous_registration_ttl = 1 -enable_mdns = false -# Peer ID 12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq -[node.network.keypair_config] -existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519_3.pem" } +[node.network.libp2p.mdns] +enable = false + +[node.network.libp2p.rendezvous] +registration_ttl = 1 + +[node.network.metrics] +port = 4038 + +[node.network.rpc] +port = 9828 + +[node.network.webserver] +port = 8028 diff --git a/homestar-runtime/tests/fixtures/test_rendezvous5.toml b/homestar-runtime/tests/fixtures/test_rendezvous5.toml index 876bf37d..79048fc3 100644 --- a/homestar-runtime/tests/fixtures/test_rendezvous5.toml +++ b/homestar-runtime/tests/fixtures/test_rendezvous5.toml @@ -1,20 +1,30 @@ -[monitoring] +[node] + +[node.monitoring] process_collector_interval = 500 -metrics_port = 4039 console_subscriber_port = 5589 -[node] +# Peer ID 12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba +[node.network.keypair_config] +existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519_4.pem" } -[node.network] -rpc_port = 9829 -websocket_port = 8029 +[node.network.libp2p] listen_address = "/ip4/127.0.0.1/tcp/7004" node_addresses = [ "/ip4/127.0.0.1/tcp/7000/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", ] -rendezvous_discovery_interval = 1 -enable_mdns = false -# Peer ID 12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba -[node.network.keypair_config] -existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519_4.pem" } +[node.network.libp2p.mdns] +enable = false + +[node.network.libp2p.rendezvous] +discovery_interval = 1 + +[node.network.metrics] +port = 4039 + +[node.network.rpc] +port = 9829 + +[node.network.webserver] +port = 8029 diff --git a/homestar-runtime/tests/fixtures/test_rendezvous6.toml b/homestar-runtime/tests/fixtures/test_rendezvous6.toml index 8b0b5ed2..765c1f21 100644 --- a/homestar-runtime/tests/fixtures/test_rendezvous6.toml +++ b/homestar-runtime/tests/fixtures/test_rendezvous6.toml @@ -1,13 +1,14 @@ -[monitoring] 
+[node] + +[node.monitoring] process_collector_interval = 500 -metrics_port = 4038 console_subscriber_port = 5588 -[node] +# Peer ID 12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw +[node.network.keypair_config] +existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519_5.pem" } -[node.network] -rpc_port = 9828 -websocket_port = 8028 +[node.network.libp2p] listen_address = "/ip4/127.0.0.1/tcp/7005" announce_addresses = [ "/ip4/127.0.0.1/tcp/7005/p2p/12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw", @@ -15,9 +16,18 @@ announce_addresses = [ node_addresses = [ "/ip4/127.0.0.1/tcp/7000/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", ] -rendezvous_registration_ttl = 5 -enable_mdns = false -# Peer ID 12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw -[node.network.keypair_config] -existing = { key_type = "ed25519", path = "./fixtures/__testkey_ed25519_5.pem" } +[node.network.libp2p.mdns] +enable = false + +[node.network.libp2p.rendezvous] +registration_ttl = 5 + +[node.network.metrics] +port = 4040 + +[node.network.rpc] +port = 9830 + +[node.network.webserver] +port = 8030 diff --git a/homestar-runtime/tests/fixtures/test_v4.toml b/homestar-runtime/tests/fixtures/test_v4.toml index 7748c44e..ae95115f 100644 --- a/homestar-runtime/tests/fixtures/test_v4.toml +++ b/homestar-runtime/tests/fixtures/test_v4.toml @@ -1,11 +1,18 @@ -[monitoring] -process_collector_interval = 500 -metrics_port = 4045 -console_subscriber_port = 5595 - [node] +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5590 + [node.network] events_buffer_len = 1000 -rpc_port = 9835 -rpc_host = "127.0.0.1" + +[node.network.metrics] +port = 4045 + +[node.network.rpc] +host = "127.0.0.1" +port = 9000 + +[node.network.webserver] +port = 8031 diff --git a/homestar-runtime/tests/fixtures/test_v4_alt.toml b/homestar-runtime/tests/fixtures/test_v4_alt.toml index d9ccc9c6..bf91ec45 100644 --- a/homestar-runtime/tests/fixtures/test_v4_alt.toml +++ b/homestar-runtime/tests/fixtures/test_v4_alt.toml @@ -1,11 +1,18 @@ -[monitoring] +[node] + +[node.monitoring] process_collector_interval = 500 -metrics_port = 4046 console_subscriber_port = 5596 -[node] - [node.network] events_buffer_len = 1000 -rpc_port = 9836 -rpc_host = "127.0.0.1" + +[node.network.metrics] +port = 4041 + +[node.network.rpc] +host = "127.0.0.1" +port = 9836 + +[node.network.webserver] +port = 8040 diff --git a/homestar-runtime/tests/fixtures/test_v6.toml b/homestar-runtime/tests/fixtures/test_v6.toml index 7a4a15e4..e09ff622 100644 --- a/homestar-runtime/tests/fixtures/test_v6.toml +++ b/homestar-runtime/tests/fixtures/test_v6.toml @@ -1,12 +1,18 @@ -[monitoring] +[node] + +[node.monitoring] process_collector_interval = 500 -metrics_port = 4047 console_subscriber_port = 5597 - -[node] - [node.network] events_buffer_len = 1000 -rpc_port = 9837 -rpc_host = "::1" + +[node.network.metrics] +port = 4042 + +[node.network.rpc] +host = "::1" +port = 9837 + +[node.network.webserver] +port = 8050 diff --git a/homestar-runtime/tests/fixtures/test_windows_v4.toml b/homestar-runtime/tests/fixtures/test_windows_v4.toml new file mode 100644 index 00000000..df78b182 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test_windows_v4.toml @@ -0,0 +1,18 @@ +[node] + +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5591 + +[node.network] +events_buffer_len = 1000 + +[node.network.metrics] +port = 4046 + +[node.network.rpc] +host = "127.0.0.1" +port = 9001 + +[node.network.webserver] +port = 8032 
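Each reworked fixture moves the old flat [node.network] keys (rpc_port, websocket_port, metrics_port, enable_mdns, enable_rendezvous_client, and friends) into nested tables: [node.network.rpc], [node.network.webserver], [node.network.metrics], [node.network.libp2p.mdns], and [node.network.libp2p.rendezvous]. A quick way to confirm a fixture still deserializes under the new schema is to load it the same way the gossip test further down does; fixture_parses here is an illustrative helper, not part of the suite:

use homestar_runtime::Settings;
use std::path::PathBuf;

// Returns true when the fixture parses against the nested settings layout
// (mirrors the Settings::load_from_file call in network/gossip.rs below).
fn fixture_parses(path: &str) -> bool {
    Settings::load_from_file(PathBuf::from(path)).is_ok()
}

fn main() {
    assert!(fixture_parses("tests/fixtures/test_windows_v4.toml"));
}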
diff --git a/homestar-runtime/tests/fixtures/test_workflow.toml b/homestar-runtime/tests/fixtures/test_workflow.toml deleted file mode 100644 index 549f337a..00000000 --- a/homestar-runtime/tests/fixtures/test_workflow.toml +++ /dev/null @@ -1,10 +0,0 @@ -[monitoring] -process_collector_interval = 500 -metrics_port = 4050 -console_subscriber_port = 5600 - -[node] - -[node.network] -events_buffer_len = 1000 -rpc_port = 9840 diff --git a/homestar-runtime/tests/fixtures/test_workflow1.toml b/homestar-runtime/tests/fixtures/test_workflow1.toml new file mode 100644 index 00000000..52531978 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test_workflow1.toml @@ -0,0 +1,17 @@ +[node] + +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5600 + +[node.network] +events_buffer_len = 1000 + +[node.network.metrics] +port = 4050 + +[node.network.rpc] +port = 9840 + +[node.network.webserver] +port = 8060 diff --git a/homestar-runtime/tests/fixtures/test_workflow2.toml b/homestar-runtime/tests/fixtures/test_workflow2.toml new file mode 100644 index 00000000..120e31c2 --- /dev/null +++ b/homestar-runtime/tests/fixtures/test_workflow2.toml @@ -0,0 +1,17 @@ +[node] + +[node.monitoring] +process_collector_interval = 500 +console_subscriber_port = 5600 + +[node.network] +events_buffer_len = 1000 + +[node.network.metrics] +port = 4070 + +[node.network.rpc] +port = 9860 + +[node.network.webserver] +port = 8061 diff --git a/homestar-runtime/tests/main.rs b/homestar-runtime/tests/main.rs index e957b31f..466774db 100644 --- a/homestar-runtime/tests/main.rs +++ b/homestar-runtime/tests/main.rs @@ -1,4 +1,7 @@ pub(crate) mod cli; +#[cfg(feature = "monitoring")] pub(crate) mod metrics; pub(crate) mod network; pub(crate) mod utils; +#[cfg(all(feature = "websocket-notify", feature = "test-utils"))] +pub(crate) mod webserver; diff --git a/homestar-runtime/tests/metrics.rs b/homestar-runtime/tests/metrics.rs index 3252996c..5dbb19e2 100644 --- a/homestar-runtime/tests/metrics.rs +++ b/homestar-runtime/tests/metrics.rs @@ -1,11 +1,10 @@ -use crate::utils::{kill_homestar, stop_homestar, BIN_NAME}; +use crate::utils::{kill_homestar, stop_homestar, wait_for_socket_connection, BIN_NAME}; use anyhow::Result; use once_cell::sync::Lazy; use reqwest::StatusCode; use retry::{delay::Exponential, retry, OperationResult}; use serial_test::file_serial; use std::{ - net::{IpAddr, Ipv4Addr, Shutdown, SocketAddr, TcpStream}, path::PathBuf, process::{Command, Stdio}, }; @@ -13,9 +12,12 @@ use std::{ static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); const METRICS_URL: &str = "http://localhost:4020"; +#[cfg(feature = "monitoring")] #[test] #[file_serial] fn test_metrics_serial() -> Result<()> { + use crate::utils::remove_db; + fn sample_metrics() -> Option { let body = retry( Exponential::from_millis(500).take(20), @@ -37,10 +39,11 @@ fn test_metrics_serial() -> Result<()> { metrics .samples .iter() - .find(|sample| sample.metric.as_str() == "system_used_memory_bytes") - .and_then(|sample| Some(sample.value.to_owned())) + .find(|sample| sample.metric.as_str() == "homestar_system_used_memory_bytes") + .map(|sample| sample.value.to_owned()) } + const DB: &str = "test_metrics_serial.db"; let _ = stop_homestar(); let mut homestar_proc = Command::new(BIN.as_os_str()) @@ -48,18 +51,13 @@ fn test_metrics_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_metrics.toml") .arg("--db") - .arg("homestar.db") + .arg(DB) .stdout(Stdio::piped()) .spawn() .unwrap(); - let socket = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4020); - let result = retry(Exponential::from_millis(1000).take(10), || { - TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) - }); - - if result.is_err() { - homestar_proc.kill().unwrap(); + if wait_for_socket_connection(4020, 1000).is_err() { + let _ = kill_homestar(homestar_proc, None); panic!("Homestar server/runtime failed to start in time"); } @@ -91,6 +89,7 @@ fn test_metrics_serial() -> Result<()> { let _ = kill_homestar(homestar_proc, None); let _ = stop_homestar(); + remove_db(DB); Ok(()) } diff --git a/homestar-runtime/tests/network.rs b/homestar-runtime/tests/network.rs index 8a4a346c..c52eb19f 100644 --- a/homestar-runtime/tests/network.rs +++ b/homestar-runtime/tests/network.rs @@ -1,5 +1,6 @@ use crate::utils::{ - check_lines_for, count_lines_where, kill_homestar, retrieve_output, stop_homestar, BIN_NAME, + check_lines_for, count_lines_where, kill_homestar, remove_db, retrieve_output, stop_homestar, + wait_for_socket_connection, wait_for_socket_connection_v6, BIN_NAME, }; use anyhow::Result; use once_cell::sync::Lazy; @@ -11,12 +12,18 @@ use std::{ time::Duration, }; +#[cfg(feature = "websocket-notify")] +mod gossip; +#[cfg(feature = "websocket-notify")] +mod notification; + #[allow(dead_code)] static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); #[test] #[file_serial] fn test_libp2p_generates_peer_id_serial() -> Result<()> { + const DB: &str = "test_libp2p_generates_peer_id_serial.db"; let _ = stop_homestar(); let homestar_proc = Command::new(BIN.as_os_str()) @@ -24,11 +31,16 @@ fn test_libp2p_generates_peer_id_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_network1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9820, 1000).is_err() { + let _ = kill_homestar(homestar_proc, None); + panic!("Homestar server/runtime failed to start in time"); + } + let dead_proc = kill_homestar(homestar_proc, None); let stdout = retrieve_output(dead_proc); let logs_expected = check_lines_for( @@ -41,12 +53,15 @@ fn test_libp2p_generates_peer_id_serial() -> Result<()> { assert!(logs_expected); + remove_db(DB); + Ok(()) } #[test] #[file_serial] fn test_libp2p_listens_on_address_serial() -> Result<()> { + const DB: &str = "test_libp2p_listens_on_address_serial.db"; let _ = stop_homestar(); let homestar_proc = Command::new(BIN.as_os_str()) @@ -54,29 +69,38 @@ fn test_libp2p_listens_on_address_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_network1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9820, 1000).is_err() { + let _ = kill_homestar(homestar_proc, None); + panic!("Homestar server/runtime failed to start in time"); + } + let dead_proc = kill_homestar(homestar_proc, None); let stdout = retrieve_output(dead_proc); let logs_expected = check_lines_for( stdout, vec![ "local node is listening", - "/ip4/127.0.0.1/tcp/7000/p2p/12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", + "/ip4/127.0.0.1/tcp/7000", + "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", ], ); assert!(logs_expected); + remove_db(DB); + Ok(()) } #[test] #[file_serial] fn test_rpc_listens_on_address_serial() -> Result<()> { + const DB: &str = "test_rpc_listens_on_address_serial.db"; let _ = stop_homestar(); let homestar_proc = Command::new(BIN.as_os_str()) @@ -84,24 +108,31 @@ fn test_rpc_listens_on_address_serial() -> Result<()> { 
.arg("-c") .arg("tests/fixtures/test_network1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9820, 1000).is_err() { + let _ = kill_homestar(homestar_proc, None); + panic!("Homestar server/runtime failed to start in time"); + } + let dead_proc = kill_homestar(homestar_proc, None); let stdout = retrieve_output(dead_proc); let logs_expected = check_lines_for(stdout, vec!["RPC server listening", "[::1]:9820"]); assert!(logs_expected); + remove_db(DB); + Ok(()) } -#[cfg(feature = "websocket-server")] #[test] #[file_serial] fn test_websocket_listens_on_address_serial() -> Result<()> { + const DB: &str = "test_websocket_listens_on_address_serial.db"; let _ = stop_homestar(); let homestar_proc = Command::new(BIN.as_os_str()) @@ -109,24 +140,32 @@ fn test_websocket_listens_on_address_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_network1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9820, 1000).is_err() { + let _ = kill_homestar(homestar_proc, None); + panic!("Homestar server/runtime failed to start in time"); + } + let dead_proc = kill_homestar(homestar_proc, None); let stdout = retrieve_output(dead_proc); - let logs_expected = - check_lines_for(stdout, vec!["websocket server listening", "127.0.0.1:8020"]); + let logs_expected = check_lines_for(stdout, vec!["webserver listening", "127.0.0.1:8020"]); assert!(logs_expected); + remove_db(DB); + Ok(()) } #[test] #[file_serial] fn test_libp2p_connect_known_peers_serial() -> Result<()> { + const DB1: &str = "test_libp2p_connect_known_peers_serial1.db"; + const DB2: &str = "test_libp2p_connect_known_peers_serial2.db"; let _ = stop_homestar(); // Start two nodes configured to listen at 127.0.0.1 each with their own port. @@ -140,11 +179,16 @@ fn test_libp2p_connect_known_peers_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_network1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB1) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9820, 1000).is_err() { + let _ = kill_homestar(homestar_proc1, None); + panic!("Homestar server/runtime failed to start in time"); + } + let homestar_proc2 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", @@ -154,11 +198,16 @@ fn test_libp2p_connect_known_peers_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_network2.toml") .arg("--db") - .arg("homestar2.db") + .arg(DB2) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9821, 1000).is_err() { + let _ = kill_homestar(homestar_proc2, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Collect logs for five seconds then kill proceses. let dead_proc1 = kill_homestar(homestar_proc1, Some(Duration::from_secs(5))); let dead_proc2 = kill_homestar(homestar_proc2, Some(Duration::from_secs(5))); @@ -229,12 +278,17 @@ fn test_libp2p_connect_known_peers_serial() -> Result<()> { assert!(one_in_dht_routing_table); assert!(two_connected_to_one); + remove_db(DB1); + remove_db(DB2); + Ok(()) } #[test] #[file_serial] fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { + const DB1: &str = "test_libp2p_connect_after_mdns_discovery_serial1.db"; + const DB2: &str = "test_libp2p_connect_after_mdns_discovery_serial2.db"; let _ = stop_homestar(); // Start two nodes each configured to listen at 0.0.0.0 with no known peers. 
@@ -248,11 +302,16 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_mdns1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB1) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9800, 1000).is_err() { + let _ = kill_homestar(homestar_proc1, None); + panic!("Homestar server/runtime failed to start in time"); + } + let homestar_proc2 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", @@ -262,11 +321,16 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_mdns2.toml") .arg("--db") - .arg("homestar2.db") + .arg(DB2) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9801, 1000).is_err() { + let _ = kill_homestar(homestar_proc2, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Collect logs for seven seconds then kill processes. let dead_proc1 = kill_homestar(homestar_proc1, Some(Duration::from_secs(7))); let dead_proc2 = kill_homestar(homestar_proc2, Some(Duration::from_secs(7))); @@ -337,12 +401,18 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { assert!(one_addded_to_dht); assert!(one_in_dht_routing_table); + remove_db(DB1); + remove_db(DB2); + Ok(()) } #[test] #[file_serial] fn test_libp2p_connect_rendezvous_discovery_serial() -> Result<()> { + const DB1: &str = "test_libp2p_connect_rendezvous_discovery_serial1.db"; + const DB2: &str = "test_libp2p_connect_rendezvous_discovery_serial2.db"; + const DB3: &str = "test_libp2p_connect_rendezvous_discovery_serial3.db"; let _ = stop_homestar(); // Start a rendezvous server @@ -355,11 +425,16 @@ fn test_libp2p_connect_rendezvous_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB1) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8024, 1000).is_err() { + let _ = kill_homestar(rendezvous_server, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Start a peer that will register with the rendezvous server let rendezvous_client1 = Command::new(BIN.as_os_str()) .env( @@ -370,13 +445,18 @@ fn test_libp2p_connect_rendezvous_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous2.toml") .arg("--db") - .arg("homestar2.db") + .arg(DB2) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8026, 1000).is_err() { + let _ = kill_homestar(rendezvous_client1, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Wait for registration to complete - // TODO When we have websocket push events, listen on a registration event instead of using an arbitrary sleep + // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep thread::sleep(Duration::from_secs(2)); // Start a peer that will discover the registrant through the rendezvous server @@ -389,11 +469,16 @@ fn test_libp2p_connect_rendezvous_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous3.toml") .arg("--db") - .arg("homestar3.db") + .arg(DB3) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8027, 1000).is_err() { + let _ = kill_homestar(rendezvous_client2, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Collect logs for five seconds then kill proceses. 
let dead_server = kill_homestar(rendezvous_server, Some(Duration::from_secs(5))); let _ = kill_homestar(rendezvous_client1, Some(Duration::from_secs(5))); @@ -455,12 +540,18 @@ fn test_libp2p_connect_rendezvous_discovery_serial() -> Result<()> { assert!(one_in_dht_routing_table); assert!(two_connected_to_one); + remove_db(DB1); + remove_db(DB2); + remove_db(DB3); + Ok(()) } #[test] #[file_serial] fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { + const DB1: &str = "test_libp2p_disconnect_mdns_discovery_serial1.db"; + const DB2: &str = "test_libp2p_disconnect_mdns_discovery_serial2.db"; let _ = stop_homestar(); // Start two nodes each configured to listen at 0.0.0.0 with no known peers. @@ -474,11 +565,16 @@ fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_mdns1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB1) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8000, 1000).is_err() { + let _ = kill_homestar(homestar_proc1, None); + panic!("Homestar server/runtime failed to start in time"); + } + let homestar_proc2 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", @@ -488,11 +584,16 @@ fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_mdns2.toml") .arg("--db") - .arg("homestar2.db") + .arg(DB2) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8001, 1000).is_err() { + let _ = kill_homestar(homestar_proc2, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Kill node two after seven seconds. let _ = kill_homestar(homestar_proc2, Some(Duration::from_secs(7))); @@ -523,12 +624,17 @@ fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { assert!(two_disconnected_from_one); assert!(two_removed_from_dht_table); + remove_db(DB1); + remove_db(DB2); + Ok(()) } #[test] #[file_serial] fn test_libp2p_disconnect_known_peers_serial() -> Result<()> { + const DB1: &str = "test_libp2p_disconnect_known_peers_serial1.db"; + const DB2: &str = "test_libp2p_disconnect_known_peers_serial2.db"; let _ = stop_homestar(); // Start two nodes configured to listen at 127.0.0.1 each with their own port. @@ -542,11 +648,16 @@ fn test_libp2p_disconnect_known_peers_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_network1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB1) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9820, 1000).is_err() { + let _ = kill_homestar(homestar_proc1, None); + panic!("Homestar server/runtime failed to start in time"); + } + let homestar_proc2 = Command::new(BIN.as_os_str()) .env( "RUST_LOG", @@ -556,11 +667,16 @@ fn test_libp2p_disconnect_known_peers_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_network2.toml") .arg("--db") - .arg("homestar2.db") + .arg(DB2) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9821, 1000).is_err() { + let _ = kill_homestar(homestar_proc2, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Kill node two after seven seconds. 
let _ = kill_homestar(homestar_proc2, Some(Duration::from_secs(7))); @@ -589,7 +705,10 @@ fn test_libp2p_disconnect_known_peers_serial() -> Result<()> { ); assert!(two_disconnected_from_one); - assert_eq!(false, two_removed_from_dht_table); + assert!(!two_removed_from_dht_table); + + remove_db(DB1); + remove_db(DB2); Ok(()) } @@ -597,6 +716,9 @@ fn test_libp2p_disconnect_known_peers_serial() -> Result<()> { #[test] #[file_serial] fn test_libp2p_disconnect_rendezvous_discovery_serial() -> Result<()> { + const DB1: &str = "test_libp2p_disconnect_rendezvous_discovery_serial1.db"; + const DB2: &str = "test_libp2p_disconnect_rendezvous_discovery_serial2.db"; + const DB3: &str = "test_libp2p_disconnect_rendezvous_discovery_serial3.db"; let _ = stop_homestar(); // Start a rendezvous server @@ -609,11 +731,16 @@ fn test_libp2p_disconnect_rendezvous_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB1) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8024, 1000).is_err() { + let _ = kill_homestar(rendezvous_server, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Start a peer that will register with the rendezvous server let rendezvous_client1 = Command::new(BIN.as_os_str()) .env( @@ -624,13 +751,18 @@ fn test_libp2p_disconnect_rendezvous_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous2.toml") .arg("--db") - .arg("homestar2.db") + .arg(DB2) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8026, 1000).is_err() { + let _ = kill_homestar(rendezvous_client1, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Wait for registration to complete. - // TODO When we have websocket push events, listen on a registration event instead of using an arbitrary sleep. + // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep. 
thread::sleep(Duration::from_secs(2)); // Start a peer that will discover the registrant through the rendezvous server @@ -643,11 +775,16 @@ fn test_libp2p_disconnect_rendezvous_discovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous3.toml") .arg("--db") - .arg("homestar3.db") + .arg(DB3) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8027, 1000).is_err() { + let _ = kill_homestar(rendezvous_client1, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Kill server and client one after five seconds let _ = kill_homestar(rendezvous_server, Some(Duration::from_secs(5))); let _ = kill_homestar(rendezvous_client1, Some(Duration::from_secs(5))); @@ -679,12 +816,18 @@ fn test_libp2p_disconnect_rendezvous_discovery_serial() -> Result<()> { assert!(two_disconnected_from_one); assert!(two_removed_from_dht_table); + remove_db(DB1); + remove_db(DB2); + remove_db(DB3); + Ok(()) } #[test] #[file_serial] fn test_libp2p_rendezvous_renew_registration_serial() -> Result<()> { + const DB1: &str = "test_libp2p_rendezvous_renew_registration_serial1.db"; + const DB2: &str = "test_libp2p_rendezvous_renew_registration_serial2.db"; let _ = stop_homestar(); // Start a rendezvous server @@ -697,11 +840,16 @@ fn test_libp2p_rendezvous_renew_registration_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB1) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8024, 1000).is_err() { + let _ = kill_homestar(rendezvous_server, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Start a peer that will renew registrations with the rendezvous server once per second let rendezvous_client1 = Command::new(BIN.as_os_str()) .env( @@ -712,11 +860,16 @@ fn test_libp2p_rendezvous_renew_registration_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous4.toml") .arg("--db") - .arg("homestar4.db") + .arg(DB2) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8028, 1000).is_err() { + let _ = kill_homestar(rendezvous_client1, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Collect logs for five seconds then kill proceses. 
let dead_server = kill_homestar(rendezvous_server, Some(Duration::from_secs(5))); let dead_client = kill_homestar(rendezvous_client1, Some(Duration::from_secs(5))); @@ -746,12 +899,17 @@ fn test_libp2p_rendezvous_renew_registration_serial() -> Result<()> { assert!(server_registration_count > 1); assert!(client_registration_count > 1); + remove_db(DB1); + remove_db(DB2); + Ok(()) } #[test] #[file_serial] fn test_libp2p_rendezvous_rediscovery_serial() -> Result<()> { + const DB1: &str = "test_libp2p_rendezvous_rediscovery_serial1.db"; + const DB2: &str = "test_libp2p_rendezvous_rediscovery_serial2.db"; let _ = stop_homestar(); // Start a rendezvous server @@ -764,11 +922,16 @@ fn test_libp2p_rendezvous_rediscovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB1) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8024, 1000).is_err() { + let _ = kill_homestar(rendezvous_server, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Start a peer that will discover with the rendezvous server once per second let rendezvous_client1 = Command::new(BIN.as_os_str()) .env( @@ -779,11 +942,16 @@ fn test_libp2p_rendezvous_rediscovery_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous5.toml") .arg("--db") - .arg("homestar5.db") + .arg(DB2) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9829, 1000).is_err() { + let _ = kill_homestar(rendezvous_client1, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Collect logs for five seconds then kill proceses. let dead_server = kill_homestar(rendezvous_server, Some(Duration::from_secs(5))); let dead_client = kill_homestar(rendezvous_client1, Some(Duration::from_secs(5))); @@ -813,12 +981,18 @@ fn test_libp2p_rendezvous_rediscovery_serial() -> Result<()> { assert!(server_discovery_count > 1); assert!(client_discovery_count > 1); + remove_db(DB1); + remove_db(DB2); + Ok(()) } #[test] #[file_serial] fn test_libp2p_rendezvous_rediscover_on_expiration_serial() -> Result<()> { + const DB1: &str = "test_libp2p_rendezvous_rediscover_on_expiration_serial1.db"; + const DB2: &str = "test_libp2p_rendezvous_rediscover_on_expiration_serial2.db"; + const DB3: &str = "test_libp2p_rendezvous_rediscover_on_expiration_serial3.db"; let _ = stop_homestar(); // Start a rendezvous server @@ -831,11 +1005,16 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous1.toml") .arg("--db") - .arg("homestar1.db") + .arg(DB1) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8024, 1000).is_err() { + let _ = kill_homestar(rendezvous_server, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Start a peer that will renew registrations with the rendezvous server every five seconds let rendezvous_client1 = Command::new(BIN.as_os_str()) .env( @@ -846,13 +1025,18 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous6.toml") .arg("--db") - .arg("homestar6.db") + .arg(DB2) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection_v6(9830, 1000).is_err() { + let _ = kill_homestar(rendezvous_client1, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Wait for registration to complete. 
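The registration and discovery counters asserted in these rendezvous tests (server_registration_count, client_discovery_count, and so on) come from a count_lines_where helper in tests/utils.rs that this diff does not include; a plausible sketch, counting log lines that contain every expected fragment:

// Assumed shape of the tests/utils.rs counter backing these assertions;
// the real helper may differ.
fn count_lines_where(output: String, predicates: Vec<&str>) -> usize {
    output
        .lines()
        .filter(|line| predicates.iter().all(|p| line.contains(p)))
        .count()
}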
- // TODO When we have websocket push events, listen on a registration event instead of using an arbitrary sleep. + // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep. thread::sleep(Duration::from_secs(2)); // Start a peer that will discover with the rendezvous server when @@ -868,11 +1052,16 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_serial() -> Result<()> { .arg("-c") .arg("tests/fixtures/test_rendezvous3.toml") .arg("--db") - .arg("homestar3.db") + .arg(DB3) .stdout(Stdio::piped()) .spawn() .unwrap(); + if wait_for_socket_connection(8027, 1000).is_err() { + let _ = kill_homestar(rendezvous_client1, None); + panic!("Homestar server/runtime failed to start in time"); + } + // Collect logs for seven seconds then kill proceses. let dead_server = kill_homestar(rendezvous_server, Some(Duration::from_secs(7))); let _ = kill_homestar(rendezvous_client1, Some(Duration::from_secs(7))); @@ -903,5 +1092,9 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_serial() -> Result<()> { assert!(server_discovery_count > 1); assert!(client_discovery_count > 1); + remove_db(DB1); + remove_db(DB2); + remove_db(DB3); + Ok(()) } diff --git a/homestar-runtime/tests/network/gossip.rs b/homestar-runtime/tests/network/gossip.rs new file mode 100644 index 00000000..337f9627 --- /dev/null +++ b/homestar-runtime/tests/network/gossip.rs @@ -0,0 +1,213 @@ +use crate::utils::{ + check_lines_for, kill_homestar, remove_db, retrieve_output, stop_homestar, + wait_for_socket_connection, TimeoutFutureExt, BIN_NAME, +}; +use anyhow::Result; +use homestar_runtime::{db::Database, Db, Settings}; +use itertools::Itertools; +use jsonrpsee::{ + core::client::{Subscription, SubscriptionClientT}, + rpc_params, + ws_client::WsClientBuilder, +}; +use libipld::Cid; +use once_cell::sync::Lazy; +use serial_test::file_serial; +use std::{ + net::Ipv4Addr, + path::PathBuf, + process::{Command, Stdio}, + str::FromStr, + time::Duration, +}; + +static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); +const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; +const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; + +#[test] +#[file_serial] +fn test_libp2p_receipt_gossip_serial() -> Result<()> { + const DB1: &str = "homestar_test_libp2p_receipt_gossip_serial1.db"; + const DB2: &str = "homestar_test_libp2p_receipt_gossip_serial2.db"; + let _ = stop_homestar(); + let homestar_proc1 = Command::new(BIN.as_os_str()) + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg("tests/fixtures/test_gossip1.toml") + .arg("--db") + .arg(DB1) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let ws_port = 7990; + if wait_for_socket_connection(ws_port, 1000).is_err() { + let _ = kill_homestar(homestar_proc1, None); + panic!("Homestar server/runtime failed to start in time"); + } + + tokio_test::block_on(async { + let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port); + let client = WsClientBuilder::default() + .build(ws_url.clone()) + .await + .unwrap(); + + let mut sub1: Subscription> = client + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + let homestar_proc2 = Command::new(BIN.as_os_str()) + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + 
.arg("tests/fixtures/test_gossip2.toml") + .arg("--db") + .arg(DB2) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let ws_port2 = 7991; + if wait_for_socket_connection(ws_port2, 1000).is_err() { + let _ = kill_homestar(homestar_proc1, None); + panic!("Homestar server/runtime failed to start in time"); + } + + // Poll for connection established message + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["type"].as_str().unwrap() == "network:connectionEstablished" { + break; + } + } else { + panic!("Node one did not establish a connection with node two in time.") + } + } + + let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); + let client2 = WsClientBuilder::default() + .build(ws_url2.clone()) + .await + .unwrap(); + + let mut sub2: Subscription> = client2 + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + // Run test workflow + let _ = Command::new(BIN.as_os_str()) + .arg("run") + .arg("-p") + .arg("9790") + .arg("-w") + .arg("tests/fixtures/test-workflow-add-one.json") + .output(); + + // Poll for published and received receipt messages + let mut published_cids: Vec = vec![]; + let mut received_cids: Vec = vec![]; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["type"].as_str().unwrap() == "network:publishedReceiptPubsub" { + published_cids.push( + Cid::from_str(json["data"]["cid"].as_str().unwrap()) + .expect("Unable to parse published receipt CID."), + ); + } + } else { + panic!("Node one did not publish receipt in time.") + } + + if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["type"].as_str().unwrap() == "network:receivedReceiptPubsub" { + received_cids.push( + Cid::from_str(json["data"]["cid"].as_str().unwrap()) + .expect("Unable to parse received receipt CID."), + ); + } + } else { + panic!("Node two did not receive receipt in time.") + } + + if published_cids.len() == 2 && received_cids.len() == 2 { + break; + } + } + + // Collect logs then kill proceses. + let dead_proc1 = kill_homestar(homestar_proc1, None); + let dead_proc2 = kill_homestar(homestar_proc2, None); + + // Retrieve logs. 
+ let stdout1 = retrieve_output(dead_proc1); + let stdout2 = retrieve_output(dead_proc2); + + // Check node one published a receipt + let message_published = + check_lines_for(stdout1, vec!["message published on receipts topic"]); + + // Check node two received a receipt from node one + let message_received = check_lines_for( + stdout2, + vec![ + "message received on receipts topic", + "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", + ], + ); + + assert!(message_published); + assert!(message_received); + + let settings = + Settings::load_from_file(PathBuf::from("tests/fixtures/test_gossip2.toml")).unwrap(); + let db = Db::setup_connection_pool(settings.node(), Some(DB2.to_string())) + .expect("Failed to connect to node two database"); + + // Check database for stored receipts + let stored_receipts: Vec<_> = received_cids + .iter() + .map(|cid| { + Db::find_receipt_by_cid(*cid, &mut db.conn().unwrap()).unwrap_or_else(|_| { + panic!("Failed to find receipt with CID {} in database", *cid) + }) + }) + .collect_vec(); + + assert_eq!(stored_receipts.len(), 2) + }); + + remove_db(DB1); + remove_db(DB2); + + let _ = stop_homestar(); + + Ok(()) +} diff --git a/homestar-runtime/tests/network/notification.rs b/homestar-runtime/tests/network/notification.rs new file mode 100644 index 00000000..273efe28 --- /dev/null +++ b/homestar-runtime/tests/network/notification.rs @@ -0,0 +1,119 @@ +use crate::utils::{ + kill_homestar, remove_db, stop_homestar, wait_for_socket_connection, TimeoutFutureExt, BIN_NAME, +}; +use anyhow::Result; +use jsonrpsee::{ + core::client::{Subscription, SubscriptionClientT}, + rpc_params, + ws_client::WsClientBuilder, +}; +use once_cell::sync::Lazy; +use serial_test::file_serial; +use std::{ + net::Ipv4Addr, + path::PathBuf, + process::{Command, Stdio}, + time::Duration, +}; + +static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); +const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; +const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; + +#[test] +#[file_serial] +fn test_connection_notifications_serial() -> Result<()> { + const DB1: &str = "test_connection_notifications_serial1.db"; + const DB2: &str = "test_connection_notifications_serial2.db"; + let _ = stop_homestar(); + + let homestar_proc1 = Command::new(BIN.as_os_str()) + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg("tests/fixtures/test_notification1.toml") + .arg("--db") + .arg(DB1) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let ws_port = 8022; + if wait_for_socket_connection(8022, 1000).is_err() { + let _ = kill_homestar(homestar_proc1, None); + panic!("Homestar server/runtime failed to start in time"); + } + + let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port); + tokio_test::block_on(async { + tokio_tungstenite::connect_async(ws_url.clone()) + .await + .unwrap(); + + let client = WsClientBuilder::default() + .build(ws_url.clone()) + .await + .unwrap(); + let mut sub: Subscription> = client + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + let homestar_proc2 = Command::new(BIN.as_os_str()) + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg("tests/fixtures/test_notification2.toml") + .arg("--db") + .arg(DB2) + .stdout(Stdio::piped()) 
+ .spawn() + .unwrap(); + + // Poll for connection established message + loop { + if let Ok(msg) = sub.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["type"].as_str().unwrap() == "network:connectionEstablished".to_string() { + break; + } + } else { + panic!("Node one did not receive a connection established message in time.") + } + } + + let _ = kill_homestar(homestar_proc2, None); + + // Poll for connection closed message + loop { + if let Ok(msg) = sub.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["type"].as_str().unwrap() == "network:connectionClosed".to_string() { + break; + } + } else { + panic!("Node one did not receive a connection closed message in time.") + } + } + + let _ = kill_homestar(homestar_proc1, None); + remove_db(DB1); + remove_db(DB2); + }); + + Ok(()) +} diff --git a/homestar-runtime/tests/utils.rs b/homestar-runtime/tests/utils.rs index 7c754b94..d04762ab 100644 --- a/homestar-runtime/tests/utils.rs +++ b/homestar-runtime/tests/utils.rs @@ -9,47 +9,27 @@ use nix::{ }; use once_cell::sync::Lazy; use predicates::prelude::*; -use retry::{delay::Fixed, retry}; +#[cfg(not(windows))] +use retry::delay::Fixed; +use retry::{delay::Exponential, retry}; use std::{ - net::{IpAddr, Ipv4Addr, Shutdown, SocketAddr, TcpStream}, + fs, + future::Future, + net::{IpAddr, Ipv4Addr, Ipv6Addr, Shutdown, SocketAddr, TcpStream}, path::PathBuf, process::{Child, Command, Stdio}, time::Duration, }; -use strip_ansi_escapes; #[cfg(not(windows))] use sysinfo::PidExt; use sysinfo::{ProcessExt, SystemExt}; +use tokio::time::{timeout, Timeout}; use wait_timeout::ChildExt; /// Binary name, which is different than the crate name. pub(crate) const BIN_NAME: &str = "homestar"; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); -const IPFS: &str = "ipfs"; - -/// Start-up IPFS daemon for tests with the feature turned-on. -pub(crate) fn startup_ipfs() -> Result<()> { - let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(".ipfs"); - println!("starting ipfs daemon...{}", path.to_str().unwrap()); - let mut ipfs_daemon = Command::new(IPFS) - .args(["--offline", "daemon", "--init"]) - .stdout(Stdio::piped()) - .spawn()?; - - // wait for ipfs daemon to start by testing for a connection - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5001); - let result = retry(Fixed::from_millis(500), || { - TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) - }); - - if let Err(err) = result { - ipfs_daemon.kill().unwrap(); - panic!("`ipfs daemon` failed to start: {:?}", err); - } else { - Ok(()) - } -} /// Stop the Homestar server/binary. pub(crate) fn stop_homestar() -> Result<()> { @@ -63,28 +43,6 @@ pub(crate) fn stop_homestar() -> Result<()> { Ok(()) } -/// Stop the IPFS binary. -pub(crate) fn stop_ipfs() -> Result<()> { - let path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(".ipfs"); - Command::new(IPFS) - .args(["--repo-dir", path.to_str().unwrap(), "shutdown"]) - .stdout(Stdio::null()) - .stderr(Stdio::null()) - .status() - .context("failed to stop IPFS daemon")?; - rm_rf::ensure_removed(path).unwrap(); - - Ok(()) -} - -/// Stop all binaries. -pub(crate) fn stop_all_bins() -> Result<()> { - let _ = stop_ipfs(); - let _ = stop_homestar(); - - Ok(()) -} - /// Retrieve process output. 
pub(crate) fn retrieve_output(proc: Child) -> String { let output = proc.wait_with_output().expect("failed to wait on child"); @@ -95,13 +53,13 @@ pub(crate) fn retrieve_output(proc: Child) -> String { /// Check process output for all predicates in any line pub(crate) fn check_lines_for(output: String, predicates: Vec<&str>) -> bool { output - .split("\n") + .split('\n') .map(|line| line_contains(line, &predicates)) - .fold(false, |acc, curr| acc || curr) + .any(|curr| curr) } pub(crate) fn count_lines_where(output: String, predicates: Vec<&str>) -> i32 { - output.split("\n").fold(0, |count, line| { + output.split('\n').fold(0, |count, line| { if line_contains(line, &predicates) { count + 1 } else { @@ -116,10 +74,9 @@ pub(crate) fn extract_timestamps_where( output: String, predicates: Vec<&str>, ) -> Vec> { - output.split("\n").fold(vec![], |mut timestamps, line| { + output.split('\n').fold(vec![], |mut timestamps, line| { if line_contains(line, &predicates) { - match extract_label(&line, "ts").and_then(|val| DateTime::parse_from_rfc3339(val).ok()) - { + match extract_label(line, "ts").and_then(|val| DateTime::parse_from_rfc3339(val).ok()) { Some(datetime) => { timestamps.push(datetime); timestamps @@ -136,11 +93,11 @@ pub(crate) fn extract_timestamps_where( } /// Check process output line for all predicates -fn line_contains(line: &str, predicates: &Vec<&str>) -> bool { +fn line_contains(line: &str, predicates: &[&str]) -> bool { predicates .iter() .map(|pred| predicate::str::contains(*pred).eval(line)) - .fold(true, |acc, curr| acc && curr) + .all(|curr| curr) } /// Extract label value from process output line @@ -215,3 +172,55 @@ pub(crate) fn kill_homestar_daemon() -> Result<()> { Ok(()) } + +/// Remove sqlite database and associated temporary files +pub(crate) fn remove_db(name: &str) { + let _ = fs::remove_file(name); + let _ = fs::remove_file(format!("{name}-shm")); + let _ = fs::remove_file(format!("{name}-wal")); +} + +/// Wait for socket connection or timeout +pub(crate) fn wait_for_socket_connection(port: u16, exp_retry_base: u64) -> Result<(), ()> { + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port); + let result = retry(Exponential::from_millis(exp_retry_base).take(10), || { + TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) + }); + + result.map_or_else(|_| Err(()), |_| Ok(())) +} + +/// Wait for socket connection or timeout (ipv6) +pub(crate) fn wait_for_socket_connection_v6(port: u16, exp_retry_base: u64) -> Result<(), ()> { + let socket = SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), port); + let result = retry(Exponential::from_millis(exp_retry_base).take(10), || { + TcpStream::connect(socket).map(|stream| stream.shutdown(Shutdown::Both)) + }); + + result.map_or_else(|_| Err(()), |_| Ok(())) +} + +/// Helper extension trait which allows to limit execution time for the futures. +/// It is helpful in tests to ensure that no future will ever get stuck forever. +pub(crate) trait TimeoutFutureExt: Future + Sized { + /// Returns a reasonable value that can be used as a future timeout with a certain + /// degree of confidence that timeout won't be triggered by the test specifics. + fn default_timeout() -> Duration { + // If some future wasn't done in 60 seconds, it's either a poorly written test + // or (most likely) a bug related to some future never actually being completed. + const TIMEOUT_SECONDS: u64 = 60; + Duration::from_secs(TIMEOUT_SECONDS) + } + + /// Adds a fixed timeout to the future. 
+ fn with_default_timeout(self) -> Timeout { + self.with_timeout(Self::default_timeout()) + } + + /// Adds a custom timeout to the future. + fn with_timeout(self, timeout_value: Duration) -> Timeout { + timeout(timeout_value, self) + } +} + +impl TimeoutFutureExt for U where U: Future + Sized {} diff --git a/homestar-runtime/tests/webserver.rs b/homestar-runtime/tests/webserver.rs new file mode 100644 index 00000000..6e3bb537 --- /dev/null +++ b/homestar-runtime/tests/webserver.rs @@ -0,0 +1,195 @@ +use crate::utils::{ + kill_homestar, remove_db, stop_homestar, wait_for_socket_connection, TimeoutFutureExt, BIN_NAME, +}; +use anyhow::Result; +use jsonrpsee::{ + core::client::{Subscription, SubscriptionClientT}, + rpc_params, + ws_client::WsClientBuilder, +}; +use once_cell::sync::Lazy; +use serial_test::file_serial; +use std::{ + fs, + net::Ipv4Addr, + path::PathBuf, + process::{Command, Stdio}, + time::Duration, +}; + +static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); +const SUBSCRIBE_RUN_WORKFLOW_ENDPOINT: &str = "subscribe_run_workflow"; +const UNSUBSCRIBE_RUN_WORKFLOW_ENDPOINT: &str = "unsubscribe_run_workflow"; +const AWAIT_CID: &str = "bafyrmih5bwjinspvn5ktpcaxqvvxkmhxrznhuu3qqmu45jnpmo3ab72vaq"; + +#[test] +#[file_serial] +fn test_workflow_run_serial() -> Result<()> { + const DB: &str = "ws_homestar_test_workflow_run.db"; + let _ = stop_homestar(); + let _ = fs::remove_file(DB); + + let homestar_proc = Command::new(BIN.as_os_str()) + .arg("start") + .arg("-c") + .arg("tests/fixtures/test_workflow2.toml") + .arg("--db") + .arg(DB) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + let ws_port = 8061; + if wait_for_socket_connection(ws_port, 1000).is_err() { + let _ = kill_homestar(homestar_proc, None); + panic!("Homestar server/runtime failed to start in time"); + } + + let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port); + + tokio_test::block_on(async { + let workflow_str = + fs::read_to_string("tests/fixtures/test-workflow-image-pipeline.json").unwrap(); + let json: serde_json::Value = serde_json::from_str(&workflow_str).unwrap(); + let json_string = serde_json::to_string(&json).unwrap(); + let run_str = format!(r#"{{"name": "test","workflow": {}}}"#, json_string); + let run: serde_json::Value = serde_json::from_str(&run_str).unwrap(); + + let client1 = WsClientBuilder::default() + .build(ws_url.clone()) + .await + .unwrap(); + + let mut sub1: Subscription> = client1 + .subscribe( + SUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + rpc_params![run.clone()], + UNSUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + ) + .await + .unwrap(); + + // we have 3 operations + let mut received_cids = 0; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + let check = json.get("metadata").unwrap(); + let expected = serde_json::json!({"name": "test", "replayed": false, "workflow": {"/": format!("{AWAIT_CID}")}}); + assert_eq!(check, &expected); + received_cids += 1; + } else { + panic!("Node one did not publish receipt in time.") + } + + if received_cids == 3 { + received_cids = 0; + break; + } + } + + // separate subscription, only 3 events too + let mut sub2: Subscription> = client1 + .subscribe( + SUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + rpc_params![run.clone()], + UNSUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + ) + .await + .unwrap(); + + loop { + if let Ok(msg) = sub2.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + 
serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + let check = json.get("metadata").unwrap(); + let expected = serde_json::json!({"name": "test", "replayed": true, "workflow": {"/": format!("{AWAIT_CID}")}}); + assert_eq!(check, &expected); + received_cids += 1; + } else { + panic!("Node one did not publish receipt in time.") + } + + if received_cids == 3 { + received_cids = 0; + break; + } + } + + let client2 = WsClientBuilder::default().build(ws_url).await.unwrap(); + let mut sub3: Subscription> = client2 + .subscribe( + SUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + rpc_params![run], + UNSUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + ) + .await + .unwrap(); + + let _ = sub2 + .next() + .with_timeout(Duration::from_secs(10)) + .await + .is_err(); + + loop { + if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + let check = json.get("metadata").unwrap(); + let expected = serde_json::json!({"name": "test", "replayed": true, "workflow": {"/": format!("{AWAIT_CID}")}}); + assert_eq!(check, &expected); + received_cids += 1; + } else { + panic!("Node one did not publish receipt in time.") + } + + if received_cids == 3 { + received_cids = 0; + break; + } + } + + let _ = sub3 + .next() + .with_timeout(Duration::from_secs(10)) + .await + .is_err(); + + let another_run_str = format!(r#"{{"name": "another_test","workflow": {}}}"#, json_string); + let another_run: serde_json::Value = serde_json::from_str(&another_run_str).unwrap(); + let mut sub4: Subscription> = client2 + .subscribe( + SUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + rpc_params![another_run], + UNSUBSCRIBE_RUN_WORKFLOW_ENDPOINT, + ) + .await + .unwrap(); + + loop { + if let Ok(msg) = sub4.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + let check = json.get("metadata").unwrap(); + let expected = serde_json::json!({"name": "another_test", "replayed": true, "workflow": {"/": format!("{AWAIT_CID}")}}); + assert_eq!(check, &expected); + received_cids += 1; + } else { + panic!("Node one did not publish receipt in time.") + } + + if received_cids == 3 { + break; + } + } + }); + + let _ = Command::new(BIN.as_os_str()).arg("stop").output(); + let _ = kill_homestar(homestar_proc, None); + let _ = stop_homestar(); + remove_db(DB); + + Ok(()) +} diff --git a/homestar-wasm/fixtures/example_add.wasm b/homestar-wasm/fixtures/example_add.wasm index ff7d18bc..47bea752 100755 Binary files a/homestar-wasm/fixtures/example_add.wasm and b/homestar-wasm/fixtures/example_add.wasm differ diff --git a/homestar-wasm/fixtures/example_add.wat b/homestar-wasm/fixtures/example_add.wat index 5969f756..042ab78d 100644 --- a/homestar-wasm/fixtures/example_add.wat +++ b/homestar-wasm/fixtures/example_add.wat @@ -8,6 +8,12 @@ (type (;6;) (func (param i32 i32 i32) (result i32))) (func (;0;) (type 0)) (func (;1;) (type 1) (param i32) (result i32) + call 2 + local.get 0 + i32.const 1 + i32.add + ) + (func (;2;) (type 0) block ;; label = @1 i32.const 0 i32.load8_u offset=1049029 @@ -17,11 +23,14 @@ i32.const 1 i32.store8 offset=1049029 end + ) + (func (;3;) (type 1) (param i32) (result i32) + call 2 local.get 0 i32.const 2 i32.add ) - (func (;2;) (type 2) (param i32 i32) (result i32) + (func (;4;) (type 2) (param i32 i32) (result i32) (local i32 i32 i32 i32 i32) i32.const 0 local.set 2 @@ -53,7 +62,7 @@ i32.add i32.const 12 i32.add - call 3 + call 5 local.tee 1 i32.eqz br_if 0 (;@1;) @@ 
-153,7 +162,7 @@ i32.store offset=4 local.get 2 local.get 1 - call 4 + call 6 br 1 (;@2;) end local.get 2 @@ -216,7 +225,7 @@ i32.store offset=4 local.get 1 local.get 3 - call 4 + call 6 end local.get 0 i32.const 8 @@ -225,7 +234,7 @@ end local.get 2 ) - (func (;3;) (type 1) (param i32) (result i32) + (func (;5;) (type 1) (param i32) (result i32) (local i32 i32 i32 i32 i32 i32 i32 i32 i64) block ;; label = @1 block ;; label = @2 @@ -393,7 +402,7 @@ block ;; label = @6 i32.const 0 i32.load offset=1048984 - local.tee 7 + local.tee 6 i32.const 16 local.get 0 i32.const 11 @@ -423,51 +432,51 @@ i32.and local.get 1 i32.add - local.tee 2 + local.tee 1 i32.const 3 i32.shl - local.tee 5 + local.tee 2 i32.const 1048728 i32.add i32.load local.tee 0 i32.const 8 i32.add - local.tee 6 + local.tee 7 i32.load - local.tee 1 - local.get 5 + local.tee 5 + local.get 2 i32.const 1048720 i32.add - local.tee 5 + local.tee 2 i32.eq br_if 0 (;@8;) - local.get 1 local.get 5 + local.get 2 i32.store offset=12 + local.get 2 local.get 5 - local.get 1 i32.store offset=8 br 1 (;@7;) end i32.const 0 - local.get 7 + local.get 6 i32.const -2 - local.get 2 + local.get 1 i32.rotl i32.and i32.store offset=1048984 end local.get 0 - local.get 2 + local.get 1 i32.const 3 i32.shl - local.tee 2 + local.tee 1 i32.const 3 i32.or i32.store offset=4 local.get 0 - local.get 2 + local.get 1 i32.add local.tee 0 local.get 0 @@ -475,7 +484,7 @@ i32.const 1 i32.or i32.store offset=4 - local.get 6 + local.get 7 return end local.get 2 @@ -498,40 +507,33 @@ i32.eqz br_if 10 (;@2;) local.get 0 - i32.const 0 - local.get 0 - i32.sub - i32.and i32.ctz i32.const 2 i32.shl i32.const 1048576 i32.add i32.load - local.tee 6 + local.tee 7 i32.load offset=4 i32.const -8 i32.and - local.set 1 - block ;; label = @13 - local.get 6 - i32.load offset=16 - local.tee 0 - br_if 0 (;@13;) - local.get 6 - i32.const 20 - i32.add - i32.load - local.set 0 - end - local.get 1 local.get 2 i32.sub local.set 5 block ;; label = @13 - local.get 0 - i32.eqz - br_if 0 (;@13;) + block ;; label = @14 + local.get 7 + i32.load offset=16 + local.tee 0 + br_if 0 (;@14;) + local.get 7 + i32.const 20 + i32.add + i32.load + local.tee 0 + i32.eqz + br_if 1 (;@13;) + end loop ;; label = @14 local.get 0 i32.load offset=4 @@ -542,7 +544,7 @@ local.tee 8 local.get 5 i32.lt_u - local.set 7 + local.set 6 block ;; label = @15 local.get 0 i32.load offset=16 @@ -556,47 +558,47 @@ end local.get 8 local.get 5 - local.get 7 + local.get 6 select local.set 5 local.get 0 - local.get 6 local.get 7 + local.get 6 select - local.set 6 + local.set 7 local.get 1 local.set 0 local.get 1 br_if 0 (;@14;) end end - local.get 6 - call 5 + local.get 7 + call 7 local.get 5 i32.const 16 i32.lt_u br_if 2 (;@10;) - local.get 6 + local.get 7 local.get 2 i32.const 3 i32.or i32.store offset=4 - local.get 6 + local.get 7 local.get 2 i32.add - local.tee 2 + local.tee 1 local.get 5 i32.const 1 i32.or i32.store offset=4 - local.get 2 + local.get 1 local.get 5 i32.add local.get 5 i32.store i32.const 0 i32.load offset=1048992 - local.tee 7 + local.tee 6 br_if 1 (;@11;) br 5 (;@7;) end @@ -617,16 +619,11 @@ local.get 1 i32.shl i32.and - local.tee 0 - i32.const 0 - local.get 0 - i32.sub - i32.and i32.ctz local.tee 1 i32.const 3 i32.shl - local.tee 6 + local.tee 7 i32.const 1048728 i32.add i32.load @@ -636,22 +633,22 @@ local.tee 8 i32.load local.tee 5 - local.get 6 + local.get 7 i32.const 1048720 i32.add - local.tee 6 + local.tee 7 i32.eq br_if 0 (;@13;) local.get 5 - local.get 6 + local.get 7 i32.store offset=12 - local.get 6 
+ local.get 7 local.get 5 i32.store offset=8 br 1 (;@12;) end i32.const 0 - local.get 7 + local.get 6 i32.const -2 local.get 1 i32.rotl @@ -666,34 +663,34 @@ local.get 0 local.get 2 i32.add - local.tee 7 + local.tee 6 local.get 1 i32.const 3 i32.shl - local.tee 1 + local.tee 5 local.get 2 i32.sub - local.tee 2 + local.tee 1 i32.const 1 i32.or i32.store offset=4 local.get 0 - local.get 1 + local.get 5 i32.add - local.get 2 + local.get 1 i32.store i32.const 0 i32.load offset=1048992 - local.tee 5 + local.tee 2 br_if 2 (;@9;) br 3 (;@8;) end - local.get 7 + local.get 6 i32.const -8 i32.and i32.const 1048720 i32.add - local.set 1 + local.set 2 i32.const 0 i32.load offset=1049000 local.set 0 @@ -703,42 +700,41 @@ i32.load offset=1048984 local.tee 8 i32.const 1 - local.get 7 + local.get 6 i32.const 3 i32.shr_u i32.shl - local.tee 7 + local.tee 6 i32.and - i32.eqz br_if 0 (;@12;) - local.get 1 - i32.load offset=8 - local.set 7 + i32.const 0 + local.get 8 + local.get 6 + i32.or + i32.store offset=1048984 + local.get 2 + local.set 6 br 1 (;@11;) end - i32.const 0 - local.get 8 - local.get 7 - i32.or - i32.store offset=1048984 - local.get 1 - local.set 7 + local.get 2 + i32.load offset=8 + local.set 6 end - local.get 1 + local.get 2 local.get 0 i32.store offset=8 - local.get 7 + local.get 6 local.get 0 i32.store offset=12 local.get 0 - local.get 1 + local.get 2 i32.store offset=12 local.get 0 - local.get 7 + local.get 6 i32.store offset=8 br 3 (;@7;) end - local.get 6 + local.get 7 local.get 5 local.get 2 i32.add @@ -746,7 +742,7 @@ i32.const 3 i32.or i32.store offset=4 - local.get 6 + local.get 7 local.get 0 i32.add local.tee 0 @@ -757,12 +753,12 @@ i32.store offset=4 br 3 (;@6;) end - local.get 5 + local.get 2 i32.const -8 i32.and i32.const 1048720 i32.add - local.set 1 + local.set 5 i32.const 0 i32.load offset=1049000 local.set 0 @@ -770,59 +766,58 @@ block ;; label = @10 i32.const 0 i32.load offset=1048984 - local.tee 6 + local.tee 7 i32.const 1 - local.get 5 + local.get 2 i32.const 3 i32.shr_u i32.shl - local.tee 5 + local.tee 2 i32.and - i32.eqz br_if 0 (;@10;) - local.get 1 - i32.load offset=8 - local.set 5 + i32.const 0 + local.get 7 + local.get 2 + i32.or + i32.store offset=1048984 + local.get 5 + local.set 2 br 1 (;@9;) end - i32.const 0 - local.get 6 local.get 5 - i32.or - i32.store offset=1048984 - local.get 1 - local.set 5 + i32.load offset=8 + local.set 2 end - local.get 1 + local.get 5 local.get 0 i32.store offset=8 - local.get 5 + local.get 2 local.get 0 i32.store offset=12 local.get 0 - local.get 1 + local.get 5 i32.store offset=12 local.get 0 - local.get 5 + local.get 2 i32.store offset=8 end i32.const 0 - local.get 7 + local.get 6 i32.store offset=1049000 i32.const 0 - local.get 2 + local.get 1 i32.store offset=1048992 local.get 8 return end i32.const 0 - local.get 2 + local.get 1 i32.store offset=1049000 i32.const 0 local.get 5 i32.store offset=1048992 end - local.get 6 + local.get 7 i32.const 8 i32.add return @@ -848,10 +843,6 @@ i32.eqz br_if 3 (;@2;) local.get 0 - i32.const 0 - local.get 0 - i32.sub - i32.and i32.ctz i32.const 2 i32.shl @@ -865,21 +856,30 @@ br_if 1 (;@3;) end loop ;; label = @4 + local.get 0 + local.get 6 local.get 0 i32.load offset=4 i32.const -8 i32.and local.tee 5 local.get 2 - i32.ge_u - local.get 5 - local.get 2 i32.sub local.tee 8 local.get 1 i32.lt_u - i32.and + local.tee 4 + select + local.set 3 + local.get 5 + local.get 2 + i32.lt_u local.set 7 + local.get 8 + local.get 1 + local.get 4 + select + local.set 8 block ;; label = @5 local.get 0 i32.load 
offset=16 @@ -891,13 +891,13 @@ i32.load local.set 5 end - local.get 0 local.get 6 + local.get 3 local.get 7 select local.set 6 - local.get 8 local.get 1 + local.get 8 local.get 7 select local.set 1 @@ -925,7 +925,7 @@ br_if 1 (;@2;) end local.get 6 - call 5 + call 7 block ;; label = @3 block ;; label = @4 local.get 1 @@ -957,7 +957,7 @@ br_if 0 (;@5;) local.get 0 local.get 1 - call 6 + call 8 br 2 (;@3;) end local.get 1 @@ -965,12 +965,12 @@ i32.and i32.const 1048720 i32.add - local.set 2 + local.set 5 block ;; label = @5 block ;; label = @6 i32.const 0 i32.load offset=1048984 - local.tee 5 + local.tee 2 i32.const 1 local.get 1 i32.const 3 @@ -978,29 +978,28 @@ i32.shl local.tee 1 i32.and - i32.eqz br_if 0 (;@6;) + i32.const 0 local.get 2 - i32.load offset=8 + local.get 1 + i32.or + i32.store offset=1048984 + local.get 5 local.set 1 br 1 (;@5;) end - i32.const 0 local.get 5 - local.get 1 - i32.or - i32.store offset=1048984 - local.get 2 + i32.load offset=8 local.set 1 end - local.get 2 + local.get 5 local.get 0 i32.store offset=8 local.get 1 local.get 0 i32.store offset=12 local.get 0 - local.get 2 + local.get 5 i32.store offset=12 local.get 0 local.get 1 @@ -1038,887 +1037,884 @@ block ;; label = @7 block ;; label = @8 block ;; label = @9 + i32.const 0 + i32.load offset=1048992 + local.tee 0 + local.get 2 + i32.ge_u + br_if 0 (;@9;) block ;; label = @10 + i32.const 0 + i32.load offset=1048996 + local.tee 0 + local.get 2 + i32.gt_u + br_if 0 (;@10;) + i32.const 0 + local.set 1 + local.get 2 + i32.const 65583 + i32.add + local.tee 5 + i32.const 16 + i32.shr_u + memory.grow + local.tee 0 + i32.const -1 + i32.eq + local.tee 7 + br_if 9 (;@1;) + local.get 0 + i32.const 16 + i32.shl + local.tee 6 + i32.eqz + br_if 9 (;@1;) + i32.const 0 + i32.const 0 + i32.load offset=1049008 + i32.const 0 + local.get 5 + i32.const -65536 + i32.and + local.get 7 + select + local.tee 8 + i32.add + local.tee 0 + i32.store offset=1049008 + i32.const 0 + i32.const 0 + i32.load offset=1049012 + local.tee 1 + local.get 0 + local.get 1 + local.get 0 + i32.gt_u + select + i32.store offset=1049012 block ;; label = @11 - i32.const 0 - i32.load offset=1048992 - local.tee 0 - local.get 2 - i32.ge_u - br_if 0 (;@11;) block ;; label = @12 - i32.const 0 - i32.load offset=1048996 - local.tee 0 - local.get 2 - i32.gt_u - br_if 0 (;@12;) - i32.const 0 - local.set 1 - local.get 2 - i32.const 65583 - i32.add - local.tee 5 - i32.const 16 - i32.shr_u - memory.grow - local.tee 0 - i32.const -1 - i32.eq - local.tee 6 - br_if 11 (;@1;) - local.get 0 - i32.const 16 - i32.shl - local.tee 7 - i32.eqz - br_if 11 (;@1;) - i32.const 0 - i32.const 0 - i32.load offset=1049008 - i32.const 0 - local.get 5 - i32.const -65536 - i32.and - local.get 6 - select - local.tee 8 - i32.add - local.tee 0 - i32.store offset=1049008 - i32.const 0 - i32.const 0 - i32.load offset=1049012 - local.tee 1 - local.get 0 - local.get 1 - local.get 0 - i32.gt_u - select - i32.store offset=1049012 + block ;; label = @13 + i32.const 0 + i32.load offset=1049004 + local.tee 1 + i32.eqz + br_if 0 (;@13;) + i32.const 1048704 + local.set 0 + loop ;; label = @14 + local.get 0 + i32.load + local.tee 5 + local.get 0 + i32.load offset=4 + local.tee 7 + i32.add + local.get 6 + i32.eq + br_if 2 (;@12;) + local.get 0 + i32.load offset=8 + local.tee 0 + br_if 0 (;@14;) + br 3 (;@11;) + end + end block ;; label = @13 block ;; label = @14 - block ;; label = @15 - i32.const 0 - i32.load offset=1049004 - local.tee 1 - i32.eqz - br_if 0 (;@15;) - i32.const 1048704 - local.set 0 - loop 
;; label = @16 - local.get 0 - i32.load - local.tee 5 - local.get 0 - i32.load offset=4 - local.tee 6 - i32.add - local.get 7 - i32.eq - br_if 2 (;@14;) - local.get 0 - i32.load offset=8 - local.tee 0 - br_if 0 (;@16;) - br 3 (;@13;) - end - end i32.const 0 i32.load offset=1049020 local.tee 0 i32.eqz - br_if 4 (;@10;) + br_if 0 (;@14;) local.get 0 - local.get 7 - i32.gt_u - br_if 4 (;@10;) - br 11 (;@3;) + local.get 6 + i32.le_u + br_if 1 (;@13;) end - local.get 0 - i32.load offset=12 - br_if 0 (;@13;) - local.get 5 - local.get 1 - i32.gt_u - br_if 0 (;@13;) - local.get 1 - local.get 7 - i32.lt_u - br_if 4 (;@9;) + i32.const 0 + local.get 6 + i32.store offset=1049020 end i32.const 0 + i32.const 4095 + i32.store offset=1049024 + i32.const 0 + local.get 8 + i32.store offset=1048708 + i32.const 0 + local.get 6 + i32.store offset=1048704 + i32.const 0 + i32.const 1048720 + i32.store offset=1048732 + i32.const 0 + i32.const 1048728 + i32.store offset=1048740 + i32.const 0 + i32.const 1048720 + i32.store offset=1048728 + i32.const 0 + i32.const 1048736 + i32.store offset=1048748 + i32.const 0 + i32.const 1048728 + i32.store offset=1048736 + i32.const 0 + i32.const 1048744 + i32.store offset=1048756 + i32.const 0 + i32.const 1048736 + i32.store offset=1048744 + i32.const 0 + i32.const 1048752 + i32.store offset=1048764 + i32.const 0 + i32.const 1048744 + i32.store offset=1048752 + i32.const 0 + i32.const 1048760 + i32.store offset=1048772 + i32.const 0 + i32.const 1048752 + i32.store offset=1048760 + i32.const 0 + i32.const 1048768 + i32.store offset=1048780 + i32.const 0 + i32.const 1048760 + i32.store offset=1048768 + i32.const 0 + i32.const 1048776 + i32.store offset=1048788 + i32.const 0 + i32.const 1048768 + i32.store offset=1048776 + i32.const 0 + i32.const 0 + i32.store offset=1048716 + i32.const 0 + i32.const 1048784 + i32.store offset=1048796 + i32.const 0 + i32.const 1048776 + i32.store offset=1048784 + i32.const 0 + i32.const 1048784 + i32.store offset=1048792 + i32.const 0 + i32.const 1048792 + i32.store offset=1048804 + i32.const 0 + i32.const 1048792 + i32.store offset=1048800 i32.const 0 - i32.load offset=1049020 + i32.const 1048800 + i32.store offset=1048812 + i32.const 0 + i32.const 1048800 + i32.store offset=1048808 + i32.const 0 + i32.const 1048808 + i32.store offset=1048820 + i32.const 0 + i32.const 1048808 + i32.store offset=1048816 + i32.const 0 + i32.const 1048816 + i32.store offset=1048828 + i32.const 0 + i32.const 1048816 + i32.store offset=1048824 + i32.const 0 + i32.const 1048824 + i32.store offset=1048836 + i32.const 0 + i32.const 1048824 + i32.store offset=1048832 + i32.const 0 + i32.const 1048832 + i32.store offset=1048844 + i32.const 0 + i32.const 1048832 + i32.store offset=1048840 + i32.const 0 + i32.const 1048840 + i32.store offset=1048852 + i32.const 0 + i32.const 1048840 + i32.store offset=1048848 + i32.const 0 + i32.const 1048848 + i32.store offset=1048860 + i32.const 0 + i32.const 1048856 + i32.store offset=1048868 + i32.const 0 + i32.const 1048848 + i32.store offset=1048856 + i32.const 0 + i32.const 1048864 + i32.store offset=1048876 + i32.const 0 + i32.const 1048856 + i32.store offset=1048864 + i32.const 0 + i32.const 1048872 + i32.store offset=1048884 + i32.const 0 + i32.const 1048864 + i32.store offset=1048872 + i32.const 0 + i32.const 1048880 + i32.store offset=1048892 + i32.const 0 + i32.const 1048872 + i32.store offset=1048880 + i32.const 0 + i32.const 1048888 + i32.store offset=1048900 + i32.const 0 + i32.const 1048880 + i32.store offset=1048888 + 
i32.const 0 + i32.const 1048896 + i32.store offset=1048908 + i32.const 0 + i32.const 1048888 + i32.store offset=1048896 + i32.const 0 + i32.const 1048904 + i32.store offset=1048916 + i32.const 0 + i32.const 1048896 + i32.store offset=1048904 + i32.const 0 + i32.const 1048912 + i32.store offset=1048924 + i32.const 0 + i32.const 1048904 + i32.store offset=1048912 + i32.const 0 + i32.const 1048920 + i32.store offset=1048932 + i32.const 0 + i32.const 1048912 + i32.store offset=1048920 + i32.const 0 + i32.const 1048928 + i32.store offset=1048940 + i32.const 0 + i32.const 1048920 + i32.store offset=1048928 + i32.const 0 + i32.const 1048936 + i32.store offset=1048948 + i32.const 0 + i32.const 1048928 + i32.store offset=1048936 + i32.const 0 + i32.const 1048944 + i32.store offset=1048956 + i32.const 0 + i32.const 1048936 + i32.store offset=1048944 + i32.const 0 + i32.const 1048952 + i32.store offset=1048964 + i32.const 0 + i32.const 1048944 + i32.store offset=1048952 + i32.const 0 + i32.const 1048960 + i32.store offset=1048972 + i32.const 0 + i32.const 1048952 + i32.store offset=1048960 + i32.const 0 + i32.const 1048968 + i32.store offset=1048980 + i32.const 0 + i32.const 1048960 + i32.store offset=1048968 + i32.const 0 + local.get 6 + i32.store offset=1049004 + i32.const 0 + i32.const 1048968 + i32.store offset=1048976 + i32.const 0 + local.get 8 + i32.const -40 + i32.add local.tee 0 - local.get 7 + i32.store offset=1048996 + local.get 6 + local.get 0 + i32.const 1 + i32.or + i32.store offset=4 + local.get 6 local.get 0 - local.get 7 - i32.lt_u - select - i32.store offset=1049020 - local.get 7 - local.get 8 i32.add - local.set 5 - i32.const 1048704 - local.set 0 + i32.const 40 + i32.store offset=4 + i32.const 0 + i32.const 2097152 + i32.store offset=1049016 + br 10 (;@2;) + end + local.get 0 + i32.load offset=12 + br_if 0 (;@11;) + local.get 5 + local.get 1 + i32.gt_u + br_if 0 (;@11;) + local.get 1 + local.get 6 + i32.lt_u + br_if 3 (;@8;) + end + i32.const 0 + i32.const 0 + i32.load offset=1049020 + local.tee 0 + local.get 6 + local.get 0 + local.get 6 + i32.lt_u + select + i32.store offset=1049020 + local.get 6 + local.get 8 + i32.add + local.set 5 + i32.const 1048704 + local.set 0 + block ;; label = @11 + block ;; label = @12 block ;; label = @13 - block ;; label = @14 - block ;; label = @15 - loop ;; label = @16 - local.get 0 - i32.load - local.get 5 - i32.eq - br_if 1 (;@15;) - local.get 0 - i32.load offset=8 - local.tee 0 - br_if 0 (;@16;) - br 2 (;@14;) - end - end - local.get 0 - i32.load offset=12 - i32.eqz - br_if 1 (;@13;) - end - i32.const 1048704 - local.set 0 - block ;; label = @14 - loop ;; label = @15 - block ;; label = @16 - local.get 0 - i32.load - local.tee 5 - local.get 1 - i32.gt_u - br_if 0 (;@16;) - local.get 5 - local.get 0 - i32.load offset=4 - i32.add - local.tee 5 - local.get 1 - i32.gt_u - br_if 2 (;@14;) - end - local.get 0 - i32.load offset=8 - local.set 0 - br 0 (;@15;) - end - end - i32.const 0 - local.get 7 - i32.store offset=1049004 - i32.const 0 - local.get 8 - i32.const -40 - i32.add - local.tee 0 - i32.store offset=1048996 - local.get 7 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 7 - local.get 0 - i32.add - i32.const 40 - i32.store offset=4 - i32.const 0 - i32.const 2097152 - i32.store offset=1049016 - local.get 1 - local.get 5 - i32.const -32 - i32.add - i32.const -8 - i32.and - i32.const -8 - i32.add - local.tee 0 - local.get 0 - local.get 1 - i32.const 16 - i32.add - i32.lt_u - select - local.tee 6 - i32.const 27 - i32.store 
offset=4 - i32.const 0 - i64.load offset=1048704 align=4 - local.set 9 - local.get 6 - i32.const 16 - i32.add - i32.const 0 - i64.load offset=1048712 align=4 - i64.store align=4 - local.get 6 - local.get 9 - i64.store offset=8 align=4 - i32.const 0 - local.get 8 - i32.store offset=1048708 - i32.const 0 - local.get 7 - i32.store offset=1048704 - i32.const 0 - local.get 6 - i32.const 8 - i32.add - i32.store offset=1048712 - i32.const 0 - i32.const 0 - i32.store offset=1048716 - local.get 6 - i32.const 28 - i32.add - local.set 0 loop ;; label = @14 local.get 0 - i32.const 7 - i32.store + i32.load + local.get 5 + i32.eq + br_if 1 (;@13;) local.get 0 - i32.const 4 - i32.add + i32.load offset=8 local.tee 0 - local.get 5 - i32.lt_u br_if 0 (;@14;) + br 2 (;@12;) end - local.get 6 - local.get 1 - i32.eq - br_if 11 (;@2;) - local.get 6 - local.get 6 - i32.load offset=4 - i32.const -2 - i32.and - i32.store offset=4 - local.get 1 - local.get 6 - local.get 1 - i32.sub - local.tee 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 6 - local.get 0 - i32.store + end + local.get 0 + i32.load offset=12 + i32.eqz + br_if 1 (;@11;) + end + i32.const 1048704 + local.set 0 + block ;; label = @12 + loop ;; label = @13 block ;; label = @14 local.get 0 - i32.const 256 - i32.lt_u - br_if 0 (;@14;) + i32.load + local.tee 5 local.get 1 + i32.gt_u + br_if 0 (;@14;) + local.get 5 local.get 0 - call 6 - br 12 (;@2;) + i32.load offset=4 + i32.add + local.tee 5 + local.get 1 + i32.gt_u + br_if 2 (;@12;) end local.get 0 - i32.const -8 + i32.load offset=8 + local.set 0 + br 0 (;@13;) + end + end + i32.const 0 + local.get 6 + i32.store offset=1049004 + i32.const 0 + local.get 8 + i32.const -40 + i32.add + local.tee 0 + i32.store offset=1048996 + local.get 6 + local.get 0 + i32.const 1 + i32.or + i32.store offset=4 + local.get 6 + local.get 0 + i32.add + i32.const 40 + i32.store offset=4 + i32.const 0 + i32.const 2097152 + i32.store offset=1049016 + local.get 1 + local.get 5 + i32.const -32 + i32.add + i32.const -8 + i32.and + i32.const -8 + i32.add + local.tee 0 + local.get 0 + local.get 1 + i32.const 16 + i32.add + i32.lt_u + select + local.tee 7 + i32.const 27 + i32.store offset=4 + i32.const 0 + i64.load offset=1048704 align=4 + local.set 9 + local.get 7 + i32.const 16 + i32.add + i32.const 0 + i64.load offset=1048712 align=4 + i64.store align=4 + local.get 7 + local.get 9 + i64.store offset=8 align=4 + i32.const 0 + local.get 8 + i32.store offset=1048708 + i32.const 0 + local.get 6 + i32.store offset=1048704 + i32.const 0 + local.get 7 + i32.const 8 + i32.add + i32.store offset=1048712 + i32.const 0 + i32.const 0 + i32.store offset=1048716 + local.get 7 + i32.const 28 + i32.add + local.set 0 + loop ;; label = @12 + local.get 0 + i32.const 7 + i32.store + local.get 0 + i32.const 4 + i32.add + local.tee 0 + local.get 5 + i32.lt_u + br_if 0 (;@12;) + end + local.get 7 + local.get 1 + i32.eq + br_if 9 (;@2;) + local.get 7 + local.get 7 + i32.load offset=4 + i32.const -2 + i32.and + i32.store offset=4 + local.get 1 + local.get 7 + local.get 1 + i32.sub + local.tee 0 + i32.const 1 + i32.or + i32.store offset=4 + local.get 7 + local.get 0 + i32.store + block ;; label = @12 + local.get 0 + i32.const 256 + i32.lt_u + br_if 0 (;@12;) + local.get 1 + local.get 0 + call 8 + br 10 (;@2;) + end + local.get 0 + i32.const -8 + i32.and + i32.const 1048720 + i32.add + local.set 5 + block ;; label = @12 + block ;; label = @13 + i32.const 0 + i32.load offset=1048984 + local.tee 6 + i32.const 1 + local.get 0 + i32.const 3 + i32.shr_u 
+ i32.shl + local.tee 0 i32.and - i32.const 1048720 - i32.add - local.set 5 - block ;; label = @14 - block ;; label = @15 - i32.const 0 - i32.load offset=1048984 - local.tee 7 - i32.const 1 - local.get 0 - i32.const 3 - i32.shr_u - i32.shl - local.tee 0 - i32.and - i32.eqz - br_if 0 (;@15;) - local.get 5 - i32.load offset=8 - local.set 0 - br 1 (;@14;) - end - i32.const 0 - local.get 7 - local.get 0 - i32.or - i32.store offset=1048984 - local.get 5 - local.set 0 - end - local.get 5 - local.get 1 - i32.store offset=8 + br_if 0 (;@13;) + i32.const 0 + local.get 6 local.get 0 - local.get 1 - i32.store offset=12 - local.get 1 + i32.or + i32.store offset=1048984 local.get 5 - i32.store offset=12 - local.get 1 - local.get 0 - i32.store offset=8 - br 11 (;@2;) + local.set 0 + br 1 (;@12;) end - local.get 0 - local.get 7 - i32.store - local.get 0 - local.get 0 - i32.load offset=4 - local.get 8 - i32.add - i32.store offset=4 - local.get 7 - local.get 2 - i32.const 3 - i32.or - i32.store offset=4 local.get 5 - local.get 7 - local.get 2 - i32.add - local.tee 0 - i32.sub - local.set 2 + i32.load offset=8 + local.set 0 + end + local.get 5 + local.get 1 + i32.store offset=8 + local.get 0 + local.get 1 + i32.store offset=12 + local.get 1 + local.get 5 + i32.store offset=12 + local.get 1 + local.get 0 + i32.store offset=8 + br 9 (;@2;) + end + local.get 0 + local.get 6 + i32.store + local.get 0 + local.get 0 + i32.load offset=4 + local.get 8 + i32.add + i32.store offset=4 + local.get 6 + local.get 2 + i32.const 3 + i32.or + i32.store offset=4 + local.get 5 + local.get 6 + local.get 2 + i32.add + local.tee 0 + i32.sub + local.set 1 + local.get 5 + i32.const 0 + i32.load offset=1049004 + i32.eq + br_if 3 (;@7;) + local.get 5 + i32.const 0 + i32.load offset=1049000 + i32.eq + br_if 4 (;@6;) + block ;; label = @11 + local.get 5 + i32.load offset=4 + local.tee 2 + i32.const 3 + i32.and + i32.const 1 + i32.ne + br_if 0 (;@11;) + block ;; label = @12 block ;; label = @13 - local.get 5 - i32.const 0 - i32.load offset=1049004 - i32.eq + local.get 2 + i32.const -8 + i32.and + local.tee 7 + i32.const 256 + i32.lt_u br_if 0 (;@13;) local.get 5 - i32.const 0 - i32.load offset=1049000 - i32.eq - br_if 5 (;@8;) + call 7 + br 1 (;@12;) + end + block ;; label = @13 local.get 5 - i32.load offset=4 - local.tee 1 - i32.const 3 - i32.and - i32.const 1 - i32.ne - br_if 8 (;@5;) - block ;; label = @14 - block ;; label = @15 - local.get 1 - i32.const -8 - i32.and - local.tee 6 - i32.const 256 - i32.lt_u - br_if 0 (;@15;) - local.get 5 - call 5 - br 1 (;@14;) - end - block ;; label = @15 - local.get 5 - i32.const 12 - i32.add - i32.load - local.tee 8 - local.get 5 - i32.const 8 - i32.add - i32.load - local.tee 4 - i32.eq - br_if 0 (;@15;) - local.get 4 - local.get 8 - i32.store offset=12 - local.get 8 - local.get 4 - i32.store offset=8 - br 1 (;@14;) - end - i32.const 0 - i32.const 0 - i32.load offset=1048984 - i32.const -2 - local.get 1 - i32.const 3 - i32.shr_u - i32.rotl - i32.and - i32.store offset=1048984 - end - local.get 6 - local.get 2 + i32.const 12 i32.add - local.set 2 + i32.load + local.tee 8 local.get 5 - local.get 6 + i32.const 8 i32.add - local.tee 5 - i32.load offset=4 - local.set 1 - br 8 (;@5;) + i32.load + local.tee 4 + i32.eq + br_if 0 (;@13;) + local.get 4 + local.get 8 + i32.store offset=12 + local.get 8 + local.get 4 + i32.store offset=8 + br 1 (;@12;) end i32.const 0 - local.get 0 - i32.store offset=1049004 - i32.const 0 i32.const 0 - i32.load offset=1048996 - local.get 2 - i32.add - local.tee 2 - 
i32.store offset=1048996 - local.get 0 + i32.load offset=1048984 + i32.const -2 local.get 2 - i32.const 1 - i32.or - i32.store offset=4 - br 8 (;@4;) + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=1048984 end - i32.const 0 - local.get 0 - local.get 2 - i32.sub - local.tee 1 - i32.store offset=1048996 - i32.const 0 - i32.const 0 - i32.load offset=1049004 - local.tee 0 - local.get 2 - i32.add - local.tee 5 - i32.store offset=1049004 - local.get 5 + local.get 7 local.get 1 - i32.const 1 - i32.or - i32.store offset=4 - local.get 0 - local.get 2 - i32.const 3 - i32.or - i32.store offset=4 - local.get 0 - i32.const 8 i32.add local.set 1 - br 10 (;@1;) + local.get 5 + local.get 7 + i32.add + local.tee 5 + i32.load offset=4 + local.set 2 end - i32.const 0 - i32.load offset=1049000 - local.set 1 - local.get 0 - local.get 2 - i32.sub - local.tee 5 - i32.const 16 - i32.lt_u - br_if 3 (;@7;) - i32.const 0 local.get 5 - i32.store offset=1048992 - i32.const 0 - local.get 1 local.get 2 - i32.add - local.tee 7 - i32.store offset=1049000 - local.get 7 - local.get 5 + i32.const -2 + i32.and + i32.store offset=4 + local.get 0 + local.get 1 i32.const 1 i32.or i32.store offset=4 - local.get 1 local.get 0 + local.get 1 i32.add - local.get 5 + local.get 1 i32.store + block ;; label = @11 + local.get 1 + i32.const 256 + i32.lt_u + br_if 0 (;@11;) + local.get 0 + local.get 1 + call 8 + br 8 (;@3;) + end local.get 1 - local.get 2 - i32.const 3 - i32.or - i32.store offset=4 - br 4 (;@6;) + i32.const -8 + i32.and + i32.const 1048720 + i32.add + local.set 5 + block ;; label = @11 + block ;; label = @12 + i32.const 0 + i32.load offset=1048984 + local.tee 2 + i32.const 1 + local.get 1 + i32.const 3 + i32.shr_u + i32.shl + local.tee 1 + i32.and + br_if 0 (;@12;) + i32.const 0 + local.get 2 + local.get 1 + i32.or + i32.store offset=1048984 + local.get 5 + local.set 1 + br 1 (;@11;) + end + local.get 5 + i32.load offset=8 + local.set 1 + end + local.get 5 + local.get 0 + i32.store offset=8 + local.get 1 + local.get 0 + i32.store offset=12 + local.get 0 + local.get 5 + i32.store offset=12 + local.get 0 + local.get 1 + i32.store offset=8 + br 7 (;@3;) end i32.const 0 - local.get 7 - i32.store offset=1049020 - br 6 (;@3;) + local.get 0 + local.get 2 + i32.sub + local.tee 1 + i32.store offset=1048996 + i32.const 0 + i32.const 0 + i32.load offset=1049004 + local.tee 0 + local.get 2 + i32.add + local.tee 5 + i32.store offset=1049004 + local.get 5 + local.get 1 + i32.const 1 + i32.or + i32.store offset=4 + local.get 0 + local.get 2 + i32.const 3 + i32.or + i32.store offset=4 + local.get 0 + i32.const 8 + i32.add + local.set 1 + br 8 (;@1;) end + i32.const 0 + i32.load offset=1049000 + local.set 1 local.get 0 - local.get 6 - local.get 8 - i32.add - i32.store offset=4 + local.get 2 + i32.sub + local.tee 5 + i32.const 16 + i32.lt_u + br_if 3 (;@5;) i32.const 0 - i32.load offset=1049004 + local.get 5 + i32.store offset=1048992 i32.const 0 - i32.load offset=1048996 - local.get 8 + local.get 1 + local.get 2 i32.add - call 7 - br 6 (;@2;) + local.tee 6 + i32.store offset=1049000 + local.get 6 + local.get 5 + i32.const 1 + i32.or + i32.store offset=4 + local.get 1 + local.get 0 + i32.add + local.get 5 + i32.store + local.get 1 + local.get 2 + i32.const 3 + i32.or + i32.store offset=4 + br 4 (;@4;) end - i32.const 0 local.get 0 - i32.store offset=1049000 - i32.const 0 - i32.const 0 - i32.load offset=1048992 - local.get 2 + local.get 7 + local.get 8 i32.add - local.tee 2 - i32.store offset=1048992 - local.get 0 - 
local.get 2 - i32.const 1 - i32.or i32.store offset=4 - local.get 0 - local.get 2 + i32.const 0 + i32.load offset=1049004 + i32.const 0 + i32.load offset=1048996 + local.get 8 i32.add - local.get 2 - i32.store - br 3 (;@4;) + call 9 + br 5 (;@2;) end i32.const 0 - i32.const 0 - i32.store offset=1049000 + local.get 0 + i32.store offset=1049004 i32.const 0 i32.const 0 - i32.store offset=1048992 + i32.load offset=1048996 local.get 1 - local.get 0 - i32.const 3 - i32.or - i32.store offset=4 - local.get 1 - local.get 0 i32.add - local.tee 0 + local.tee 1 + i32.store offset=1048996 local.get 0 - i32.load offset=4 + local.get 1 i32.const 1 i32.or i32.store offset=4 + br 3 (;@3;) end + i32.const 0 + local.get 0 + i32.store offset=1049000 + i32.const 0 + i32.const 0 + i32.load offset=1048992 local.get 1 - i32.const 8 i32.add - return - end - local.get 5 - local.get 1 - i32.const -2 - i32.and - i32.store offset=4 - local.get 0 - local.get 2 - i32.const 1 - i32.or - i32.store offset=4 - local.get 0 - local.get 2 - i32.add - local.get 2 - i32.store - block ;; label = @5 - local.get 2 - i32.const 256 - i32.lt_u - br_if 0 (;@5;) + local.tee 1 + i32.store offset=1048992 local.get 0 - local.get 2 - call 6 - br 1 (;@4;) - end - local.get 2 - i32.const -8 - i32.and - i32.const 1048720 - i32.add - local.set 1 - block ;; label = @5 - block ;; label = @6 - i32.const 0 - i32.load offset=1048984 - local.tee 5 - i32.const 1 - local.get 2 - i32.const 3 - i32.shr_u - i32.shl - local.tee 2 - i32.and - i32.eqz - br_if 0 (;@6;) - local.get 1 - i32.load offset=8 - local.set 2 - br 1 (;@5;) - end - i32.const 0 - local.get 5 - local.get 2 + local.get 1 + i32.const 1 i32.or - i32.store offset=1048984 + i32.store offset=4 + local.get 0 local.get 1 - local.set 2 + i32.add + local.get 1 + i32.store + br 2 (;@3;) end + i32.const 0 + i32.const 0 + i32.store offset=1049000 + i32.const 0 + i32.const 0 + i32.store offset=1048992 local.get 1 local.get 0 - i32.store offset=8 - local.get 2 - local.get 0 - i32.store offset=12 - local.get 0 + i32.const 3 + i32.or + i32.store offset=4 local.get 1 - i32.store offset=12 local.get 0 - local.get 2 - i32.store offset=8 + i32.add + local.tee 0 + local.get 0 + i32.load offset=4 + i32.const 1 + i32.or + i32.store offset=4 end - local.get 7 + local.get 1 i32.const 8 i32.add return - end - i32.const 0 - i32.const 4095 - i32.store offset=1049024 - i32.const 0 - local.get 8 - i32.store offset=1048708 - i32.const 0 - local.get 7 - i32.store offset=1048704 - i32.const 0 - i32.const 1048720 - i32.store offset=1048732 - i32.const 0 - i32.const 1048728 - i32.store offset=1048740 - i32.const 0 - i32.const 1048720 - i32.store offset=1048728 - i32.const 0 - i32.const 1048736 - i32.store offset=1048748 - i32.const 0 - i32.const 1048728 - i32.store offset=1048736 - i32.const 0 - i32.const 1048744 - i32.store offset=1048756 - i32.const 0 - i32.const 1048736 - i32.store offset=1048744 - i32.const 0 - i32.const 1048752 - i32.store offset=1048764 - i32.const 0 - i32.const 1048744 - i32.store offset=1048752 - i32.const 0 - i32.const 1048760 - i32.store offset=1048772 - i32.const 0 - i32.const 1048752 - i32.store offset=1048760 - i32.const 0 - i32.const 1048768 - i32.store offset=1048780 - i32.const 0 - i32.const 1048760 - i32.store offset=1048768 - i32.const 0 - i32.const 1048776 - i32.store offset=1048788 - i32.const 0 - i32.const 1048768 - i32.store offset=1048776 - i32.const 0 - i32.const 0 - i32.store offset=1048716 - i32.const 0 - i32.const 1048784 - i32.store offset=1048796 - i32.const 0 - i32.const 
1048776 - i32.store offset=1048784 - i32.const 0 - i32.const 1048784 - i32.store offset=1048792 - i32.const 0 - i32.const 1048792 - i32.store offset=1048804 - i32.const 0 - i32.const 1048792 - i32.store offset=1048800 - i32.const 0 - i32.const 1048800 - i32.store offset=1048812 - i32.const 0 - i32.const 1048800 - i32.store offset=1048808 - i32.const 0 - i32.const 1048808 - i32.store offset=1048820 - i32.const 0 - i32.const 1048808 - i32.store offset=1048816 - i32.const 0 - i32.const 1048816 - i32.store offset=1048828 - i32.const 0 - i32.const 1048816 - i32.store offset=1048824 - i32.const 0 - i32.const 1048824 - i32.store offset=1048836 - i32.const 0 - i32.const 1048824 - i32.store offset=1048832 - i32.const 0 - i32.const 1048832 - i32.store offset=1048844 - i32.const 0 - i32.const 1048832 - i32.store offset=1048840 - i32.const 0 - i32.const 1048840 - i32.store offset=1048852 - i32.const 0 - i32.const 1048840 - i32.store offset=1048848 - i32.const 0 - i32.const 1048848 - i32.store offset=1048860 - i32.const 0 - i32.const 1048856 - i32.store offset=1048868 - i32.const 0 - i32.const 1048848 - i32.store offset=1048856 - i32.const 0 - i32.const 1048864 - i32.store offset=1048876 - i32.const 0 - i32.const 1048856 - i32.store offset=1048864 - i32.const 0 - i32.const 1048872 - i32.store offset=1048884 - i32.const 0 - i32.const 1048864 - i32.store offset=1048872 - i32.const 0 - i32.const 1048880 - i32.store offset=1048892 - i32.const 0 - i32.const 1048872 - i32.store offset=1048880 - i32.const 0 - i32.const 1048888 - i32.store offset=1048900 - i32.const 0 - i32.const 1048880 - i32.store offset=1048888 - i32.const 0 - i32.const 1048896 - i32.store offset=1048908 - i32.const 0 - i32.const 1048888 - i32.store offset=1048896 - i32.const 0 - i32.const 1048904 - i32.store offset=1048916 - i32.const 0 - i32.const 1048896 - i32.store offset=1048904 - i32.const 0 - i32.const 1048912 - i32.store offset=1048924 - i32.const 0 - i32.const 1048904 - i32.store offset=1048912 - i32.const 0 - i32.const 1048920 - i32.store offset=1048932 - i32.const 0 - i32.const 1048912 - i32.store offset=1048920 - i32.const 0 - i32.const 1048928 - i32.store offset=1048940 - i32.const 0 - i32.const 1048920 - i32.store offset=1048928 - i32.const 0 - i32.const 1048936 - i32.store offset=1048948 - i32.const 0 - i32.const 1048928 - i32.store offset=1048936 - i32.const 0 - i32.const 1048944 - i32.store offset=1048956 - i32.const 0 - i32.const 1048936 - i32.store offset=1048944 - i32.const 0 - i32.const 1048952 - i32.store offset=1048964 - i32.const 0 - i32.const 1048944 - i32.store offset=1048952 - i32.const 0 - i32.const 1048960 - i32.store offset=1048972 - i32.const 0 - i32.const 1048952 - i32.store offset=1048960 - i32.const 0 - i32.const 1048968 - i32.store offset=1048980 - i32.const 0 - i32.const 1048960 - i32.store offset=1048968 - i32.const 0 - local.get 7 - i32.store offset=1049004 - i32.const 0 - i32.const 1048968 - i32.store offset=1048976 - i32.const 0 - local.get 8 - i32.const -40 - i32.add - local.tee 0 - i32.store offset=1048996 - local.get 7 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 7 - local.get 0 + end + local.get 6 + i32.const 8 i32.add - i32.const 40 - i32.store offset=4 - i32.const 0 - i32.const 2097152 - i32.store offset=1049016 + return end i32.const 0 local.set 1 @@ -1959,7 +1955,7 @@ end local.get 1 ) - (func (;4;) (type 3) (param i32 i32) + (func (;6;) (type 3) (param i32 i32) (local i32 i32 i32 i32) local.get 0 local.get 1 @@ -2026,7 +2022,7 @@ i32.lt_u br_if 0 (;@4;) local.get 0 - 
call 5 + call 7 br 1 (;@3;) end block ;; label = @4 @@ -2061,92 +2057,99 @@ i32.and i32.store offset=1048984 end - block ;; label = @3 - local.get 2 - i32.load offset=4 - local.tee 3 - i32.const 2 - i32.and - i32.eqz - br_if 0 (;@3;) - local.get 2 - local.get 3 - i32.const -2 - i32.and - i32.store offset=4 - local.get 0 - local.get 1 - i32.const 1 - i32.or - i32.store offset=4 - local.get 0 - local.get 1 - i32.add - local.get 1 - i32.store - br 2 (;@1;) - end block ;; label = @3 block ;; label = @4 - local.get 2 - i32.const 0 - i32.load offset=1049004 - i32.eq - br_if 0 (;@4;) - local.get 2 - i32.const 0 - i32.load offset=1049000 - i32.eq - br_if 1 (;@3;) - local.get 3 - i32.const -8 - i32.and - local.tee 4 - local.get 1 - i32.add - local.set 1 block ;; label = @5 - block ;; label = @6 - local.get 4 - i32.const 256 - i32.lt_u - br_if 0 (;@6;) - local.get 2 - call 5 - br 1 (;@5;) - end - block ;; label = @6 - local.get 2 - i32.const 12 - i32.add - i32.load - local.tee 4 - local.get 2 - i32.const 8 - i32.add - i32.load - local.tee 2 - i32.eq - br_if 0 (;@6;) - local.get 2 - local.get 4 - i32.store offset=12 - local.get 4 - local.get 2 - i32.store offset=8 - br 1 (;@5;) - end + local.get 2 + i32.load offset=4 + local.tee 3 + i32.const 2 + i32.and + br_if 0 (;@5;) + local.get 2 i32.const 0 + i32.load offset=1049004 + i32.eq + br_if 2 (;@3;) + local.get 2 i32.const 0 - i32.load offset=1048984 - i32.const -2 + i32.load offset=1049000 + i32.eq + br_if 4 (;@1;) local.get 3 - i32.const 3 - i32.shr_u - i32.rotl + i32.const -8 i32.and - i32.store offset=1048984 + local.tee 4 + local.get 1 + i32.add + local.set 1 + block ;; label = @6 + block ;; label = @7 + local.get 4 + i32.const 256 + i32.lt_u + br_if 0 (;@7;) + local.get 2 + call 7 + br 1 (;@6;) + end + block ;; label = @7 + local.get 2 + i32.const 12 + i32.add + i32.load + local.tee 4 + local.get 2 + i32.const 8 + i32.add + i32.load + local.tee 2 + i32.eq + br_if 0 (;@7;) + local.get 2 + local.get 4 + i32.store offset=12 + local.get 4 + local.get 2 + i32.store offset=8 + br 1 (;@6;) + end + i32.const 0 + i32.const 0 + i32.load offset=1048984 + i32.const -2 + local.get 3 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=1048984 + end + local.get 0 + local.get 1 + i32.const 1 + i32.or + i32.store offset=4 + local.get 0 + local.get 1 + i32.add + local.get 1 + i32.store + local.get 0 + i32.const 0 + i32.load offset=1049000 + i32.ne + br_if 1 (;@4;) + i32.const 0 + local.get 1 + i32.store offset=1048992 + return end + local.get 2 + local.get 3 + i32.const -2 + i32.and + i32.store offset=4 local.get 0 local.get 1 i32.const 1 @@ -2157,125 +2160,114 @@ i32.add local.get 1 i32.store + end + block ;; label = @4 + local.get 1 + i32.const 256 + i32.lt_u + br_if 0 (;@4;) local.get 0 - i32.const 0 - i32.load offset=1049000 - i32.ne - br_if 3 (;@1;) - i32.const 0 local.get 1 - i32.store offset=1048992 - br 2 (;@2;) + call 8 + return end - i32.const 0 - local.get 0 - i32.store offset=1049004 - i32.const 0 - i32.const 0 - i32.load offset=1048996 local.get 1 + i32.const -8 + i32.and + i32.const 1048720 i32.add - local.tee 1 - i32.store offset=1048996 + local.set 2 + block ;; label = @4 + block ;; label = @5 + i32.const 0 + i32.load offset=1048984 + local.tee 3 + i32.const 1 + local.get 1 + i32.const 3 + i32.shr_u + i32.shl + local.tee 1 + i32.and + br_if 0 (;@5;) + i32.const 0 + local.get 3 + local.get 1 + i32.or + i32.store offset=1048984 + local.get 2 + local.set 1 + br 1 (;@4;) + end + local.get 2 + i32.load offset=8 + local.set 1 + end + 
local.get 2 local.get 0 + i32.store offset=8 local.get 1 - i32.const 1 - i32.or - i32.store offset=4 local.get 0 - i32.const 0 - i32.load offset=1049000 - i32.ne - br_if 1 (;@2;) - i32.const 0 - i32.const 0 - i32.store offset=1048992 - i32.const 0 - i32.const 0 - i32.store offset=1049000 + i32.store offset=12 + local.get 0 + local.get 2 + i32.store offset=12 + local.get 0 + local.get 1 + i32.store offset=8 return end i32.const 0 local.get 0 - i32.store offset=1049000 + i32.store offset=1049004 i32.const 0 i32.const 0 - i32.load offset=1048992 + i32.load offset=1048996 local.get 1 i32.add local.tee 1 - i32.store offset=1048992 - local.get 0 - local.get 1 - i32.const 1 - i32.or - i32.store offset=4 + i32.store offset=1048996 local.get 0 local.get 1 - i32.add - local.get 1 - i32.store - return - end - return - end - block ;; label = @1 - local.get 1 - i32.const 256 - i32.lt_u - br_if 0 (;@1;) - local.get 0 - local.get 1 - call 6 - return - end - local.get 1 - i32.const -8 - i32.and - i32.const 1048720 - i32.add - local.set 2 - block ;; label = @1 - block ;; label = @2 - i32.const 0 - i32.load offset=1048984 - local.tee 3 i32.const 1 - local.get 1 - i32.const 3 - i32.shr_u - i32.shl - local.tee 1 - i32.and - i32.eqz + i32.or + i32.store offset=4 + local.get 0 + i32.const 0 + i32.load offset=1049000 + i32.ne br_if 0 (;@2;) - local.get 2 - i32.load offset=8 - local.set 1 - br 1 (;@1;) + i32.const 0 + i32.const 0 + i32.store offset=1048992 + i32.const 0 + i32.const 0 + i32.store offset=1049000 end - i32.const 0 - local.get 3 - local.get 1 - i32.or - i32.store offset=1048984 - local.get 2 - local.set 1 + return end - local.get 2 + i32.const 0 local.get 0 - i32.store offset=8 + i32.store offset=1049000 + i32.const 0 + i32.const 0 + i32.load offset=1048992 local.get 1 + i32.add + local.tee 1 + i32.store offset=1048992 local.get 0 - i32.store offset=12 - local.get 0 - local.get 2 - i32.store offset=12 + local.get 1 + i32.const 1 + i32.or + i32.store offset=4 local.get 0 local.get 1 - i32.store offset=8 + i32.add + local.get 1 + i32.store ) - (func (;5;) (type 4) (param i32) + (func (;7;) (type 4) (param i32) (local i32 i32 i32 i32 i32) local.get 0 i32.load offset=24 @@ -2436,7 +2428,7 @@ return end ) - (func (;6;) (type 3) (param i32 i32) + (func (;8;) (type 3) (param i32 i32) (local i32 i32 i32 i32) i32.const 31 local.set 2 @@ -2477,45 +2469,44 @@ i32.add local.set 3 block ;; label = @1 + block ;; label = @2 + i32.const 0 + i32.load offset=1048988 + local.tee 4 + i32.const 1 + local.get 2 + i32.shl + local.tee 5 + i32.and + br_if 0 (;@2;) + i32.const 0 + local.get 4 + local.get 5 + i32.or + i32.store offset=1048988 + local.get 3 + local.get 0 + i32.store + local.get 0 + local.get 3 + i32.store offset=24 + br 1 (;@1;) + end block ;; label = @2 block ;; label = @3 block ;; label = @4 - block ;; label = @5 - i32.const 0 - i32.load offset=1048988 - local.tee 4 - i32.const 1 - local.get 2 - i32.shl - local.tee 5 - i32.and - i32.eqz - br_if 0 (;@5;) - local.get 3 - i32.load - local.tee 4 - i32.load offset=4 - i32.const -8 - i32.and - local.get 1 - i32.ne - br_if 1 (;@4;) - local.get 4 - local.set 2 - br 2 (;@3;) - end - i32.const 0 - local.get 4 - local.get 5 - i32.or - i32.store offset=1048988 - local.get 3 - local.get 0 - i32.store - local.get 0 local.get 3 - i32.store offset=24 - br 3 (;@1;) + i32.load + local.tee 4 + i32.load offset=4 + i32.const -8 + i32.and + local.get 1 + i32.ne + br_if 0 (;@4;) + local.get 4 + local.set 2 + br 1 (;@3;) end local.get 1 i32.const 0 @@ -2595,7 +2586,7 @@ local.get 0 
i32.store offset=8 ) - (func (;7;) (type 3) (param i32 i32) + (func (;9;) (type 3) (param i32 i32) (local i32 i32) i32.const 0 local.get 0 @@ -2606,6 +2597,7 @@ local.tee 2 i32.const -8 i32.add + local.tee 3 i32.store offset=1049004 i32.const 0 local.get 0 @@ -2615,15 +2607,13 @@ i32.add i32.const 8 i32.add - local.tee 3 + local.tee 2 i32.store offset=1048996 - local.get 2 - i32.const -4 - i32.add local.get 3 + local.get 2 i32.const 1 i32.or - i32.store + i32.store offset=4 local.get 0 local.get 1 i32.add @@ -2633,7 +2623,7 @@ i32.const 2097152 i32.store offset=1049016 ) - (func (;8;) (type 4) (param i32) + (func (;10;) (type 4) (param i32) (local i32 i32 i32 i32 i32) local.get 0 i32.const -8 @@ -2651,139 +2641,118 @@ local.set 3 block ;; label = @1 block ;; label = @2 - local.get 2 - i32.const 1 - i32.and - br_if 0 (;@2;) - local.get 2 - i32.const 3 - i32.and - i32.eqz - br_if 1 (;@1;) - local.get 1 - i32.load - local.tee 2 - local.get 0 - i32.add - local.set 0 - block ;; label = @3 - local.get 1 - local.get 2 - i32.sub - local.tee 1 - i32.const 0 - i32.load offset=1049000 - i32.ne - br_if 0 (;@3;) - local.get 3 - i32.load offset=4 - i32.const 3 - i32.and - i32.const 3 - i32.ne - br_if 1 (;@2;) - i32.const 0 - local.get 0 - i32.store offset=1048992 - local.get 3 - local.get 3 - i32.load offset=4 - i32.const -2 - i32.and - i32.store offset=4 - local.get 1 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 1 - local.get 0 - i32.add - local.get 0 - i32.store - return - end - block ;; label = @3 - local.get 2 - i32.const 256 - i32.lt_u - br_if 0 (;@3;) - local.get 1 - call 5 - br 1 (;@2;) - end - block ;; label = @3 - local.get 1 - i32.const 12 - i32.add - i32.load - local.tee 4 - local.get 1 - i32.const 8 - i32.add - i32.load - local.tee 5 - i32.eq - br_if 0 (;@3;) - local.get 5 - local.get 4 - i32.store offset=12 - local.get 4 - local.get 5 - i32.store offset=8 - br 1 (;@2;) - end - i32.const 0 - i32.const 0 - i32.load offset=1048984 - i32.const -2 - local.get 2 - i32.const 3 - i32.shr_u - i32.rotl - i32.and - i32.store offset=1048984 - end - block ;; label = @2 - block ;; label = @3 - local.get 3 - i32.load offset=4 - local.tee 2 - i32.const 2 - i32.and - i32.eqz - br_if 0 (;@3;) - local.get 3 - local.get 2 - i32.const -2 - i32.and - i32.store offset=4 - local.get 1 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 1 - local.get 0 - i32.add - local.get 0 - i32.store - br 1 (;@2;) - end block ;; label = @3 + block ;; label = @4 + local.get 2 + i32.const 1 + i32.and + br_if 0 (;@4;) + local.get 2 + i32.const 3 + i32.and + i32.eqz + br_if 1 (;@3;) + local.get 1 + i32.load + local.tee 2 + local.get 0 + i32.add + local.set 0 + block ;; label = @5 + local.get 1 + local.get 2 + i32.sub + local.tee 1 + i32.const 0 + i32.load offset=1049000 + i32.ne + br_if 0 (;@5;) + local.get 3 + i32.load offset=4 + i32.const 3 + i32.and + i32.const 3 + i32.ne + br_if 1 (;@4;) + i32.const 0 + local.get 0 + i32.store offset=1048992 + local.get 3 + local.get 3 + i32.load offset=4 + i32.const -2 + i32.and + i32.store offset=4 + local.get 1 + local.get 0 + i32.const 1 + i32.or + i32.store offset=4 + local.get 3 + local.get 0 + i32.store + return + end + block ;; label = @5 + local.get 2 + i32.const 256 + i32.lt_u + br_if 0 (;@5;) + local.get 1 + call 7 + br 1 (;@4;) + end + block ;; label = @5 + local.get 1 + i32.const 12 + i32.add + i32.load + local.tee 4 + local.get 1 + i32.const 8 + i32.add + i32.load + local.tee 5 + i32.eq + br_if 0 (;@5;) + local.get 5 + local.get 4 + 
i32.store offset=12 + local.get 4 + local.get 5 + i32.store offset=8 + br 1 (;@4;) + end + i32.const 0 + i32.const 0 + i32.load offset=1048984 + i32.const -2 + local.get 2 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=1048984 + end block ;; label = @4 block ;; label = @5 block ;; label = @6 + local.get 3 + i32.load offset=4 + local.tee 2 + i32.const 2 + i32.and + br_if 0 (;@6;) local.get 3 i32.const 0 i32.load offset=1049004 i32.eq - br_if 0 (;@6;) + br_if 2 (;@4;) local.get 3 i32.const 0 i32.load offset=1049000 i32.eq - br_if 1 (;@5;) + br_if 5 (;@1;) local.get 2 i32.const -8 i32.and @@ -2798,7 +2767,7 @@ i32.lt_u br_if 0 (;@8;) local.get 3 - call 5 + call 7 br 1 (;@7;) end block ;; label = @8 @@ -2847,133 +2816,126 @@ i32.const 0 i32.load offset=1049000 i32.ne - br_if 4 (;@2;) + br_if 1 (;@5;) i32.const 0 local.get 0 i32.store offset=1048992 return end - i32.const 0 - local.get 1 - i32.store offset=1049004 - i32.const 0 - i32.const 0 - i32.load offset=1048996 - local.get 0 - i32.add - local.tee 0 - i32.store offset=1048996 + local.get 3 + local.get 2 + i32.const -2 + i32.and + i32.store offset=4 local.get 1 local.get 0 i32.const 1 i32.or i32.store offset=4 local.get 1 - i32.const 0 - i32.load offset=1049000 - i32.eq - br_if 1 (;@4;) - br 2 (;@3;) + local.get 0 + i32.add + local.get 0 + i32.store end - i32.const 0 + local.get 0 + i32.const 256 + i32.lt_u + br_if 2 (;@2;) local.get 1 - i32.store offset=1049000 + local.get 0 + call 8 i32.const 0 i32.const 0 - i32.load offset=1048992 - local.get 0 + i32.load offset=1049024 + i32.const -1 i32.add - local.tee 0 - i32.store offset=1048992 - local.get 1 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 + local.tee 1 + i32.store offset=1049024 local.get 1 - local.get 0 - i32.add - local.get 0 - i32.store + br_if 1 (;@3;) + call 11 return end i32.const 0 + local.get 1 + i32.store offset=1049004 i32.const 0 - i32.store offset=1048992 i32.const 0 + i32.load offset=1048996 + local.get 0 + i32.add + local.tee 0 + i32.store offset=1048996 + local.get 1 + local.get 0 + i32.const 1 + i32.or + i32.store offset=4 + block ;; label = @4 + local.get 1 + i32.const 0 + i32.load offset=1049000 + i32.ne + br_if 0 (;@4;) + i32.const 0 + i32.const 0 + i32.store offset=1048992 + i32.const 0 + i32.const 0 + i32.store offset=1049000 + end + local.get 0 i32.const 0 - i32.store offset=1049000 - end - local.get 0 - i32.const 0 - i32.load offset=1049016 - i32.le_u - br_if 1 (;@1;) - i32.const 0 - i32.load offset=1049004 - local.tee 0 - i32.eqz - br_if 1 (;@1;) - block ;; label = @3 + i32.load offset=1049016 + i32.le_u + br_if 0 (;@3;) i32.const 0 - i32.load offset=1048996 - i32.const 41 - i32.lt_u + i32.load offset=1049004 + local.tee 0 + i32.eqz br_if 0 (;@3;) - i32.const 1048704 - local.set 1 - loop ;; label = @4 - block ;; label = @5 + block ;; label = @4 + i32.const 0 + i32.load offset=1048996 + i32.const 41 + i32.lt_u + br_if 0 (;@4;) + i32.const 1048704 + local.set 1 + loop ;; label = @5 + block ;; label = @6 + local.get 1 + i32.load + local.tee 3 + local.get 0 + i32.gt_u + br_if 0 (;@6;) + local.get 3 + local.get 1 + i32.load offset=4 + i32.add + local.get 0 + i32.gt_u + br_if 2 (;@4;) + end local.get 1 - i32.load - local.tee 3 - local.get 0 - i32.gt_u + i32.load offset=8 + local.tee 1 br_if 0 (;@5;) - local.get 3 - local.get 1 - i32.load offset=4 - i32.add - local.get 0 - i32.gt_u - br_if 2 (;@3;) end - local.get 1 - i32.load offset=8 - local.tee 1 - br_if 0 (;@4;) end + call 11 + i32.const 0 + i32.load offset=1048996 + i32.const 0 + 
i32.load offset=1049016 + i32.le_u + br_if 0 (;@3;) + i32.const 0 + i32.const -1 + i32.store offset=1049016 end - call 9 - i32.const 0 - i32.load offset=1048996 - i32.const 0 - i32.load offset=1049016 - i32.le_u - br_if 1 (;@1;) - i32.const 0 - i32.const -1 - i32.store offset=1049016 - return - end - block ;; label = @2 - local.get 0 - i32.const 256 - i32.lt_u - br_if 0 (;@2;) - local.get 1 - local.get 0 - call 6 - i32.const 0 - i32.const 0 - i32.load offset=1049024 - i32.const -1 - i32.add - local.tee 1 - i32.store offset=1049024 - local.get 1 - br_if 1 (;@1;) - call 9 return end local.get 0 @@ -2994,19 +2956,18 @@ i32.shl local.tee 0 i32.and - i32.eqz br_if 0 (;@3;) + i32.const 0 + local.get 2 + local.get 0 + i32.or + i32.store offset=1048984 local.get 3 - i32.load offset=8 local.set 0 br 1 (;@2;) end - i32.const 0 - local.get 2 - local.get 0 - i32.or - i32.store offset=1048984 local.get 3 + i32.load offset=8 local.set 0 end local.get 3 @@ -3021,9 +2982,30 @@ local.get 1 local.get 0 i32.store offset=8 + return end + i32.const 0 + local.get 1 + i32.store offset=1049000 + i32.const 0 + i32.const 0 + i32.load offset=1048992 + local.get 0 + i32.add + local.tee 0 + i32.store offset=1048992 + local.get 1 + local.get 0 + i32.const 1 + i32.or + i32.store offset=4 + local.get 1 + local.get 0 + i32.add + local.get 0 + i32.store ) - (func (;9;) (type 0) + (func (;11;) (type 0) (local i32 i32) i32.const 0 local.set 0 @@ -3055,7 +3037,7 @@ select i32.store offset=1049024 ) - (func (;10;) (type 5) (param i32 i32 i32 i32) (result i32) + (func (;12;) (type 5) (param i32 i32 i32 i32) (result i32) (local i32 i32 i32 i32 i32) block ;; label = @1 block ;; label = @2 @@ -3079,7 +3061,7 @@ br_if 0 (;@8;) local.get 2 local.get 3 - call 2 + call 4 local.tee 2 i32.eqz br_if 5 (;@3;) @@ -3091,10 +3073,10 @@ local.get 3 i32.lt_u select - call 12 + call 14 local.set 3 local.get 0 - call 8 + call 10 local.get 3 return end @@ -3137,15 +3119,15 @@ local.get 0 i32.const -8 i32.add - local.set 6 + local.tee 6 + local.get 2 + i32.add + local.set 7 local.get 2 local.get 1 i32.ge_u br_if 1 (;@13;) - local.get 6 - local.get 2 - i32.add - local.tee 7 + local.get 7 i32.const 0 i32.load offset=1049004 i32.eq @@ -3180,7 +3162,7 @@ i32.lt_u br_if 2 (;@12;) local.get 7 - call 5 + call 7 br 3 (;@11;) end local.get 1 @@ -3225,18 +3207,15 @@ i32.const 3 i32.or i32.store offset=4 - local.get 1 - local.get 3 - i32.add - local.tee 2 - local.get 2 + local.get 7 + local.get 7 i32.load offset=4 i32.const 1 i32.or i32.store offset=4 local.get 1 local.get 3 - call 4 + call 6 local.get 0 return end @@ -3294,8 +3273,8 @@ i32.const 3 i32.or i32.store offset=4 - local.get 1 - local.get 3 + local.get 6 + local.get 2 i32.add local.tee 2 local.get 2 @@ -3305,7 +3284,7 @@ i32.store offset=4 local.get 1 local.get 3 - call 4 + call 6 local.get 0 return end @@ -3367,8 +3346,8 @@ i32.const 1 i32.or i32.store offset=4 - local.get 1 - local.get 3 + local.get 6 + local.get 2 i32.add local.tee 2 local.get 3 @@ -3399,7 +3378,7 @@ br_if 7 (;@1;) end local.get 3 - call 3 + call 5 local.tee 1 i32.eqz br_if 4 (;@3;) @@ -3423,10 +3402,10 @@ local.get 3 i32.lt_u select - call 12 + call 14 local.set 3 local.get 0 - call 8 + call 10 local.get 3 return end @@ -3439,7 +3418,7 @@ br_if 1 (;@5;) local.get 2 local.get 3 - call 2 + call 4 local.set 0 br 2 (;@4;) end @@ -3465,7 +3444,7 @@ br 3 (;@2;) end local.get 3 - call 3 + call 5 local.set 0 end local.get 0 @@ -3505,13 +3484,13 @@ i32.store offset=1049004 local.get 0 ) - (func (;11;) (type 6) (param i32 i32 i32) 
(result i32) + (func (;13;) (type 6) (param i32 i32 i32) (result i32) (local i32 i32 i32 i32 i32 i32 i32 i32) block ;; label = @1 block ;; label = @2 local.get 2 - i32.const 15 - i32.gt_u + i32.const 16 + i32.ge_u br_if 0 (;@2;) local.get 0 local.set 3 @@ -3686,19 +3665,20 @@ end local.get 0 ) - (func (;12;) (type 6) (param i32 i32 i32) (result i32) + (func (;14;) (type 6) (param i32 i32 i32) (result i32) local.get 0 local.get 1 local.get 2 - call 11 + call 13 ) (memory (;0;) 17) (global (;0;) (mut i32) i32.const 1048576) (global (;1;) i32 i32.const 1049030) (global (;2;) i32 i32.const 1049040) (export "memory" (memory 0)) - (export "add-two" (func 1)) - (export "cabi_realloc" (func 10)) + (export "add-one" (func 1)) + (export "add-two" (func 3)) + (export "cabi_realloc" (func 12)) (export "__data_end" (global 1)) (export "__heap_base" (global 2)) ) diff --git a/homestar-wasm/fixtures/example_add_component.wasm b/homestar-wasm/fixtures/example_add_component.wasm index 808a4a67..4d0f6c69 100644 Binary files a/homestar-wasm/fixtures/example_add_component.wasm and b/homestar-wasm/fixtures/example_add_component.wasm differ diff --git a/homestar-wasm/fixtures/example_add_component.wat b/homestar-wasm/fixtures/example_add_component.wat index a2e56260..95d691cd 100644 --- a/homestar-wasm/fixtures/example_add_component.wat +++ b/homestar-wasm/fixtures/example_add_component.wat @@ -9,6 +9,12 @@ (type (;6;) (func (param i32 i32 i32) (result i32))) (func (;0;) (type 0)) (func (;1;) (type 1) (param i32) (result i32) + call 2 + local.get 0 + i32.const 1 + i32.add + ) + (func (;2;) (type 0) block ;; label = @1 i32.const 0 i32.load8_u offset=1049029 @@ -18,11 +24,14 @@ i32.const 1 i32.store8 offset=1049029 end + ) + (func (;3;) (type 1) (param i32) (result i32) + call 2 local.get 0 i32.const 2 i32.add ) - (func (;2;) (type 2) (param i32 i32) (result i32) + (func (;4;) (type 2) (param i32 i32) (result i32) (local i32 i32 i32 i32 i32) i32.const 0 local.set 2 @@ -54,7 +63,7 @@ i32.add i32.const 12 i32.add - call 3 + call 5 local.tee 1 i32.eqz br_if 0 (;@1;) @@ -154,7 +163,7 @@ i32.store offset=4 local.get 2 local.get 1 - call 4 + call 6 br 1 (;@2;) end local.get 2 @@ -217,7 +226,7 @@ i32.store offset=4 local.get 1 local.get 3 - call 4 + call 6 end local.get 0 i32.const 8 @@ -226,7 +235,7 @@ end local.get 2 ) - (func (;3;) (type 1) (param i32) (result i32) + (func (;5;) (type 1) (param i32) (result i32) (local i32 i32 i32 i32 i32 i32 i32 i32 i64) block ;; label = @1 block ;; label = @2 @@ -394,7 +403,7 @@ block ;; label = @6 i32.const 0 i32.load offset=1048984 - local.tee 7 + local.tee 6 i32.const 16 local.get 0 i32.const 11 @@ -424,51 +433,51 @@ i32.and local.get 1 i32.add - local.tee 2 + local.tee 1 i32.const 3 i32.shl - local.tee 5 + local.tee 2 i32.const 1048728 i32.add i32.load local.tee 0 i32.const 8 i32.add - local.tee 6 + local.tee 7 i32.load - local.tee 1 - local.get 5 + local.tee 5 + local.get 2 i32.const 1048720 i32.add - local.tee 5 + local.tee 2 i32.eq br_if 0 (;@8;) - local.get 1 local.get 5 + local.get 2 i32.store offset=12 + local.get 2 local.get 5 - local.get 1 i32.store offset=8 br 1 (;@7;) end i32.const 0 - local.get 7 + local.get 6 i32.const -2 - local.get 2 + local.get 1 i32.rotl i32.and i32.store offset=1048984 end local.get 0 - local.get 2 + local.get 1 i32.const 3 i32.shl - local.tee 2 + local.tee 1 i32.const 3 i32.or i32.store offset=4 local.get 0 - local.get 2 + local.get 1 i32.add local.tee 0 local.get 0 @@ -476,7 +485,7 @@ i32.const 1 i32.or i32.store offset=4 - local.get 6 + 
local.get 7 return end local.get 2 @@ -499,40 +508,33 @@ i32.eqz br_if 10 (;@2;) local.get 0 - i32.const 0 - local.get 0 - i32.sub - i32.and i32.ctz i32.const 2 i32.shl i32.const 1048576 i32.add i32.load - local.tee 6 + local.tee 7 i32.load offset=4 i32.const -8 i32.and - local.set 1 - block ;; label = @13 - local.get 6 - i32.load offset=16 - local.tee 0 - br_if 0 (;@13;) - local.get 6 - i32.const 20 - i32.add - i32.load - local.set 0 - end - local.get 1 local.get 2 i32.sub local.set 5 block ;; label = @13 - local.get 0 - i32.eqz - br_if 0 (;@13;) + block ;; label = @14 + local.get 7 + i32.load offset=16 + local.tee 0 + br_if 0 (;@14;) + local.get 7 + i32.const 20 + i32.add + i32.load + local.tee 0 + i32.eqz + br_if 1 (;@13;) + end loop ;; label = @14 local.get 0 i32.load offset=4 @@ -543,7 +545,7 @@ local.tee 8 local.get 5 i32.lt_u - local.set 7 + local.set 6 block ;; label = @15 local.get 0 i32.load offset=16 @@ -557,47 +559,47 @@ end local.get 8 local.get 5 - local.get 7 + local.get 6 select local.set 5 local.get 0 - local.get 6 local.get 7 + local.get 6 select - local.set 6 + local.set 7 local.get 1 local.set 0 local.get 1 br_if 0 (;@14;) end end - local.get 6 - call 5 + local.get 7 + call 7 local.get 5 i32.const 16 i32.lt_u br_if 2 (;@10;) - local.get 6 + local.get 7 local.get 2 i32.const 3 i32.or i32.store offset=4 - local.get 6 + local.get 7 local.get 2 i32.add - local.tee 2 + local.tee 1 local.get 5 i32.const 1 i32.or i32.store offset=4 - local.get 2 + local.get 1 local.get 5 i32.add local.get 5 i32.store i32.const 0 i32.load offset=1048992 - local.tee 7 + local.tee 6 br_if 1 (;@11;) br 5 (;@7;) end @@ -618,16 +620,11 @@ local.get 1 i32.shl i32.and - local.tee 0 - i32.const 0 - local.get 0 - i32.sub - i32.and i32.ctz local.tee 1 i32.const 3 i32.shl - local.tee 6 + local.tee 7 i32.const 1048728 i32.add i32.load @@ -637,22 +634,22 @@ local.tee 8 i32.load local.tee 5 - local.get 6 + local.get 7 i32.const 1048720 i32.add - local.tee 6 + local.tee 7 i32.eq br_if 0 (;@13;) local.get 5 - local.get 6 + local.get 7 i32.store offset=12 - local.get 6 + local.get 7 local.get 5 i32.store offset=8 br 1 (;@12;) end i32.const 0 - local.get 7 + local.get 6 i32.const -2 local.get 1 i32.rotl @@ -667,34 +664,34 @@ local.get 0 local.get 2 i32.add - local.tee 7 + local.tee 6 local.get 1 i32.const 3 i32.shl - local.tee 1 + local.tee 5 local.get 2 i32.sub - local.tee 2 + local.tee 1 i32.const 1 i32.or i32.store offset=4 local.get 0 - local.get 1 + local.get 5 i32.add - local.get 2 + local.get 1 i32.store i32.const 0 i32.load offset=1048992 - local.tee 5 + local.tee 2 br_if 2 (;@9;) br 3 (;@8;) end - local.get 7 + local.get 6 i32.const -8 i32.and i32.const 1048720 i32.add - local.set 1 + local.set 2 i32.const 0 i32.load offset=1049000 local.set 0 @@ -704,42 +701,41 @@ i32.load offset=1048984 local.tee 8 i32.const 1 - local.get 7 + local.get 6 i32.const 3 i32.shr_u i32.shl - local.tee 7 + local.tee 6 i32.and - i32.eqz br_if 0 (;@12;) - local.get 1 - i32.load offset=8 - local.set 7 + i32.const 0 + local.get 8 + local.get 6 + i32.or + i32.store offset=1048984 + local.get 2 + local.set 6 br 1 (;@11;) end - i32.const 0 - local.get 8 - local.get 7 - i32.or - i32.store offset=1048984 - local.get 1 - local.set 7 + local.get 2 + i32.load offset=8 + local.set 6 end - local.get 1 + local.get 2 local.get 0 i32.store offset=8 - local.get 7 + local.get 6 local.get 0 i32.store offset=12 local.get 0 - local.get 1 + local.get 2 i32.store offset=12 local.get 0 - local.get 7 + local.get 6 i32.store offset=8 br 3 (;@7;) end - 
local.get 6 + local.get 7 local.get 5 local.get 2 i32.add @@ -747,7 +743,7 @@ i32.const 3 i32.or i32.store offset=4 - local.get 6 + local.get 7 local.get 0 i32.add local.tee 0 @@ -758,12 +754,12 @@ i32.store offset=4 br 3 (;@6;) end - local.get 5 + local.get 2 i32.const -8 i32.and i32.const 1048720 i32.add - local.set 1 + local.set 5 i32.const 0 i32.load offset=1049000 local.set 0 @@ -771,59 +767,58 @@ block ;; label = @10 i32.const 0 i32.load offset=1048984 - local.tee 6 + local.tee 7 i32.const 1 - local.get 5 + local.get 2 i32.const 3 i32.shr_u i32.shl - local.tee 5 + local.tee 2 i32.and - i32.eqz br_if 0 (;@10;) - local.get 1 - i32.load offset=8 - local.set 5 + i32.const 0 + local.get 7 + local.get 2 + i32.or + i32.store offset=1048984 + local.get 5 + local.set 2 br 1 (;@9;) end - i32.const 0 - local.get 6 local.get 5 - i32.or - i32.store offset=1048984 - local.get 1 - local.set 5 + i32.load offset=8 + local.set 2 end - local.get 1 + local.get 5 local.get 0 i32.store offset=8 - local.get 5 + local.get 2 local.get 0 i32.store offset=12 local.get 0 - local.get 1 + local.get 5 i32.store offset=12 local.get 0 - local.get 5 + local.get 2 i32.store offset=8 end i32.const 0 - local.get 7 + local.get 6 i32.store offset=1049000 i32.const 0 - local.get 2 + local.get 1 i32.store offset=1048992 local.get 8 return end i32.const 0 - local.get 2 + local.get 1 i32.store offset=1049000 i32.const 0 local.get 5 i32.store offset=1048992 end - local.get 6 + local.get 7 i32.const 8 i32.add return @@ -849,10 +844,6 @@ i32.eqz br_if 3 (;@2;) local.get 0 - i32.const 0 - local.get 0 - i32.sub - i32.and i32.ctz i32.const 2 i32.shl @@ -866,21 +857,30 @@ br_if 1 (;@3;) end loop ;; label = @4 + local.get 0 + local.get 6 local.get 0 i32.load offset=4 i32.const -8 i32.and local.tee 5 local.get 2 - i32.ge_u - local.get 5 - local.get 2 i32.sub local.tee 8 local.get 1 i32.lt_u - i32.and + local.tee 4 + select + local.set 3 + local.get 5 + local.get 2 + i32.lt_u local.set 7 + local.get 8 + local.get 1 + local.get 4 + select + local.set 8 block ;; label = @5 local.get 0 i32.load offset=16 @@ -892,13 +892,13 @@ i32.load local.set 5 end - local.get 0 local.get 6 + local.get 3 local.get 7 select local.set 6 - local.get 8 local.get 1 + local.get 8 local.get 7 select local.set 1 @@ -926,7 +926,7 @@ br_if 1 (;@2;) end local.get 6 - call 5 + call 7 block ;; label = @3 block ;; label = @4 local.get 1 @@ -958,7 +958,7 @@ br_if 0 (;@5;) local.get 0 local.get 1 - call 6 + call 8 br 2 (;@3;) end local.get 1 @@ -966,12 +966,12 @@ i32.and i32.const 1048720 i32.add - local.set 2 + local.set 5 block ;; label = @5 block ;; label = @6 i32.const 0 i32.load offset=1048984 - local.tee 5 + local.tee 2 i32.const 1 local.get 1 i32.const 3 @@ -979,29 +979,28 @@ i32.shl local.tee 1 i32.and - i32.eqz br_if 0 (;@6;) + i32.const 0 local.get 2 - i32.load offset=8 + local.get 1 + i32.or + i32.store offset=1048984 + local.get 5 local.set 1 br 1 (;@5;) end - i32.const 0 local.get 5 - local.get 1 - i32.or - i32.store offset=1048984 - local.get 2 + i32.load offset=8 local.set 1 end - local.get 2 + local.get 5 local.get 0 i32.store offset=8 local.get 1 local.get 0 i32.store offset=12 local.get 0 - local.get 2 + local.get 5 i32.store offset=12 local.get 0 local.get 1 @@ -1039,887 +1038,884 @@ block ;; label = @7 block ;; label = @8 block ;; label = @9 + i32.const 0 + i32.load offset=1048992 + local.tee 0 + local.get 2 + i32.ge_u + br_if 0 (;@9;) block ;; label = @10 + i32.const 0 + i32.load offset=1048996 + local.tee 0 + local.get 2 + i32.gt_u + br_if 0 
(;@10;) + i32.const 0 + local.set 1 + local.get 2 + i32.const 65583 + i32.add + local.tee 5 + i32.const 16 + i32.shr_u + memory.grow + local.tee 0 + i32.const -1 + i32.eq + local.tee 7 + br_if 9 (;@1;) + local.get 0 + i32.const 16 + i32.shl + local.tee 6 + i32.eqz + br_if 9 (;@1;) + i32.const 0 + i32.const 0 + i32.load offset=1049008 + i32.const 0 + local.get 5 + i32.const -65536 + i32.and + local.get 7 + select + local.tee 8 + i32.add + local.tee 0 + i32.store offset=1049008 + i32.const 0 + i32.const 0 + i32.load offset=1049012 + local.tee 1 + local.get 0 + local.get 1 + local.get 0 + i32.gt_u + select + i32.store offset=1049012 block ;; label = @11 - i32.const 0 - i32.load offset=1048992 - local.tee 0 - local.get 2 - i32.ge_u - br_if 0 (;@11;) block ;; label = @12 - i32.const 0 - i32.load offset=1048996 - local.tee 0 - local.get 2 - i32.gt_u - br_if 0 (;@12;) - i32.const 0 - local.set 1 - local.get 2 - i32.const 65583 - i32.add - local.tee 5 - i32.const 16 - i32.shr_u - memory.grow - local.tee 0 - i32.const -1 - i32.eq - local.tee 6 - br_if 11 (;@1;) - local.get 0 - i32.const 16 - i32.shl - local.tee 7 - i32.eqz - br_if 11 (;@1;) - i32.const 0 - i32.const 0 - i32.load offset=1049008 - i32.const 0 - local.get 5 - i32.const -65536 - i32.and - local.get 6 - select - local.tee 8 - i32.add - local.tee 0 - i32.store offset=1049008 - i32.const 0 - i32.const 0 - i32.load offset=1049012 - local.tee 1 - local.get 0 - local.get 1 - local.get 0 - i32.gt_u - select - i32.store offset=1049012 + block ;; label = @13 + i32.const 0 + i32.load offset=1049004 + local.tee 1 + i32.eqz + br_if 0 (;@13;) + i32.const 1048704 + local.set 0 + loop ;; label = @14 + local.get 0 + i32.load + local.tee 5 + local.get 0 + i32.load offset=4 + local.tee 7 + i32.add + local.get 6 + i32.eq + br_if 2 (;@12;) + local.get 0 + i32.load offset=8 + local.tee 0 + br_if 0 (;@14;) + br 3 (;@11;) + end + end block ;; label = @13 block ;; label = @14 - block ;; label = @15 - i32.const 0 - i32.load offset=1049004 - local.tee 1 - i32.eqz - br_if 0 (;@15;) - i32.const 1048704 - local.set 0 - loop ;; label = @16 - local.get 0 - i32.load - local.tee 5 - local.get 0 - i32.load offset=4 - local.tee 6 - i32.add - local.get 7 - i32.eq - br_if 2 (;@14;) - local.get 0 - i32.load offset=8 - local.tee 0 - br_if 0 (;@16;) - br 3 (;@13;) - end - end i32.const 0 i32.load offset=1049020 local.tee 0 i32.eqz - br_if 4 (;@10;) + br_if 0 (;@14;) local.get 0 - local.get 7 - i32.gt_u - br_if 4 (;@10;) - br 11 (;@3;) + local.get 6 + i32.le_u + br_if 1 (;@13;) end - local.get 0 - i32.load offset=12 - br_if 0 (;@13;) - local.get 5 - local.get 1 - i32.gt_u - br_if 0 (;@13;) - local.get 1 - local.get 7 - i32.lt_u - br_if 4 (;@9;) + i32.const 0 + local.get 6 + i32.store offset=1049020 end i32.const 0 + i32.const 4095 + i32.store offset=1049024 + i32.const 0 + local.get 8 + i32.store offset=1048708 + i32.const 0 + local.get 6 + i32.store offset=1048704 + i32.const 0 + i32.const 1048720 + i32.store offset=1048732 + i32.const 0 + i32.const 1048728 + i32.store offset=1048740 + i32.const 0 + i32.const 1048720 + i32.store offset=1048728 + i32.const 0 + i32.const 1048736 + i32.store offset=1048748 + i32.const 0 + i32.const 1048728 + i32.store offset=1048736 + i32.const 0 + i32.const 1048744 + i32.store offset=1048756 + i32.const 0 + i32.const 1048736 + i32.store offset=1048744 + i32.const 0 + i32.const 1048752 + i32.store offset=1048764 + i32.const 0 + i32.const 1048744 + i32.store offset=1048752 + i32.const 0 + i32.const 1048760 + i32.store offset=1048772 + i32.const 
0 + i32.const 1048752 + i32.store offset=1048760 + i32.const 0 + i32.const 1048768 + i32.store offset=1048780 + i32.const 0 + i32.const 1048760 + i32.store offset=1048768 + i32.const 0 + i32.const 1048776 + i32.store offset=1048788 + i32.const 0 + i32.const 1048768 + i32.store offset=1048776 + i32.const 0 + i32.const 0 + i32.store offset=1048716 + i32.const 0 + i32.const 1048784 + i32.store offset=1048796 + i32.const 0 + i32.const 1048776 + i32.store offset=1048784 + i32.const 0 + i32.const 1048784 + i32.store offset=1048792 + i32.const 0 + i32.const 1048792 + i32.store offset=1048804 + i32.const 0 + i32.const 1048792 + i32.store offset=1048800 i32.const 0 - i32.load offset=1049020 + i32.const 1048800 + i32.store offset=1048812 + i32.const 0 + i32.const 1048800 + i32.store offset=1048808 + i32.const 0 + i32.const 1048808 + i32.store offset=1048820 + i32.const 0 + i32.const 1048808 + i32.store offset=1048816 + i32.const 0 + i32.const 1048816 + i32.store offset=1048828 + i32.const 0 + i32.const 1048816 + i32.store offset=1048824 + i32.const 0 + i32.const 1048824 + i32.store offset=1048836 + i32.const 0 + i32.const 1048824 + i32.store offset=1048832 + i32.const 0 + i32.const 1048832 + i32.store offset=1048844 + i32.const 0 + i32.const 1048832 + i32.store offset=1048840 + i32.const 0 + i32.const 1048840 + i32.store offset=1048852 + i32.const 0 + i32.const 1048840 + i32.store offset=1048848 + i32.const 0 + i32.const 1048848 + i32.store offset=1048860 + i32.const 0 + i32.const 1048856 + i32.store offset=1048868 + i32.const 0 + i32.const 1048848 + i32.store offset=1048856 + i32.const 0 + i32.const 1048864 + i32.store offset=1048876 + i32.const 0 + i32.const 1048856 + i32.store offset=1048864 + i32.const 0 + i32.const 1048872 + i32.store offset=1048884 + i32.const 0 + i32.const 1048864 + i32.store offset=1048872 + i32.const 0 + i32.const 1048880 + i32.store offset=1048892 + i32.const 0 + i32.const 1048872 + i32.store offset=1048880 + i32.const 0 + i32.const 1048888 + i32.store offset=1048900 + i32.const 0 + i32.const 1048880 + i32.store offset=1048888 + i32.const 0 + i32.const 1048896 + i32.store offset=1048908 + i32.const 0 + i32.const 1048888 + i32.store offset=1048896 + i32.const 0 + i32.const 1048904 + i32.store offset=1048916 + i32.const 0 + i32.const 1048896 + i32.store offset=1048904 + i32.const 0 + i32.const 1048912 + i32.store offset=1048924 + i32.const 0 + i32.const 1048904 + i32.store offset=1048912 + i32.const 0 + i32.const 1048920 + i32.store offset=1048932 + i32.const 0 + i32.const 1048912 + i32.store offset=1048920 + i32.const 0 + i32.const 1048928 + i32.store offset=1048940 + i32.const 0 + i32.const 1048920 + i32.store offset=1048928 + i32.const 0 + i32.const 1048936 + i32.store offset=1048948 + i32.const 0 + i32.const 1048928 + i32.store offset=1048936 + i32.const 0 + i32.const 1048944 + i32.store offset=1048956 + i32.const 0 + i32.const 1048936 + i32.store offset=1048944 + i32.const 0 + i32.const 1048952 + i32.store offset=1048964 + i32.const 0 + i32.const 1048944 + i32.store offset=1048952 + i32.const 0 + i32.const 1048960 + i32.store offset=1048972 + i32.const 0 + i32.const 1048952 + i32.store offset=1048960 + i32.const 0 + i32.const 1048968 + i32.store offset=1048980 + i32.const 0 + i32.const 1048960 + i32.store offset=1048968 + i32.const 0 + local.get 6 + i32.store offset=1049004 + i32.const 0 + i32.const 1048968 + i32.store offset=1048976 + i32.const 0 + local.get 8 + i32.const -40 + i32.add local.tee 0 - local.get 7 + i32.store offset=1048996 + local.get 6 + local.get 0 + 
i32.const 1 + i32.or + i32.store offset=4 + local.get 6 local.get 0 - local.get 7 - i32.lt_u - select - i32.store offset=1049020 - local.get 7 - local.get 8 i32.add - local.set 5 - i32.const 1048704 - local.set 0 + i32.const 40 + i32.store offset=4 + i32.const 0 + i32.const 2097152 + i32.store offset=1049016 + br 10 (;@2;) + end + local.get 0 + i32.load offset=12 + br_if 0 (;@11;) + local.get 5 + local.get 1 + i32.gt_u + br_if 0 (;@11;) + local.get 1 + local.get 6 + i32.lt_u + br_if 3 (;@8;) + end + i32.const 0 + i32.const 0 + i32.load offset=1049020 + local.tee 0 + local.get 6 + local.get 0 + local.get 6 + i32.lt_u + select + i32.store offset=1049020 + local.get 6 + local.get 8 + i32.add + local.set 5 + i32.const 1048704 + local.set 0 + block ;; label = @11 + block ;; label = @12 block ;; label = @13 - block ;; label = @14 - block ;; label = @15 - loop ;; label = @16 - local.get 0 - i32.load - local.get 5 - i32.eq - br_if 1 (;@15;) - local.get 0 - i32.load offset=8 - local.tee 0 - br_if 0 (;@16;) - br 2 (;@14;) - end - end - local.get 0 - i32.load offset=12 - i32.eqz - br_if 1 (;@13;) - end - i32.const 1048704 - local.set 0 - block ;; label = @14 - loop ;; label = @15 - block ;; label = @16 - local.get 0 - i32.load - local.tee 5 - local.get 1 - i32.gt_u - br_if 0 (;@16;) - local.get 5 - local.get 0 - i32.load offset=4 - i32.add - local.tee 5 - local.get 1 - i32.gt_u - br_if 2 (;@14;) - end - local.get 0 - i32.load offset=8 - local.set 0 - br 0 (;@15;) - end - end - i32.const 0 - local.get 7 - i32.store offset=1049004 - i32.const 0 - local.get 8 - i32.const -40 - i32.add - local.tee 0 - i32.store offset=1048996 - local.get 7 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 7 - local.get 0 - i32.add - i32.const 40 - i32.store offset=4 - i32.const 0 - i32.const 2097152 - i32.store offset=1049016 - local.get 1 - local.get 5 - i32.const -32 - i32.add - i32.const -8 - i32.and - i32.const -8 - i32.add - local.tee 0 - local.get 0 - local.get 1 - i32.const 16 - i32.add - i32.lt_u - select - local.tee 6 - i32.const 27 - i32.store offset=4 - i32.const 0 - i64.load offset=1048704 align=4 - local.set 9 - local.get 6 - i32.const 16 - i32.add - i32.const 0 - i64.load offset=1048712 align=4 - i64.store align=4 - local.get 6 - local.get 9 - i64.store offset=8 align=4 - i32.const 0 - local.get 8 - i32.store offset=1048708 - i32.const 0 - local.get 7 - i32.store offset=1048704 - i32.const 0 - local.get 6 - i32.const 8 - i32.add - i32.store offset=1048712 - i32.const 0 - i32.const 0 - i32.store offset=1048716 - local.get 6 - i32.const 28 - i32.add - local.set 0 loop ;; label = @14 local.get 0 - i32.const 7 - i32.store + i32.load + local.get 5 + i32.eq + br_if 1 (;@13;) local.get 0 - i32.const 4 - i32.add + i32.load offset=8 local.tee 0 - local.get 5 - i32.lt_u br_if 0 (;@14;) + br 2 (;@12;) end - local.get 6 - local.get 1 - i32.eq - br_if 11 (;@2;) - local.get 6 - local.get 6 - i32.load offset=4 - i32.const -2 - i32.and - i32.store offset=4 - local.get 1 - local.get 6 - local.get 1 - i32.sub - local.tee 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 6 - local.get 0 - i32.store + end + local.get 0 + i32.load offset=12 + i32.eqz + br_if 1 (;@11;) + end + i32.const 1048704 + local.set 0 + block ;; label = @12 + loop ;; label = @13 block ;; label = @14 local.get 0 - i32.const 256 - i32.lt_u - br_if 0 (;@14;) + i32.load + local.tee 5 local.get 1 + i32.gt_u + br_if 0 (;@14;) + local.get 5 local.get 0 - call 6 - br 12 (;@2;) + i32.load offset=4 + i32.add + local.tee 5 + local.get 1 + 
i32.gt_u + br_if 2 (;@12;) end local.get 0 - i32.const -8 + i32.load offset=8 + local.set 0 + br 0 (;@13;) + end + end + i32.const 0 + local.get 6 + i32.store offset=1049004 + i32.const 0 + local.get 8 + i32.const -40 + i32.add + local.tee 0 + i32.store offset=1048996 + local.get 6 + local.get 0 + i32.const 1 + i32.or + i32.store offset=4 + local.get 6 + local.get 0 + i32.add + i32.const 40 + i32.store offset=4 + i32.const 0 + i32.const 2097152 + i32.store offset=1049016 + local.get 1 + local.get 5 + i32.const -32 + i32.add + i32.const -8 + i32.and + i32.const -8 + i32.add + local.tee 0 + local.get 0 + local.get 1 + i32.const 16 + i32.add + i32.lt_u + select + local.tee 7 + i32.const 27 + i32.store offset=4 + i32.const 0 + i64.load offset=1048704 align=4 + local.set 9 + local.get 7 + i32.const 16 + i32.add + i32.const 0 + i64.load offset=1048712 align=4 + i64.store align=4 + local.get 7 + local.get 9 + i64.store offset=8 align=4 + i32.const 0 + local.get 8 + i32.store offset=1048708 + i32.const 0 + local.get 6 + i32.store offset=1048704 + i32.const 0 + local.get 7 + i32.const 8 + i32.add + i32.store offset=1048712 + i32.const 0 + i32.const 0 + i32.store offset=1048716 + local.get 7 + i32.const 28 + i32.add + local.set 0 + loop ;; label = @12 + local.get 0 + i32.const 7 + i32.store + local.get 0 + i32.const 4 + i32.add + local.tee 0 + local.get 5 + i32.lt_u + br_if 0 (;@12;) + end + local.get 7 + local.get 1 + i32.eq + br_if 9 (;@2;) + local.get 7 + local.get 7 + i32.load offset=4 + i32.const -2 + i32.and + i32.store offset=4 + local.get 1 + local.get 7 + local.get 1 + i32.sub + local.tee 0 + i32.const 1 + i32.or + i32.store offset=4 + local.get 7 + local.get 0 + i32.store + block ;; label = @12 + local.get 0 + i32.const 256 + i32.lt_u + br_if 0 (;@12;) + local.get 1 + local.get 0 + call 8 + br 10 (;@2;) + end + local.get 0 + i32.const -8 + i32.and + i32.const 1048720 + i32.add + local.set 5 + block ;; label = @12 + block ;; label = @13 + i32.const 0 + i32.load offset=1048984 + local.tee 6 + i32.const 1 + local.get 0 + i32.const 3 + i32.shr_u + i32.shl + local.tee 0 i32.and - i32.const 1048720 - i32.add - local.set 5 - block ;; label = @14 - block ;; label = @15 - i32.const 0 - i32.load offset=1048984 - local.tee 7 - i32.const 1 - local.get 0 - i32.const 3 - i32.shr_u - i32.shl - local.tee 0 - i32.and - i32.eqz - br_if 0 (;@15;) - local.get 5 - i32.load offset=8 - local.set 0 - br 1 (;@14;) - end - i32.const 0 - local.get 7 - local.get 0 - i32.or - i32.store offset=1048984 - local.get 5 - local.set 0 - end - local.get 5 - local.get 1 - i32.store offset=8 + br_if 0 (;@13;) + i32.const 0 + local.get 6 local.get 0 - local.get 1 - i32.store offset=12 - local.get 1 + i32.or + i32.store offset=1048984 local.get 5 - i32.store offset=12 - local.get 1 - local.get 0 - i32.store offset=8 - br 11 (;@2;) + local.set 0 + br 1 (;@12;) end - local.get 0 - local.get 7 - i32.store - local.get 0 - local.get 0 - i32.load offset=4 - local.get 8 - i32.add - i32.store offset=4 - local.get 7 - local.get 2 - i32.const 3 - i32.or - i32.store offset=4 local.get 5 - local.get 7 - local.get 2 - i32.add - local.tee 0 - i32.sub - local.set 2 + i32.load offset=8 + local.set 0 + end + local.get 5 + local.get 1 + i32.store offset=8 + local.get 0 + local.get 1 + i32.store offset=12 + local.get 1 + local.get 5 + i32.store offset=12 + local.get 1 + local.get 0 + i32.store offset=8 + br 9 (;@2;) + end + local.get 0 + local.get 6 + i32.store + local.get 0 + local.get 0 + i32.load offset=4 + local.get 8 + i32.add + i32.store 
offset=4 + local.get 6 + local.get 2 + i32.const 3 + i32.or + i32.store offset=4 + local.get 5 + local.get 6 + local.get 2 + i32.add + local.tee 0 + i32.sub + local.set 1 + local.get 5 + i32.const 0 + i32.load offset=1049004 + i32.eq + br_if 3 (;@7;) + local.get 5 + i32.const 0 + i32.load offset=1049000 + i32.eq + br_if 4 (;@6;) + block ;; label = @11 + local.get 5 + i32.load offset=4 + local.tee 2 + i32.const 3 + i32.and + i32.const 1 + i32.ne + br_if 0 (;@11;) + block ;; label = @12 block ;; label = @13 - local.get 5 - i32.const 0 - i32.load offset=1049004 - i32.eq + local.get 2 + i32.const -8 + i32.and + local.tee 7 + i32.const 256 + i32.lt_u br_if 0 (;@13;) local.get 5 - i32.const 0 - i32.load offset=1049000 - i32.eq - br_if 5 (;@8;) + call 7 + br 1 (;@12;) + end + block ;; label = @13 local.get 5 - i32.load offset=4 - local.tee 1 - i32.const 3 - i32.and - i32.const 1 - i32.ne - br_if 8 (;@5;) - block ;; label = @14 - block ;; label = @15 - local.get 1 - i32.const -8 - i32.and - local.tee 6 - i32.const 256 - i32.lt_u - br_if 0 (;@15;) - local.get 5 - call 5 - br 1 (;@14;) - end - block ;; label = @15 - local.get 5 - i32.const 12 - i32.add - i32.load - local.tee 8 - local.get 5 - i32.const 8 - i32.add - i32.load - local.tee 4 - i32.eq - br_if 0 (;@15;) - local.get 4 - local.get 8 - i32.store offset=12 - local.get 8 - local.get 4 - i32.store offset=8 - br 1 (;@14;) - end - i32.const 0 - i32.const 0 - i32.load offset=1048984 - i32.const -2 - local.get 1 - i32.const 3 - i32.shr_u - i32.rotl - i32.and - i32.store offset=1048984 - end - local.get 6 - local.get 2 + i32.const 12 i32.add - local.set 2 + i32.load + local.tee 8 local.get 5 - local.get 6 + i32.const 8 i32.add - local.tee 5 - i32.load offset=4 - local.set 1 - br 8 (;@5;) + i32.load + local.tee 4 + i32.eq + br_if 0 (;@13;) + local.get 4 + local.get 8 + i32.store offset=12 + local.get 8 + local.get 4 + i32.store offset=8 + br 1 (;@12;) end i32.const 0 - local.get 0 - i32.store offset=1049004 - i32.const 0 i32.const 0 - i32.load offset=1048996 - local.get 2 - i32.add - local.tee 2 - i32.store offset=1048996 - local.get 0 + i32.load offset=1048984 + i32.const -2 local.get 2 - i32.const 1 - i32.or - i32.store offset=4 - br 8 (;@4;) + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=1048984 end - i32.const 0 - local.get 0 - local.get 2 - i32.sub - local.tee 1 - i32.store offset=1048996 - i32.const 0 - i32.const 0 - i32.load offset=1049004 - local.tee 0 - local.get 2 - i32.add - local.tee 5 - i32.store offset=1049004 - local.get 5 + local.get 7 local.get 1 - i32.const 1 - i32.or - i32.store offset=4 - local.get 0 - local.get 2 - i32.const 3 - i32.or - i32.store offset=4 - local.get 0 - i32.const 8 i32.add local.set 1 - br 10 (;@1;) + local.get 5 + local.get 7 + i32.add + local.tee 5 + i32.load offset=4 + local.set 2 end - i32.const 0 - i32.load offset=1049000 - local.set 1 - local.get 0 - local.get 2 - i32.sub - local.tee 5 - i32.const 16 - i32.lt_u - br_if 3 (;@7;) - i32.const 0 local.get 5 - i32.store offset=1048992 - i32.const 0 - local.get 1 local.get 2 - i32.add - local.tee 7 - i32.store offset=1049000 - local.get 7 - local.get 5 + i32.const -2 + i32.and + i32.store offset=4 + local.get 0 + local.get 1 i32.const 1 i32.or i32.store offset=4 - local.get 1 local.get 0 + local.get 1 i32.add - local.get 5 + local.get 1 i32.store + block ;; label = @11 + local.get 1 + i32.const 256 + i32.lt_u + br_if 0 (;@11;) + local.get 0 + local.get 1 + call 8 + br 8 (;@3;) + end local.get 1 - local.get 2 - i32.const 3 - i32.or - 
i32.store offset=4 - br 4 (;@6;) + i32.const -8 + i32.and + i32.const 1048720 + i32.add + local.set 5 + block ;; label = @11 + block ;; label = @12 + i32.const 0 + i32.load offset=1048984 + local.tee 2 + i32.const 1 + local.get 1 + i32.const 3 + i32.shr_u + i32.shl + local.tee 1 + i32.and + br_if 0 (;@12;) + i32.const 0 + local.get 2 + local.get 1 + i32.or + i32.store offset=1048984 + local.get 5 + local.set 1 + br 1 (;@11;) + end + local.get 5 + i32.load offset=8 + local.set 1 + end + local.get 5 + local.get 0 + i32.store offset=8 + local.get 1 + local.get 0 + i32.store offset=12 + local.get 0 + local.get 5 + i32.store offset=12 + local.get 0 + local.get 1 + i32.store offset=8 + br 7 (;@3;) end i32.const 0 - local.get 7 - i32.store offset=1049020 - br 6 (;@3;) + local.get 0 + local.get 2 + i32.sub + local.tee 1 + i32.store offset=1048996 + i32.const 0 + i32.const 0 + i32.load offset=1049004 + local.tee 0 + local.get 2 + i32.add + local.tee 5 + i32.store offset=1049004 + local.get 5 + local.get 1 + i32.const 1 + i32.or + i32.store offset=4 + local.get 0 + local.get 2 + i32.const 3 + i32.or + i32.store offset=4 + local.get 0 + i32.const 8 + i32.add + local.set 1 + br 8 (;@1;) end + i32.const 0 + i32.load offset=1049000 + local.set 1 local.get 0 - local.get 6 - local.get 8 - i32.add - i32.store offset=4 + local.get 2 + i32.sub + local.tee 5 + i32.const 16 + i32.lt_u + br_if 3 (;@5;) i32.const 0 - i32.load offset=1049004 + local.get 5 + i32.store offset=1048992 i32.const 0 - i32.load offset=1048996 - local.get 8 + local.get 1 + local.get 2 i32.add - call 7 - br 6 (;@2;) + local.tee 6 + i32.store offset=1049000 + local.get 6 + local.get 5 + i32.const 1 + i32.or + i32.store offset=4 + local.get 1 + local.get 0 + i32.add + local.get 5 + i32.store + local.get 1 + local.get 2 + i32.const 3 + i32.or + i32.store offset=4 + br 4 (;@4;) end - i32.const 0 local.get 0 - i32.store offset=1049000 - i32.const 0 - i32.const 0 - i32.load offset=1048992 - local.get 2 + local.get 7 + local.get 8 i32.add - local.tee 2 - i32.store offset=1048992 - local.get 0 - local.get 2 - i32.const 1 - i32.or i32.store offset=4 - local.get 0 - local.get 2 + i32.const 0 + i32.load offset=1049004 + i32.const 0 + i32.load offset=1048996 + local.get 8 i32.add - local.get 2 - i32.store - br 3 (;@4;) + call 9 + br 5 (;@2;) end i32.const 0 - i32.const 0 - i32.store offset=1049000 + local.get 0 + i32.store offset=1049004 i32.const 0 i32.const 0 - i32.store offset=1048992 + i32.load offset=1048996 local.get 1 - local.get 0 - i32.const 3 - i32.or - i32.store offset=4 - local.get 1 - local.get 0 i32.add - local.tee 0 + local.tee 1 + i32.store offset=1048996 local.get 0 - i32.load offset=4 + local.get 1 i32.const 1 i32.or i32.store offset=4 + br 3 (;@3;) end + i32.const 0 + local.get 0 + i32.store offset=1049000 + i32.const 0 + i32.const 0 + i32.load offset=1048992 local.get 1 - i32.const 8 i32.add - return - end - local.get 5 - local.get 1 - i32.const -2 - i32.and - i32.store offset=4 - local.get 0 - local.get 2 - i32.const 1 - i32.or - i32.store offset=4 - local.get 0 - local.get 2 - i32.add - local.get 2 - i32.store - block ;; label = @5 - local.get 2 - i32.const 256 - i32.lt_u - br_if 0 (;@5;) + local.tee 1 + i32.store offset=1048992 local.get 0 - local.get 2 - call 6 - br 1 (;@4;) - end - local.get 2 - i32.const -8 - i32.and - i32.const 1048720 - i32.add - local.set 1 - block ;; label = @5 - block ;; label = @6 - i32.const 0 - i32.load offset=1048984 - local.tee 5 - i32.const 1 - local.get 2 - i32.const 3 - i32.shr_u - i32.shl - 
local.tee 2 - i32.and - i32.eqz - br_if 0 (;@6;) - local.get 1 - i32.load offset=8 - local.set 2 - br 1 (;@5;) - end - i32.const 0 - local.get 5 - local.get 2 + local.get 1 + i32.const 1 i32.or - i32.store offset=1048984 + i32.store offset=4 + local.get 0 local.get 1 - local.set 2 + i32.add + local.get 1 + i32.store + br 2 (;@3;) end + i32.const 0 + i32.const 0 + i32.store offset=1049000 + i32.const 0 + i32.const 0 + i32.store offset=1048992 local.get 1 local.get 0 - i32.store offset=8 - local.get 2 - local.get 0 - i32.store offset=12 - local.get 0 + i32.const 3 + i32.or + i32.store offset=4 local.get 1 - i32.store offset=12 local.get 0 - local.get 2 - i32.store offset=8 + i32.add + local.tee 0 + local.get 0 + i32.load offset=4 + i32.const 1 + i32.or + i32.store offset=4 end - local.get 7 + local.get 1 i32.const 8 i32.add return - end - i32.const 0 - i32.const 4095 - i32.store offset=1049024 - i32.const 0 - local.get 8 - i32.store offset=1048708 - i32.const 0 - local.get 7 - i32.store offset=1048704 - i32.const 0 - i32.const 1048720 - i32.store offset=1048732 - i32.const 0 - i32.const 1048728 - i32.store offset=1048740 - i32.const 0 - i32.const 1048720 - i32.store offset=1048728 - i32.const 0 - i32.const 1048736 - i32.store offset=1048748 - i32.const 0 - i32.const 1048728 - i32.store offset=1048736 - i32.const 0 - i32.const 1048744 - i32.store offset=1048756 - i32.const 0 - i32.const 1048736 - i32.store offset=1048744 - i32.const 0 - i32.const 1048752 - i32.store offset=1048764 - i32.const 0 - i32.const 1048744 - i32.store offset=1048752 - i32.const 0 - i32.const 1048760 - i32.store offset=1048772 - i32.const 0 - i32.const 1048752 - i32.store offset=1048760 - i32.const 0 - i32.const 1048768 - i32.store offset=1048780 - i32.const 0 - i32.const 1048760 - i32.store offset=1048768 - i32.const 0 - i32.const 1048776 - i32.store offset=1048788 - i32.const 0 - i32.const 1048768 - i32.store offset=1048776 - i32.const 0 - i32.const 0 - i32.store offset=1048716 - i32.const 0 - i32.const 1048784 - i32.store offset=1048796 - i32.const 0 - i32.const 1048776 - i32.store offset=1048784 - i32.const 0 - i32.const 1048784 - i32.store offset=1048792 - i32.const 0 - i32.const 1048792 - i32.store offset=1048804 - i32.const 0 - i32.const 1048792 - i32.store offset=1048800 - i32.const 0 - i32.const 1048800 - i32.store offset=1048812 - i32.const 0 - i32.const 1048800 - i32.store offset=1048808 - i32.const 0 - i32.const 1048808 - i32.store offset=1048820 - i32.const 0 - i32.const 1048808 - i32.store offset=1048816 - i32.const 0 - i32.const 1048816 - i32.store offset=1048828 - i32.const 0 - i32.const 1048816 - i32.store offset=1048824 - i32.const 0 - i32.const 1048824 - i32.store offset=1048836 - i32.const 0 - i32.const 1048824 - i32.store offset=1048832 - i32.const 0 - i32.const 1048832 - i32.store offset=1048844 - i32.const 0 - i32.const 1048832 - i32.store offset=1048840 - i32.const 0 - i32.const 1048840 - i32.store offset=1048852 - i32.const 0 - i32.const 1048840 - i32.store offset=1048848 - i32.const 0 - i32.const 1048848 - i32.store offset=1048860 - i32.const 0 - i32.const 1048856 - i32.store offset=1048868 - i32.const 0 - i32.const 1048848 - i32.store offset=1048856 - i32.const 0 - i32.const 1048864 - i32.store offset=1048876 - i32.const 0 - i32.const 1048856 - i32.store offset=1048864 - i32.const 0 - i32.const 1048872 - i32.store offset=1048884 - i32.const 0 - i32.const 1048864 - i32.store offset=1048872 - i32.const 0 - i32.const 1048880 - i32.store offset=1048892 - i32.const 0 - i32.const 1048872 - 
i32.store offset=1048880 - i32.const 0 - i32.const 1048888 - i32.store offset=1048900 - i32.const 0 - i32.const 1048880 - i32.store offset=1048888 - i32.const 0 - i32.const 1048896 - i32.store offset=1048908 - i32.const 0 - i32.const 1048888 - i32.store offset=1048896 - i32.const 0 - i32.const 1048904 - i32.store offset=1048916 - i32.const 0 - i32.const 1048896 - i32.store offset=1048904 - i32.const 0 - i32.const 1048912 - i32.store offset=1048924 - i32.const 0 - i32.const 1048904 - i32.store offset=1048912 - i32.const 0 - i32.const 1048920 - i32.store offset=1048932 - i32.const 0 - i32.const 1048912 - i32.store offset=1048920 - i32.const 0 - i32.const 1048928 - i32.store offset=1048940 - i32.const 0 - i32.const 1048920 - i32.store offset=1048928 - i32.const 0 - i32.const 1048936 - i32.store offset=1048948 - i32.const 0 - i32.const 1048928 - i32.store offset=1048936 - i32.const 0 - i32.const 1048944 - i32.store offset=1048956 - i32.const 0 - i32.const 1048936 - i32.store offset=1048944 - i32.const 0 - i32.const 1048952 - i32.store offset=1048964 - i32.const 0 - i32.const 1048944 - i32.store offset=1048952 - i32.const 0 - i32.const 1048960 - i32.store offset=1048972 - i32.const 0 - i32.const 1048952 - i32.store offset=1048960 - i32.const 0 - i32.const 1048968 - i32.store offset=1048980 - i32.const 0 - i32.const 1048960 - i32.store offset=1048968 - i32.const 0 - local.get 7 - i32.store offset=1049004 - i32.const 0 - i32.const 1048968 - i32.store offset=1048976 - i32.const 0 - local.get 8 - i32.const -40 - i32.add - local.tee 0 - i32.store offset=1048996 - local.get 7 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 7 - local.get 0 + end + local.get 6 + i32.const 8 i32.add - i32.const 40 - i32.store offset=4 - i32.const 0 - i32.const 2097152 - i32.store offset=1049016 + return end i32.const 0 local.set 1 @@ -1960,7 +1956,7 @@ end local.get 1 ) - (func (;4;) (type 3) (param i32 i32) + (func (;6;) (type 3) (param i32 i32) (local i32 i32 i32 i32) local.get 0 local.get 1 @@ -2027,7 +2023,7 @@ i32.lt_u br_if 0 (;@4;) local.get 0 - call 5 + call 7 br 1 (;@3;) end block ;; label = @4 @@ -2062,92 +2058,99 @@ i32.and i32.store offset=1048984 end - block ;; label = @3 - local.get 2 - i32.load offset=4 - local.tee 3 - i32.const 2 - i32.and - i32.eqz - br_if 0 (;@3;) - local.get 2 - local.get 3 - i32.const -2 - i32.and - i32.store offset=4 - local.get 0 - local.get 1 - i32.const 1 - i32.or - i32.store offset=4 - local.get 0 - local.get 1 - i32.add - local.get 1 - i32.store - br 2 (;@1;) - end block ;; label = @3 block ;; label = @4 - local.get 2 - i32.const 0 - i32.load offset=1049004 - i32.eq - br_if 0 (;@4;) - local.get 2 - i32.const 0 - i32.load offset=1049000 - i32.eq - br_if 1 (;@3;) - local.get 3 - i32.const -8 - i32.and - local.tee 4 - local.get 1 - i32.add - local.set 1 block ;; label = @5 - block ;; label = @6 - local.get 4 - i32.const 256 - i32.lt_u - br_if 0 (;@6;) - local.get 2 - call 5 - br 1 (;@5;) - end - block ;; label = @6 - local.get 2 - i32.const 12 - i32.add - i32.load - local.tee 4 - local.get 2 - i32.const 8 - i32.add - i32.load - local.tee 2 - i32.eq - br_if 0 (;@6;) - local.get 2 - local.get 4 - i32.store offset=12 - local.get 4 - local.get 2 - i32.store offset=8 - br 1 (;@5;) - end + local.get 2 + i32.load offset=4 + local.tee 3 + i32.const 2 + i32.and + br_if 0 (;@5;) + local.get 2 i32.const 0 + i32.load offset=1049004 + i32.eq + br_if 2 (;@3;) + local.get 2 i32.const 0 - i32.load offset=1048984 - i32.const -2 + i32.load offset=1049000 + i32.eq + br_if 4 
(;@1;) local.get 3 - i32.const 3 - i32.shr_u - i32.rotl + i32.const -8 i32.and - i32.store offset=1048984 + local.tee 4 + local.get 1 + i32.add + local.set 1 + block ;; label = @6 + block ;; label = @7 + local.get 4 + i32.const 256 + i32.lt_u + br_if 0 (;@7;) + local.get 2 + call 7 + br 1 (;@6;) + end + block ;; label = @7 + local.get 2 + i32.const 12 + i32.add + i32.load + local.tee 4 + local.get 2 + i32.const 8 + i32.add + i32.load + local.tee 2 + i32.eq + br_if 0 (;@7;) + local.get 2 + local.get 4 + i32.store offset=12 + local.get 4 + local.get 2 + i32.store offset=8 + br 1 (;@6;) + end + i32.const 0 + i32.const 0 + i32.load offset=1048984 + i32.const -2 + local.get 3 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=1048984 + end + local.get 0 + local.get 1 + i32.const 1 + i32.or + i32.store offset=4 + local.get 0 + local.get 1 + i32.add + local.get 1 + i32.store + local.get 0 + i32.const 0 + i32.load offset=1049000 + i32.ne + br_if 1 (;@4;) + i32.const 0 + local.get 1 + i32.store offset=1048992 + return end + local.get 2 + local.get 3 + i32.const -2 + i32.and + i32.store offset=4 local.get 0 local.get 1 i32.const 1 @@ -2158,125 +2161,114 @@ i32.add local.get 1 i32.store + end + block ;; label = @4 + local.get 1 + i32.const 256 + i32.lt_u + br_if 0 (;@4;) local.get 0 - i32.const 0 - i32.load offset=1049000 - i32.ne - br_if 3 (;@1;) - i32.const 0 local.get 1 - i32.store offset=1048992 - br 2 (;@2;) + call 8 + return end - i32.const 0 - local.get 0 - i32.store offset=1049004 - i32.const 0 - i32.const 0 - i32.load offset=1048996 local.get 1 + i32.const -8 + i32.and + i32.const 1048720 i32.add - local.tee 1 - i32.store offset=1048996 + local.set 2 + block ;; label = @4 + block ;; label = @5 + i32.const 0 + i32.load offset=1048984 + local.tee 3 + i32.const 1 + local.get 1 + i32.const 3 + i32.shr_u + i32.shl + local.tee 1 + i32.and + br_if 0 (;@5;) + i32.const 0 + local.get 3 + local.get 1 + i32.or + i32.store offset=1048984 + local.get 2 + local.set 1 + br 1 (;@4;) + end + local.get 2 + i32.load offset=8 + local.set 1 + end + local.get 2 local.get 0 + i32.store offset=8 local.get 1 - i32.const 1 - i32.or - i32.store offset=4 local.get 0 - i32.const 0 - i32.load offset=1049000 - i32.ne - br_if 1 (;@2;) - i32.const 0 - i32.const 0 - i32.store offset=1048992 - i32.const 0 - i32.const 0 - i32.store offset=1049000 + i32.store offset=12 + local.get 0 + local.get 2 + i32.store offset=12 + local.get 0 + local.get 1 + i32.store offset=8 return end i32.const 0 local.get 0 - i32.store offset=1049000 + i32.store offset=1049004 i32.const 0 i32.const 0 - i32.load offset=1048992 + i32.load offset=1048996 local.get 1 i32.add local.tee 1 - i32.store offset=1048992 - local.get 0 - local.get 1 - i32.const 1 - i32.or - i32.store offset=4 + i32.store offset=1048996 local.get 0 local.get 1 - i32.add - local.get 1 - i32.store - return - end - return - end - block ;; label = @1 - local.get 1 - i32.const 256 - i32.lt_u - br_if 0 (;@1;) - local.get 0 - local.get 1 - call 6 - return - end - local.get 1 - i32.const -8 - i32.and - i32.const 1048720 - i32.add - local.set 2 - block ;; label = @1 - block ;; label = @2 - i32.const 0 - i32.load offset=1048984 - local.tee 3 i32.const 1 - local.get 1 - i32.const 3 - i32.shr_u - i32.shl - local.tee 1 - i32.and - i32.eqz + i32.or + i32.store offset=4 + local.get 0 + i32.const 0 + i32.load offset=1049000 + i32.ne br_if 0 (;@2;) - local.get 2 - i32.load offset=8 - local.set 1 - br 1 (;@1;) + i32.const 0 + i32.const 0 + i32.store offset=1048992 + i32.const 0 + 
i32.const 0 + i32.store offset=1049000 end - i32.const 0 - local.get 3 - local.get 1 - i32.or - i32.store offset=1048984 - local.get 2 - local.set 1 + return end - local.get 2 + i32.const 0 local.get 0 - i32.store offset=8 + i32.store offset=1049000 + i32.const 0 + i32.const 0 + i32.load offset=1048992 local.get 1 + i32.add + local.tee 1 + i32.store offset=1048992 local.get 0 - i32.store offset=12 - local.get 0 - local.get 2 - i32.store offset=12 + local.get 1 + i32.const 1 + i32.or + i32.store offset=4 local.get 0 local.get 1 - i32.store offset=8 + i32.add + local.get 1 + i32.store ) - (func (;5;) (type 4) (param i32) + (func (;7;) (type 4) (param i32) (local i32 i32 i32 i32 i32) local.get 0 i32.load offset=24 @@ -2437,7 +2429,7 @@ return end ) - (func (;6;) (type 3) (param i32 i32) + (func (;8;) (type 3) (param i32 i32) (local i32 i32 i32 i32) i32.const 31 local.set 2 @@ -2478,45 +2470,44 @@ i32.add local.set 3 block ;; label = @1 + block ;; label = @2 + i32.const 0 + i32.load offset=1048988 + local.tee 4 + i32.const 1 + local.get 2 + i32.shl + local.tee 5 + i32.and + br_if 0 (;@2;) + i32.const 0 + local.get 4 + local.get 5 + i32.or + i32.store offset=1048988 + local.get 3 + local.get 0 + i32.store + local.get 0 + local.get 3 + i32.store offset=24 + br 1 (;@1;) + end block ;; label = @2 block ;; label = @3 block ;; label = @4 - block ;; label = @5 - i32.const 0 - i32.load offset=1048988 - local.tee 4 - i32.const 1 - local.get 2 - i32.shl - local.tee 5 - i32.and - i32.eqz - br_if 0 (;@5;) - local.get 3 - i32.load - local.tee 4 - i32.load offset=4 - i32.const -8 - i32.and - local.get 1 - i32.ne - br_if 1 (;@4;) - local.get 4 - local.set 2 - br 2 (;@3;) - end - i32.const 0 - local.get 4 - local.get 5 - i32.or - i32.store offset=1048988 - local.get 3 - local.get 0 - i32.store - local.get 0 local.get 3 - i32.store offset=24 - br 3 (;@1;) + i32.load + local.tee 4 + i32.load offset=4 + i32.const -8 + i32.and + local.get 1 + i32.ne + br_if 0 (;@4;) + local.get 4 + local.set 2 + br 1 (;@3;) end local.get 1 i32.const 0 @@ -2596,7 +2587,7 @@ local.get 0 i32.store offset=8 ) - (func (;7;) (type 3) (param i32 i32) + (func (;9;) (type 3) (param i32 i32) (local i32 i32) i32.const 0 local.get 0 @@ -2607,6 +2598,7 @@ local.tee 2 i32.const -8 i32.add + local.tee 3 i32.store offset=1049004 i32.const 0 local.get 0 @@ -2616,15 +2608,13 @@ i32.add i32.const 8 i32.add - local.tee 3 + local.tee 2 i32.store offset=1048996 - local.get 2 - i32.const -4 - i32.add local.get 3 + local.get 2 i32.const 1 i32.or - i32.store + i32.store offset=4 local.get 0 local.get 1 i32.add @@ -2634,7 +2624,7 @@ i32.const 2097152 i32.store offset=1049016 ) - (func (;8;) (type 4) (param i32) + (func (;10;) (type 4) (param i32) (local i32 i32 i32 i32 i32) local.get 0 i32.const -8 @@ -2652,139 +2642,118 @@ local.set 3 block ;; label = @1 block ;; label = @2 - local.get 2 - i32.const 1 - i32.and - br_if 0 (;@2;) - local.get 2 - i32.const 3 - i32.and - i32.eqz - br_if 1 (;@1;) - local.get 1 - i32.load - local.tee 2 - local.get 0 - i32.add - local.set 0 - block ;; label = @3 - local.get 1 - local.get 2 - i32.sub - local.tee 1 - i32.const 0 - i32.load offset=1049000 - i32.ne - br_if 0 (;@3;) - local.get 3 - i32.load offset=4 - i32.const 3 - i32.and - i32.const 3 - i32.ne - br_if 1 (;@2;) - i32.const 0 - local.get 0 - i32.store offset=1048992 - local.get 3 - local.get 3 - i32.load offset=4 - i32.const -2 - i32.and - i32.store offset=4 - local.get 1 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 1 - local.get 0 - i32.add 
- local.get 0 - i32.store - return - end - block ;; label = @3 - local.get 2 - i32.const 256 - i32.lt_u - br_if 0 (;@3;) - local.get 1 - call 5 - br 1 (;@2;) - end - block ;; label = @3 - local.get 1 - i32.const 12 - i32.add - i32.load - local.tee 4 - local.get 1 - i32.const 8 - i32.add - i32.load - local.tee 5 - i32.eq - br_if 0 (;@3;) - local.get 5 - local.get 4 - i32.store offset=12 - local.get 4 - local.get 5 - i32.store offset=8 - br 1 (;@2;) - end - i32.const 0 - i32.const 0 - i32.load offset=1048984 - i32.const -2 - local.get 2 - i32.const 3 - i32.shr_u - i32.rotl - i32.and - i32.store offset=1048984 - end - block ;; label = @2 - block ;; label = @3 - local.get 3 - i32.load offset=4 - local.tee 2 - i32.const 2 - i32.and - i32.eqz - br_if 0 (;@3;) - local.get 3 - local.get 2 - i32.const -2 - i32.and - i32.store offset=4 - local.get 1 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 - local.get 1 - local.get 0 - i32.add - local.get 0 - i32.store - br 1 (;@2;) - end block ;; label = @3 + block ;; label = @4 + local.get 2 + i32.const 1 + i32.and + br_if 0 (;@4;) + local.get 2 + i32.const 3 + i32.and + i32.eqz + br_if 1 (;@3;) + local.get 1 + i32.load + local.tee 2 + local.get 0 + i32.add + local.set 0 + block ;; label = @5 + local.get 1 + local.get 2 + i32.sub + local.tee 1 + i32.const 0 + i32.load offset=1049000 + i32.ne + br_if 0 (;@5;) + local.get 3 + i32.load offset=4 + i32.const 3 + i32.and + i32.const 3 + i32.ne + br_if 1 (;@4;) + i32.const 0 + local.get 0 + i32.store offset=1048992 + local.get 3 + local.get 3 + i32.load offset=4 + i32.const -2 + i32.and + i32.store offset=4 + local.get 1 + local.get 0 + i32.const 1 + i32.or + i32.store offset=4 + local.get 3 + local.get 0 + i32.store + return + end + block ;; label = @5 + local.get 2 + i32.const 256 + i32.lt_u + br_if 0 (;@5;) + local.get 1 + call 7 + br 1 (;@4;) + end + block ;; label = @5 + local.get 1 + i32.const 12 + i32.add + i32.load + local.tee 4 + local.get 1 + i32.const 8 + i32.add + i32.load + local.tee 5 + i32.eq + br_if 0 (;@5;) + local.get 5 + local.get 4 + i32.store offset=12 + local.get 4 + local.get 5 + i32.store offset=8 + br 1 (;@4;) + end + i32.const 0 + i32.const 0 + i32.load offset=1048984 + i32.const -2 + local.get 2 + i32.const 3 + i32.shr_u + i32.rotl + i32.and + i32.store offset=1048984 + end block ;; label = @4 block ;; label = @5 block ;; label = @6 + local.get 3 + i32.load offset=4 + local.tee 2 + i32.const 2 + i32.and + br_if 0 (;@6;) local.get 3 i32.const 0 i32.load offset=1049004 i32.eq - br_if 0 (;@6;) + br_if 2 (;@4;) local.get 3 i32.const 0 i32.load offset=1049000 i32.eq - br_if 1 (;@5;) + br_if 5 (;@1;) local.get 2 i32.const -8 i32.and @@ -2799,7 +2768,7 @@ i32.lt_u br_if 0 (;@8;) local.get 3 - call 5 + call 7 br 1 (;@7;) end block ;; label = @8 @@ -2848,133 +2817,126 @@ i32.const 0 i32.load offset=1049000 i32.ne - br_if 4 (;@2;) + br_if 1 (;@5;) i32.const 0 local.get 0 i32.store offset=1048992 return end - i32.const 0 - local.get 1 - i32.store offset=1049004 - i32.const 0 - i32.const 0 - i32.load offset=1048996 - local.get 0 - i32.add - local.tee 0 - i32.store offset=1048996 + local.get 3 + local.get 2 + i32.const -2 + i32.and + i32.store offset=4 local.get 1 local.get 0 i32.const 1 i32.or i32.store offset=4 local.get 1 - i32.const 0 - i32.load offset=1049000 - i32.eq - br_if 1 (;@4;) - br 2 (;@3;) + local.get 0 + i32.add + local.get 0 + i32.store end - i32.const 0 + local.get 0 + i32.const 256 + i32.lt_u + br_if 2 (;@2;) local.get 1 - i32.store offset=1049000 + local.get 0 + call 8 
i32.const 0 i32.const 0 - i32.load offset=1048992 - local.get 0 + i32.load offset=1049024 + i32.const -1 i32.add - local.tee 0 - i32.store offset=1048992 - local.get 1 - local.get 0 - i32.const 1 - i32.or - i32.store offset=4 + local.tee 1 + i32.store offset=1049024 local.get 1 - local.get 0 - i32.add - local.get 0 - i32.store + br_if 1 (;@3;) + call 11 return end i32.const 0 + local.get 1 + i32.store offset=1049004 i32.const 0 - i32.store offset=1048992 i32.const 0 + i32.load offset=1048996 + local.get 0 + i32.add + local.tee 0 + i32.store offset=1048996 + local.get 1 + local.get 0 + i32.const 1 + i32.or + i32.store offset=4 + block ;; label = @4 + local.get 1 + i32.const 0 + i32.load offset=1049000 + i32.ne + br_if 0 (;@4;) + i32.const 0 + i32.const 0 + i32.store offset=1048992 + i32.const 0 + i32.const 0 + i32.store offset=1049000 + end + local.get 0 i32.const 0 - i32.store offset=1049000 - end - local.get 0 - i32.const 0 - i32.load offset=1049016 - i32.le_u - br_if 1 (;@1;) - i32.const 0 - i32.load offset=1049004 - local.tee 0 - i32.eqz - br_if 1 (;@1;) - block ;; label = @3 + i32.load offset=1049016 + i32.le_u + br_if 0 (;@3;) i32.const 0 - i32.load offset=1048996 - i32.const 41 - i32.lt_u + i32.load offset=1049004 + local.tee 0 + i32.eqz br_if 0 (;@3;) - i32.const 1048704 - local.set 1 - loop ;; label = @4 - block ;; label = @5 + block ;; label = @4 + i32.const 0 + i32.load offset=1048996 + i32.const 41 + i32.lt_u + br_if 0 (;@4;) + i32.const 1048704 + local.set 1 + loop ;; label = @5 + block ;; label = @6 + local.get 1 + i32.load + local.tee 3 + local.get 0 + i32.gt_u + br_if 0 (;@6;) + local.get 3 + local.get 1 + i32.load offset=4 + i32.add + local.get 0 + i32.gt_u + br_if 2 (;@4;) + end local.get 1 - i32.load - local.tee 3 - local.get 0 - i32.gt_u + i32.load offset=8 + local.tee 1 br_if 0 (;@5;) - local.get 3 - local.get 1 - i32.load offset=4 - i32.add - local.get 0 - i32.gt_u - br_if 2 (;@3;) end - local.get 1 - i32.load offset=8 - local.tee 1 - br_if 0 (;@4;) end + call 11 + i32.const 0 + i32.load offset=1048996 + i32.const 0 + i32.load offset=1049016 + i32.le_u + br_if 0 (;@3;) + i32.const 0 + i32.const -1 + i32.store offset=1049016 end - call 9 - i32.const 0 - i32.load offset=1048996 - i32.const 0 - i32.load offset=1049016 - i32.le_u - br_if 1 (;@1;) - i32.const 0 - i32.const -1 - i32.store offset=1049016 - return - end - block ;; label = @2 - local.get 0 - i32.const 256 - i32.lt_u - br_if 0 (;@2;) - local.get 1 - local.get 0 - call 6 - i32.const 0 - i32.const 0 - i32.load offset=1049024 - i32.const -1 - i32.add - local.tee 1 - i32.store offset=1049024 - local.get 1 - br_if 1 (;@1;) - call 9 return end local.get 0 @@ -2995,19 +2957,18 @@ i32.shl local.tee 0 i32.and - i32.eqz br_if 0 (;@3;) + i32.const 0 + local.get 2 + local.get 0 + i32.or + i32.store offset=1048984 local.get 3 - i32.load offset=8 local.set 0 br 1 (;@2;) end - i32.const 0 - local.get 2 - local.get 0 - i32.or - i32.store offset=1048984 local.get 3 + i32.load offset=8 local.set 0 end local.get 3 @@ -3022,9 +2983,30 @@ local.get 1 local.get 0 i32.store offset=8 + return end + i32.const 0 + local.get 1 + i32.store offset=1049000 + i32.const 0 + i32.const 0 + i32.load offset=1048992 + local.get 0 + i32.add + local.tee 0 + i32.store offset=1048992 + local.get 1 + local.get 0 + i32.const 1 + i32.or + i32.store offset=4 + local.get 1 + local.get 0 + i32.add + local.get 0 + i32.store ) - (func (;9;) (type 0) + (func (;11;) (type 0) (local i32 i32) i32.const 0 local.set 0 @@ -3056,7 +3038,7 @@ select i32.store 
offset=1049024 ) - (func (;10;) (type 5) (param i32 i32 i32 i32) (result i32) + (func (;12;) (type 5) (param i32 i32 i32 i32) (result i32) (local i32 i32 i32 i32 i32) block ;; label = @1 block ;; label = @2 @@ -3080,7 +3062,7 @@ br_if 0 (;@8;) local.get 2 local.get 3 - call 2 + call 4 local.tee 2 i32.eqz br_if 5 (;@3;) @@ -3092,10 +3074,10 @@ local.get 3 i32.lt_u select - call 12 + call 14 local.set 3 local.get 0 - call 8 + call 10 local.get 3 return end @@ -3138,15 +3120,15 @@ local.get 0 i32.const -8 i32.add - local.set 6 + local.tee 6 + local.get 2 + i32.add + local.set 7 local.get 2 local.get 1 i32.ge_u br_if 1 (;@13;) - local.get 6 - local.get 2 - i32.add - local.tee 7 + local.get 7 i32.const 0 i32.load offset=1049004 i32.eq @@ -3181,7 +3163,7 @@ i32.lt_u br_if 2 (;@12;) local.get 7 - call 5 + call 7 br 3 (;@11;) end local.get 1 @@ -3226,18 +3208,15 @@ i32.const 3 i32.or i32.store offset=4 - local.get 1 - local.get 3 - i32.add - local.tee 2 - local.get 2 + local.get 7 + local.get 7 i32.load offset=4 i32.const 1 i32.or i32.store offset=4 local.get 1 local.get 3 - call 4 + call 6 local.get 0 return end @@ -3295,8 +3274,8 @@ i32.const 3 i32.or i32.store offset=4 - local.get 1 - local.get 3 + local.get 6 + local.get 2 i32.add local.tee 2 local.get 2 @@ -3306,7 +3285,7 @@ i32.store offset=4 local.get 1 local.get 3 - call 4 + call 6 local.get 0 return end @@ -3368,8 +3347,8 @@ i32.const 1 i32.or i32.store offset=4 - local.get 1 - local.get 3 + local.get 6 + local.get 2 i32.add local.tee 2 local.get 3 @@ -3400,7 +3379,7 @@ br_if 7 (;@1;) end local.get 3 - call 3 + call 5 local.tee 1 i32.eqz br_if 4 (;@3;) @@ -3424,10 +3403,10 @@ local.get 3 i32.lt_u select - call 12 + call 14 local.set 3 local.get 0 - call 8 + call 10 local.get 3 return end @@ -3440,7 +3419,7 @@ br_if 1 (;@5;) local.get 2 local.get 3 - call 2 + call 4 local.set 0 br 2 (;@4;) end @@ -3466,7 +3445,7 @@ br 3 (;@2;) end local.get 3 - call 3 + call 5 local.set 0 end local.get 0 @@ -3506,13 +3485,13 @@ i32.store offset=1049004 local.get 0 ) - (func (;11;) (type 6) (param i32 i32 i32) (result i32) + (func (;13;) (type 6) (param i32 i32 i32) (result i32) (local i32 i32 i32 i32 i32 i32 i32 i32) block ;; label = @1 block ;; label = @2 local.get 2 - i32.const 15 - i32.gt_u + i32.const 16 + i32.ge_u br_if 0 (;@2;) local.get 0 local.set 3 @@ -3687,34 +3666,38 @@ end local.get 0 ) - (func (;12;) (type 6) (param i32 i32 i32) (result i32) + (func (;14;) (type 6) (param i32 i32 i32) (result i32) local.get 0 local.get 1 local.get 2 - call 11 + call 13 ) (memory (;0;) 17) (global (;0;) (mut i32) i32.const 1048576) (global (;1;) i32 i32.const 1049030) (global (;2;) i32 i32.const 1049040) (export "memory" (memory 0)) - (export "add-two" (func 1)) - (export "cabi_realloc" (func 10)) + (export "add-one" (func 1)) + (export "add-two" (func 3)) + (export "cabi_realloc" (func 12)) (export "__data_end" (global 1)) (export "__heap_base" (global 2)) (@producers - (processed-by "wit-component" "0.14.2") - (processed-by "wit-bindgen-rust" "0.12.0") + (processed-by "wit-component" "0.17.0") + (processed-by "wit-bindgen-rust" "0.13.2") ) ) (core instance (;0;) (instantiate 0)) (alias core export 0 "memory" (core memory (;0;))) (alias core export 0 "cabi_realloc" (core func (;0;))) (type (;0;) (func (param "input" s32) (result s32))) - (alias core export 0 "add-two" (core func (;1;))) + (alias core export 0 "add-one" (core func (;1;))) (func (;0;) (type 0) (canon lift (core func 1))) - (export (;1;) "add-two" (func 0)) + (export (;1;) "add-one" (func 0)) + 
(alias core export 0 "add-two" (core func (;2;))) + (func (;2;) (type 0) (canon lift (core func 2))) + (export (;3;) "add-two" (func 2)) (@producers - (processed-by "wit-component" "0.14.0") + (processed-by "wit-component" "0.18.1") ) ) diff --git a/homestar-wasm/fixtures/example_test.wasm b/homestar-wasm/fixtures/example_test.wasm index e4368bb7..15306f11 100755 Binary files a/homestar-wasm/fixtures/example_test.wasm and b/homestar-wasm/fixtures/example_test.wasm differ diff --git a/homestar-wasm/fixtures/example_test_component.wasm b/homestar-wasm/fixtures/example_test_component.wasm index 033cc89a..97168009 100644 Binary files a/homestar-wasm/fixtures/example_test_component.wasm and b/homestar-wasm/fixtures/example_test_component.wasm differ diff --git a/homestar-wasm/src/wasmtime/error.rs b/homestar-wasm/src/wasmtime/error.rs index 7e327579..a11147d8 100644 --- a/homestar-wasm/src/wasmtime/error.rs +++ b/homestar-wasm/src/wasmtime/error.rs @@ -40,7 +40,7 @@ pub enum Error { #[error(transparent)] WasmRuntime(#[from] anyhow::Error), /// Failure to find Wasm function for execution. - #[error("Wasm function {0} not found")] + #[error("Wasm function {0} not found in given Wasm component/resource")] WasmFunctionNotFound(String), /// [Wat] as Wasm component error. /// diff --git a/homestar-wasm/src/wasmtime/ipld.rs b/homestar-wasm/src/wasmtime/ipld.rs index b0b1d237..3f6273c8 100644 --- a/homestar-wasm/src/wasmtime/ipld.rs +++ b/homestar-wasm/src/wasmtime/ipld.rs @@ -12,11 +12,7 @@ use crate::error::{InterpreterError, TagsError}; use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut}; use itertools::{FoldWhile::Done, Itertools}; use libipld::{ - cid::{ - self, - multibase::{self, Base}, - Cid, - }, + cid::{self, multibase::Base, Cid}, Ipld, }; use rust_decimal::{ @@ -345,9 +341,6 @@ impl TryFrom for Ipld { type Error = InterpreterError; fn try_from(val: RuntimeVal) -> Result { - fn base_64_bytes(s: &str) -> Result, multibase::Error> { - Base::Base64.decode(s) - } fn cid(s: &str) -> Result { Cid::try_from(s) } @@ -360,8 +353,6 @@ impl TryFrom for Ipld { s => { if let Ok(cid) = cid(&s) { Ipld::Link(cid) - } else if let Ok(decoded) = base_64_bytes(&s) { - Ipld::Bytes(decoded) } else { Ipld::String(s) } @@ -704,11 +695,22 @@ mod test { fn try_bytes_roundtrip() { let bytes = b"hell0".to_vec(); let ipld = Ipld::Bytes(bytes.clone()); - let encoded_cid = Base::Base64.encode(bytes); - let runtime = RuntimeVal::new(Val::String(Box::from(encoded_cid))); + + let ty = test_utils::component::setup_component("(list u8)".to_string(), 8); + let val_list = ty + .unwrap_list() + .new_val(Box::new([ + Val::U8(104), + Val::U8(101), + Val::U8(108), + Val::U8(108), + Val::U8(48), + ])) + .unwrap(); + let runtime = RuntimeVal::new(val_list); assert_eq!( - RuntimeVal::try_from(ipld.clone(), &InterfaceType::Any).unwrap(), + RuntimeVal::try_from(ipld.clone(), &InterfaceType::Type(ty)).unwrap(), runtime ); diff --git a/homestar-wasm/src/wasmtime/world.rs b/homestar-wasm/src/wasmtime/world.rs index 18f95e41..6338ff12 100644 --- a/homestar-wasm/src/wasmtime/world.rs +++ b/homestar-wasm/src/wasmtime/world.rs @@ -121,7 +121,7 @@ impl Env { }, Input::Deferred(await_promise) => { bail!(Error::ResolvePromise(ResolveError::UnresolvedCid(format!( - "deferred task not yet resolved for {}: {}", + "deferred task/instruction not yet resolved or exists for promise: {}: {}", await_promise.result(), await_promise.instruction_cid() )))) diff --git a/homestar-wasm/tests/execute_wasm.rs b/homestar-wasm/tests/execute_wasm.rs index 
--- a/homestar-wasm/tests/execute_wasm.rs
+++ b/homestar-wasm/tests/execute_wasm.rs
@@ -35,8 +35,6 @@ async fn test_wasm_exceeds_max_memory() {
     } else {
         panic!("Expected WasmRuntimeError")
     }
-
-    //assert()
 }
 
 #[tokio::test]
@@ -135,6 +133,33 @@ async fn test_append_string() {
     );
 }
 
+#[tokio::test]
+async fn test_crop_base64() {
+    let img_uri = r#"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAYAAACNiR0NAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAG9SURBVHgBrVRLTgJBEK3qaYhLtiZGxxMoN8CdISTiCYSEmLgSTkA8AXFFAgv0BkNMDDu4gXAC2vhhy9IIU2X1CGb4T5CXzCRd3f26Xv0QBOna+ykiVJjBha2A3XhMlbz8vsFsdeCOtP/CAAn4H4bAcKbGMarsgMwiYVUqIj6FHYERXBU2oHV7BYI95HsH6VKk5eUzkw0TPqfDCwK400iGWDXmw+BrJ9mSoE/X59VBZ2/vazjy4xIyzk3tat6Tp8Kh54+d5J8HgRZuhsksWjf7xssfD5npNaxsXvLV9PDz9cGxlSaB7sopA0uQbfQlEeoorAalBvvC5E4IO1KLj0L2ABGQqb+lCLAd8sgsSI5KFtxHXii3GUJxPZWuf5QhIgici7WEwavAKSsFNsB2mCQru5HQFqfW2sAGSLveLuuwBULR7X77fluSlYMVyNQ+LVlx2Z6ec8+TXzOunY5XmK07C1smo3GsTEDFFW/Nls2vBYwtH/G0R9I1gYlUAh04kSzk1g4SuasXjCJZLuWCfVbTg8AEkaAQl3fBViDuKemM0ropExWWg2K6iHYhk8NVMmhF2FazUUiMhKQkXdb9AfsesrssluqmAAAAAElFTkSuQmCC"#;
+    let ipld = Input::Ipld(Ipld::Map(BTreeMap::from([
+        ("func".into(), Ipld::String("crop-base64".to_string())),
+        (
+            "args".into(),
+            Ipld::List(vec![
+                Ipld::String(img_uri.to_string()),
+                Ipld::Integer(10),
+                Ipld::Integer(10),
+                Ipld::Integer(50),
+                Ipld::Integer(50),
+            ]),
+        ),
+    ])));
+
+    let wasm = fs::read(fixtures("example_test.wasm")).unwrap();
+    let mut env = World::instantiate(wasm, "crop-base64", State::default())
+        .await
+        .unwrap();
+
+    let res = env.execute(ipld.parse().unwrap().try_into().unwrap()).await;
+
+    assert!(res.is_ok());
+}
+
 #[tokio::test]
 async fn test_matrix_transpose() {
     let ipld_inner = Ipld::List(vec![