diff --git a/.dockerignore b/.dockerignore index c15b9e4d..515c86b0 100644 --- a/.dockerignore +++ b/.dockerignore @@ -10,7 +10,9 @@ !**/migrations !diesel.toml !**/wit +!**/schemas/api.json examples homestar-functions homestar-workspace-hack +homestar-schemas diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index 569d60d3..0d68ae96 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -2,7 +2,7 @@ name: 🛡 Audit-Check on: schedule: - - cron: '0 0 * * *' + - cron: "0 0 * * *" jobs: security-audit: diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ea070df6..e77a0c29 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -2,10 +2,10 @@ name: ☂ Code Coverage on: push: - branches: [ main ] + branches: [main] pull_request: - branches: [ '*' ] + branches: ["*"] concurrency: group: ${{ github.workflow }}-${{ github.ref }} diff --git a/.github/workflows/dependabot_pr.yaml b/.github/workflows/dependabot_pr.yaml index 3195b668..8ef9cfb1 100644 --- a/.github/workflows/dependabot_pr.yaml +++ b/.github/workflows/dependabot_pr.yaml @@ -35,7 +35,7 @@ jobs: - name: Check for modified files id: git-check - run: echo modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi) >> $GITHUB_OUTPUT + run: echo modified=$(if [[ $(git diff) ]]; then echo "true"; else echo "false"; fi) >> $GITHUB_OUTPUT - name: Push changes if: steps.git-check.outputs.modified == 'true' diff --git a/.github/workflows/schemas.yml b/.github/workflows/schemas.yml new file mode 100644 index 00000000..266c812c --- /dev/null +++ b/.github/workflows/schemas.yml @@ -0,0 +1,72 @@ +name: 📄 Schemas + +on: + push: + branches: [main] + # branches: [main, "**"] + +permissions: + contents: write + pull-requests: write + +jobs: + changes: + runs-on: ubuntu-latest + outputs: + rust: ${{ steps.filter.outputs.rust }} + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + - uses: 
dorny/paths-filter@v3 + id: filter + with: + filters: | + rust: + - 'homestar-invocation/src/**' + - 'homestar-runtime/src/**' + - 'homestar-schemas/src/**' + - 'homestar-workflow/src/**' + + schemas: + needs: changes + if: ${{ needs.changes.outputs.rust == 'true' }} + env: + SCCACHE_GHA_ENABLED: "true" + RUSTC_WRAPPER: "sccache" + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 + token: ${{ secrets.HOMESTAR_UPDATE_TOKEN }} + + - name: Install Rust Toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Cache Project + uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + shared-key: test-all-stable-ubuntu-latest + save-if: ${{ github.event_name == 'push' }} + + - name: Sccache + uses: mozilla-actions/sccache-action@v0.0.3 + + - name: Run generate schemas + run: cargo run -p homestar-schemas + + - name: Check for modified schemas + id: git-check + shell: bash + run: echo modified=$(if [[ $(git diff homestar-runtime/schemas/) ]]; then echo "true"; else echo "false"; fi) >> $GITHUB_OUTPUT + + - name: Push changes + if: steps.git-check.outputs.modified == 'true' + run: | + git config user.name "${GITHUB_ACTOR}" + git config user.email "${GITHUB_ACTOR}@users.noreply.github.com" + git remote set-url origin https://x-access-token:${GITHUB_TOKEN}@github.com/${GITHUB_REPOSITORY}.git + git commit -am "chore(schemas): update OpenRPC API doc and JSON schemas" + git push diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e7310957..b60f3e33 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -79,7 +79,7 @@ repos: - id: check-merge-conflict - id: trailing-whitespace - id: end-of-file-fixer - exclude: \.txt$ + exclude: \.(txt|json)$ - id: check-yaml - id: check-json - id: check-added-large-files diff --git a/Cargo.lock b/Cargo.lock index c5cbeb66..6489058b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -200,6 +200,12 @@ version = "0.3.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +[[package]] +name = "arrayvec" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + [[package]] name = "arrayvec" version = "0.7.4" @@ -564,7 +570,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.7.4", "constant_time_eq", ] @@ -575,7 +581,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94230421e395b9920d23df13ea5d77a20e1725331f90fbbf6df6040b33f756ae" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.7.4", "constant_time_eq", ] @@ -586,7 +592,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" dependencies = [ "arrayref", - "arrayvec", + "arrayvec 0.7.4", "cc", "cfg-if", "constant_time_eq", @@ -1539,6 +1545,17 @@ dependencies = [ "serde", ] +[[package]] +name = "derive-getters" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2c35ab6e03642397cdda1dd58abbc05d418aef8e36297f336d5aba060fe8df" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "diesel" version = "2.1.4" @@ -2498,6 +2515,7 @@ dependencies = [ "anyhow", "async-recursion", "byte-unit", + "const_format", "diesel", "enum-as-inner", "enum-assoc", @@ -2507,6 +2525,7 @@ dependencies = [ "libipld", "libsqlite3-sys", "rand", + "schemars", "serde", "serde_json", "signature", @@ -2537,6 +2556,7 @@ dependencies = [ "daemonize", "dagga", "dashmap", + "derive-getters", "diesel", "diesel_migrations", "dotenvy", @@ -2585,6 +2605,7 @@ dependencies = [ "reqwest", "retry", "rm_rf", + "schemars", "sec1", 
"semver", "serde", @@ -2629,6 +2650,19 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "homestar-schemas" +version = "0.1.1" +dependencies = [ + "homestar-invocation", + "homestar-runtime", + "homestar-workflow", + "homestar-workspace-hack", + "schemars", + "serde", + "serde_json", +] + [[package]] name = "homestar-wasm" version = "0.1.1" @@ -2667,6 +2701,7 @@ dependencies = [ "indexmap 2.2.2", "json", "libipld", + "schemars", "serde", "serde_json", "thiserror", @@ -2678,7 +2713,7 @@ version = "0.1.0" dependencies = [ "ahash", "anyhow", - "arrayvec", + "arrayvec 0.7.4", "base64 0.13.1", "bitflags 2.4.2", "bytes", @@ -3710,7 +3745,7 @@ version = "0.45.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cc5767727d062c4eac74dd812c998f0e488008e82cce9c33b463d38423f9ad2" dependencies = [ - "arrayvec", + "arrayvec 0.7.4", "asynchronous-codec 0.7.0", "bytes", "either", @@ -5695,7 +5730,7 @@ version = "1.34.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "755392e1a2f77afd95580d3f0d0e94ac83eeeb7167552c9b5bca549e61a94d83" dependencies = [ - "arrayvec", + "arrayvec 0.7.4", "num-traits", ] @@ -5903,6 +5938,32 @@ dependencies = [ "parking_lot", ] +[[package]] +name = "schemars" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45a28f4c49489add4ce10783f7911893516f15afe45d015608d41faca6bc4d29" +dependencies = [ + "arrayvec 0.5.2", + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", + "url", +] + +[[package]] +name = "schemars_derive" +version = "0.8.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c767fd6fa65d9ccf9cf026122c1b555f2ef9a4f0cea69da4d7dbc3e258d30967" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn 1.0.109", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -6006,6 +6067,17 @@ dependencies = [ "syn 2.0.48", ] +[[package]] +name = "serde_derive_internals" +version = 
"0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "serde_ipld_dagcbor" version = "0.4.2" diff --git a/Cargo.toml b/Cargo.toml index 84ed4124..8e47f342 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "homestar-functions/*", "homestar-invocation", "homestar-runtime", + "homestar-schemas", "homestar-wasm", "homestar-workflow", "homestar-workspace-hack", @@ -34,6 +35,7 @@ chrono = { version = "0.4", default-features = false, features = [ "clock", "std", ] } +const_format = "0.2" enum-assoc = "1.1" enum-as-inner = "0.6" faststr = { version = "0.2", default-features = false, features = ["serde"] } @@ -51,7 +53,11 @@ libsqlite3-sys = { version = "0.27", default-features = false, features = [ "bundled", ] } rand = { version = "0.8", default-features = false } +schemars = { version = "0.8", features = ["arrayvec", "url"] } serde = { version = "1.0", default-features = false, features = ["derive"] } +serde_json = { version = "1.0", default-features = false, features = [ + "raw_value", +] } serde_ipld_dagcbor = { version = "0.4", default-features = false, features = [ "std", ] } diff --git a/README.md b/README.md index 7d86a90d..cd3ed092 100644 --- a/README.md +++ b/README.md @@ -154,6 +154,10 @@ represents the `Homestar` runtime. We recommend diving into each package's own writing and compiling [Wasm component][wasm-component] modules using [wit-bindgen][wit-bindgen]. +- [homestar-schemas](./homestar-schemas) + +`homestar-schemas` is a crate for generating OpenRPC docs and JSON Schemas that document the [homestar-runtime](./homestar-runtime) JSON-RPC API, workflows, and receipts. 
+ - [examples/*](./examples) `examples` contains examples and demos showcasing `Homestar` packages diff --git a/docker/Dockerfile b/docker/Dockerfile index f7c307d8..fcb83e4c 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -31,9 +31,11 @@ RUN cargo init --lib homestar-invocation && \ RUN echo "fn main() {}" > ./homestar-runtime/src/main.rs -RUN mkdir -p ./homestar-runtime/src/test_utils/proc_macro ./homestar-runtime/migrations ./examples ./homestar-functions +RUN mkdir -p ./homestar-runtime/src/test_utils/proc_macro ./homestar-runtime/migrations \ + ./examples ./homestar-functions ./homestar-schemas RUN bash -c 'pushd ./examples && cargo init dummy-app-examples && popd' RUN bash -c 'pushd ./homestar-functions && cargo init dummy-app-fns && popd' +RUN cargo init homestar-schemas RUN cargo init --lib homestar-workspace-hack # copy cargo.* @@ -43,6 +45,7 @@ COPY ../homestar-workflow/Cargo.toml ./homestar-workflow/ COPY ../homestar-wasm/Cargo.toml ./homestar-wasm/ COPY ../homestar-runtime/Cargo.toml ./homestar-runtime/ COPY ../homestar-runtime/migrations ./homestar-runtime/migrations +COPY ../homestar-runtime/schemas/api.json ./homestar-runtime/schemas/api.json COPY ../homestar-runtime/src/test_utils/proc_macro ./homestar-runtime/src/test_utils/proc_macro ENTRYPOINT ["/bin/bash"] @@ -91,6 +94,7 @@ WORKDIR /home/runner COPY --chown=homestar:homestar diesel.toml ./ COPY --chown=homestar:homestar ../homestar-runtime/migrations ./migrations +COPY --chown=homestar:homestar ../homestar-runtime/schemas ./schemas COPY --chown=homestar:homestar --from=builder /usr/local/bin/homestar-runtime ./homestar COPY --chown=homestar:homestar --from=builder /usr/local/bin/diesel /usr/local/bin/diesel COPY --chown=homestar:homestar --from=builder /etc/*.db ./ diff --git a/homestar-invocation/Cargo.toml b/homestar-invocation/Cargo.toml index 8c8b6bb6..7d5994f0 100644 --- a/homestar-invocation/Cargo.toml +++ b/homestar-invocation/Cargo.toml @@ -23,6 +23,7 @@ doctest = true anyhow = 
{ workspace = true } async-recursion = { version = "1.0", default-features = false } byte-unit = { workspace = true } +const_format = { workspace = true } diesel = { version = "2.1", default-features = false, features = [ "sqlite", ], optional = true } @@ -36,7 +37,9 @@ homestar-workspace-hack = { workspace = true } libipld = { workspace = true } libsqlite3-sys = { workspace = true, optional = true } rand = { workspace = true } +schemars = { workspace = true } serde = { workspace = true } +serde_json = { workspace = true } signature = "2.2" thiserror = { workspace = true } tracing = { workspace = true } diff --git a/homestar-invocation/src/authority/issuer.rs b/homestar-invocation/src/authority/issuer.rs index 01f32600..3c3cc9bb 100644 --- a/homestar-invocation/src/authority/issuer.rs +++ b/homestar-invocation/src/authority/issuer.rs @@ -1,6 +1,7 @@ //! Issuer refers to the issuer of the invocation. use crate::{Error, Unit}; +use const_format::formatcp; #[cfg(feature = "diesel")] use diesel::{ backend::Backend, @@ -11,8 +12,13 @@ use diesel::{ sqlite::Sqlite, }; use libipld::{serde::from_ipld, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; -use std::{fmt, str::FromStr}; +use std::{borrow::Cow, fmt, module_path, str::FromStr}; use ucan::ipld::Principle as Principal; /// [Principal] issuer of the [Invocation]. 
If omitted issuer is @@ -91,6 +97,29 @@ where } } +impl JsonSchema for Issuer { + fn schema_name() -> String { + "iss".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::Issuer", module_path!())) + } + + fn json_schema(_gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + metadata: Some(Box::new(Metadata { + title: Some("Issuer".to_string()), + description: Some("Principal that issued the receipt".to_string()), + ..Default::default() + })), + ..Default::default() + }; + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-invocation/src/authority/prf.rs b/homestar-invocation/src/authority/prf.rs index 2a95a226..2a2808ee 100644 --- a/homestar-invocation/src/authority/prf.rs +++ b/homestar-invocation/src/authority/prf.rs @@ -4,6 +4,7 @@ //! [Task]: crate::Task use crate::{ipld::Link, Error, Unit}; +use const_format::formatcp; #[cfg(feature = "diesel")] use diesel::{ backend::Backend, @@ -16,7 +17,13 @@ use diesel::{ #[cfg(feature = "diesel")] use libipld::{cbor::DagCborCodec, prelude::Codec}; use libipld::{serde::from_ipld, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{ArrayValidation, InstanceType, Metadata, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; +use std::{borrow::Cow, module_path}; use ucan::ipld::UcanIpld; /// Proof container, with links to UCANs for a particular [Task] or @@ -89,6 +96,32 @@ impl TryFrom<&Ipld> for UcanPrf { } } +impl JsonSchema for UcanPrf { + fn schema_name() -> String { + "prf".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::UcanPrf", module_path!())) + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Vec(vec![InstanceType::Array])), + array: Some(Box::new(ArrayValidation { + items: 
Some(gen.subschema_for::().into()), + ..Default::default() + })), + metadata: Some(Box::new(Metadata { + description: Some("CIDs referencing UCAN proofs".to_string()), + ..Default::default() + })), + ..Default::default() + }; + schema.into() + } +} + #[cfg(feature = "diesel")] #[cfg_attr(docsrs, doc(cfg(feature = "diesel")))] impl ToSql for UcanPrf { diff --git a/homestar-invocation/src/ipld/mod.rs b/homestar-invocation/src/ipld/mod.rs index d7868e8e..5a28bc09 100644 --- a/homestar-invocation/src/ipld/mod.rs +++ b/homestar-invocation/src/ipld/mod.rs @@ -3,6 +3,7 @@ mod dag_cbor; mod dag_json; mod link; +pub mod schema; pub use dag_cbor::*; pub use dag_json::*; diff --git a/homestar-invocation/src/ipld/schema.rs b/homestar-invocation/src/ipld/schema.rs new file mode 100644 index 00000000..eebfb288 --- /dev/null +++ b/homestar-invocation/src/ipld/schema.rs @@ -0,0 +1,144 @@ +//! JSON Schema generation for DAG-JSON encoded Ipld. + +use const_format::formatcp; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; +use std::{borrow::Cow, collections::BTreeMap, module_path}; + +/// Ipld stub for JSON Schema generation +#[derive(Debug)] +#[doc(hidden)] +pub struct IpldStub(); + +// The Ipld stub exists solely to implement a JSON Schema +// represenation of Ipld. Should libipld provide an implementation +// in the future, this can be removed. 
+impl JsonSchema for IpldStub { + fn schema_name() -> String { + "ipld".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::IpldStub", module_path!())) + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let mut schema = SchemaObject { + instance_type: None, + metadata: Some(Box::new(Metadata { + title: Some("Ipld".to_string()), + description: Some("DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md".to_string()), + ..Default::default() + })), + ..Default::default() + }; + + let number_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Number.into())), + ..Default::default() + }; + let array_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Array.into())), + ..Default::default() + }; + let object_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + ..Default::default() + }; + + schema.subschemas().one_of = Some(vec![ + <()>::json_schema(gen), + ::json_schema(gen), + Schema::Object(number_schema), + ::json_schema(gen), + gen.subschema_for::(), + Schema::Object(array_schema), + Schema::Object(object_schema), + gen.subschema_for::(), + ]); + + schema.into() + } +} + +/// Ipld link stub for JSON Schema generation +#[derive(Debug)] +#[doc(hidden)] +pub struct IpldLinkStub(); + +impl JsonSchema for IpldLinkStub { + fn schema_name() -> String { + "ipld_link".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::IpldLinkStub", module_path!())) + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([('/'.to_string(), ::json_schema(gen))]), + ..Default::default() + })), + metadata: Some(Box::new(Metadata { + title: Some("IPLD link".to_string()), + description: Some("CID 
link that points to some IPLD data".to_string()), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} + +/// Ipld bytes stub for JSON Schema generation +#[derive(Debug)] +#[doc(hidden)] +pub struct IpldBytesStub(); + +impl JsonSchema for IpldBytesStub { + fn schema_name() -> String { + "ipld_bytes".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::IpldBytesStub", module_path!())) + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("IPLD bytes".to_string()), + description: Some("Base64 encoded binary".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + '/'.to_string(), + Schema::Object(SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + "bytes".to_string(), + ::json_schema(gen), + )]), + ..Default::default() + })), + ..Default::default() + }), + )]), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} diff --git a/homestar-invocation/src/pointer.rs b/homestar-invocation/src/pointer.rs index 20b9818c..ebac05ad 100644 --- a/homestar-invocation/src/pointer.rs +++ b/homestar-invocation/src/pointer.rs @@ -9,6 +9,7 @@ //! 
[Receipts]: super::Receipt use crate::{ensure, Error, Unit}; +use const_format::formatcp; #[cfg(feature = "diesel")] use diesel::{ backend::Backend, @@ -20,10 +21,15 @@ use diesel::{ }; use enum_assoc::Assoc; use libipld::{cid::Cid, serde::from_ipld, Ipld, Link}; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; #[cfg(feature = "diesel")] use std::str::FromStr; -use std::{borrow::Cow, collections::btree_map::BTreeMap, fmt}; +use std::{borrow::Cow, collections::btree_map::BTreeMap, fmt, module_path}; /// `await/ok` branch for instruction result. pub const OK_BRANCH: &str = "await/ok"; @@ -74,6 +80,69 @@ impl fmt::Display for AwaitResult { } } +impl JsonSchema for AwaitResult { + fn schema_name() -> String { + "await_result".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::AwaitResult", module_path!())) + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let mut schema = SchemaObject { + instance_type: None, + metadata: Some(Box::new(Metadata { + title: Some("Await result".to_string()), + description: Some("Branches of a promise that is awaited".to_string()), + ..Default::default() + })), + ..Default::default() + }; + + let await_ok = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + OK_BRANCH.to_string(), + gen.subschema_for::(), + )]), + ..Default::default() + })), + ..Default::default() + }; + let await_err = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + ERR_BRANCH.to_string(), + gen.subschema_for::(), + )]), + ..Default::default() + })), + ..Default::default() + }; + let await_ptr = SchemaObject { + instance_type: 
Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + PTR_BRANCH.to_string(), + gen.subschema_for::(), + )]), + ..Default::default() + })), + ..Default::default() + }; + + schema.subschemas().one_of = Some(vec![ + Schema::Object(await_ok), + Schema::Object(await_err), + Schema::Object(await_ptr), + ]); + schema.into() + } +} + /// Describes the eventual output of the referenced [Instruction] as a /// [Pointer], either resolving to a tagged [OK_BRANCH], [ERR_BRANCH], or direct /// result of a [PTR_BRANCH]. @@ -131,7 +200,7 @@ impl TryFrom for Await { ensure!( map.len() == 1, Error::ConditionNotMet( - "await promise must jave only a single key ain a map".to_string() + "await promise must have only a single key in a map".to_string() ) ); @@ -278,6 +347,34 @@ where } } +impl JsonSchema for Pointer { + fn schema_name() -> String { + "pointer".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::Pointer", module_path!())) + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([('/'.to_string(), ::json_schema(gen))]), + ..Default::default() + })), + metadata: Some(Box::new(Metadata { + description: Some( + "CID reference to an invocation, task, instruction, or receipt".to_string(), + ), + ..Default::default() + })), + ..Default::default() + }; + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-invocation/src/receipt.rs b/homestar-invocation/src/receipt.rs index 57d51251..f0eb3a00 100644 --- a/homestar-invocation/src/receipt.rs +++ b/homestar-invocation/src/receipt.rs @@ -6,7 +6,15 @@ use crate::{ task, Error, Pointer, Unit, }; use libipld::{self, cbor::DagCborCodec, prelude::Codec, serde::from_ipld, Ipld}; -use std::collections::BTreeMap; +use 
schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet}, +}; pub mod metadata; @@ -178,3 +186,61 @@ impl TryFrom> for Pointer { Ok(Pointer::new(receipt.to_cid()?)) } } + +impl JsonSchema for Receipt { + fn schema_name() -> String { + "receipt".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::receipt::Receipt") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let meta_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Receipt metadata".to_string()), + description: Some( + "Receipt metadata including the operation that produced the receipt" + .to_string(), + ), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([("op".to_owned(), ::json_schema(gen))]), + required: BTreeSet::from(["op".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Receipt".to_string()), + description: Some("A computed receipt".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("ran".to_owned(), gen.subschema_for::()), + ("out".to_owned(), gen.subschema_for::>()), + ("meta".to_owned(), Schema::Object(meta_schema)), + ("iss".to_owned(), gen.subschema_for::>()), + ("prf".to_owned(), gen.subschema_for::()), + ]), + required: BTreeSet::from([ + "ran".to_string(), + "out".to_string(), + "meta".to_string(), + "prf".to_string(), + ]), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} diff --git a/homestar-invocation/src/task.rs b/homestar-invocation/src/task.rs index 
8c0d5859..668d63ac 100644 --- a/homestar-invocation/src/task.rs +++ b/homestar-invocation/src/task.rs @@ -6,6 +6,7 @@ use crate::{ Error, Pointer, Unit, }; use libipld::{cid::Cid, serde::from_ipld, Ipld}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; @@ -28,11 +29,19 @@ const PROOF_KEY: &str = "prf"; /// /// [Instruction]: Instruction /// [Receipt]: super::Receipt -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)] +#[schemars( + rename = "task", + description = "Contains a run instruction, configuration, optional reference to receipt that caused task to run, and authorization" +)] pub struct Task<'a, T> { + #[schemars(with = "Instruction<'a, T>", rename = "run", title = "Run instruction")] run: RunInstruction<'a, T>, + #[schemars(title = "Receipt reference")] cause: Option, + #[schemars(with = "Resources", title = "Task Configuration")] meta: Ipld, + #[schemars(title = "UCAN Authorization")] prf: UcanPrf, } diff --git a/homestar-invocation/src/task/config.rs b/homestar-invocation/src/task/config.rs index f90e5792..c9cfad0e 100644 --- a/homestar-invocation/src/task/config.rs +++ b/homestar-invocation/src/task/config.rs @@ -4,6 +4,7 @@ use crate::{consts, Error, Unit}; use libipld::{serde::from_ipld, Ipld}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{collections::BTreeMap, default::Default, time::Duration}; @@ -12,10 +13,16 @@ const MEMORY_KEY: &str = "memory"; const TIMEOUT_KEY: &str = "time"; /// Resource configuration for defining fuel quota, timeout, etc. 
-#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, JsonSchema)] +#[schemars( + rename = "resources", + description = "Resource configuration for fuel quota, memory allowance, and timeout" +)] pub struct Resources { fuel: Option, + #[schemars(description = "Memory in bytes")] memory: Option, + #[schemars(with = "Option", description = "Timeout in milliseconds")] time: Option, } diff --git a/homestar-invocation/src/task/instruction.rs b/homestar-invocation/src/task/instruction.rs index 8ad90d16..621bd66c 100644 --- a/homestar-invocation/src/task/instruction.rs +++ b/homestar-invocation/src/task/instruction.rs @@ -1,10 +1,27 @@ //! An [Instruction] is the smallest unit of work that can be requested from a //! UCAN, described via `resource`, `ability`. -use crate::{ipld::DagCbor, Error, Pointer, Unit}; +use crate::{ + ipld::{self, DagCbor}, + pointer::AwaitResult, + Error, Pointer, Unit, +}; use libipld::{cid::multibase::Base, serde::from_ipld, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{ + ArrayValidation, InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, + SingleOrVec, + }, + JsonSchema, +}; use serde::{Deserialize, Serialize}; -use std::{borrow::Cow, collections::BTreeMap, fmt}; +use serde_json::json; +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet}, + fmt, +}; use url::Url; const RESOURCE_KEY: &str = "rsc"; @@ -320,6 +337,137 @@ where impl<'a, T> DagCbor for Instruction<'a, T> where Ipld: From {} +impl<'a, T> JsonSchema for Instruction<'a, T> { + fn schema_name() -> String { + "run".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::task::Instruction") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + struct InputConditional { + if_schema: Schema, + then_schema: Schema, + else_schema: Schema, + } + + fn input_conditional(gen: &mut SchemaGenerator) -> InputConditional { + let if_schema = 
SchemaObject { + instance_type: None, + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + "op".to_owned(), + Schema::Object(SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + const_value: Some(json!("wasm/run")), + ..Default::default() + }), + )]), + ..Default::default() + })), + ..Default::default() + }; + + let func_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + metadata: Some(Box::new(Metadata { + description: Some("The function to call on the Wasm resource".to_string()), + ..Default::default() + })), + ..Default::default() + }; + + let args_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Array.into())), + metadata: Some(Box::new(Metadata { + description: Some( + "Arguments to the function. May await a result from another task." + .to_string(), + ), + ..Default::default() + })), + array: Some(Box::new(ArrayValidation { + items: Some(SingleOrVec::Vec(vec![ + gen.subschema_for::(), + gen.subschema_for::(), + ])), + ..Default::default() + })), + ..Default::default() + }; + + let input_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("func".to_string(), Schema::Object(func_schema)), + ("args".to_string(), Schema::Object(args_schema)), + ]), + required: BTreeSet::from(["func".to_string(), "args".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + let then_schema = SchemaObject { + instance_type: None, + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + "input".to_string(), + Schema::Object(input_schema), + )]), + ..Default::default() + })), + ..Default::default() + }; + + InputConditional { + if_schema: Schema::Object(if_schema), + then_schema: Schema::Object(then_schema), + else_schema: Schema::Bool(false), + } + } + + let op_schema = SchemaObject { 
+ instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + metadata: Some(Box::new(Metadata { + description: Some("Function executor".to_string()), + ..Default::default() + })), + enum_values: Some(vec![json!("wasm/run")]), + ..Default::default() + }; + + let mut schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Run instruction".to_string()), + description: Some("An instruction that runs a function from a resource, executor that will run the function, inputs to the executor, and optional nonce".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("rsc".to_owned(), ::json_schema(gen)), + ("op".to_owned(), Schema::Object(op_schema)), + ("nnc".to_owned(), ::json_schema(gen)) + ]), + required: BTreeSet::from(["rsc".to_string(), "op".to_string(), "input".to_string(), "nnc".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + let input = input_conditional(gen); + schema.subschemas().if_schema = Some(Box::new(input.if_schema)); + schema.subschemas().then_schema = Some(Box::new(input.then_schema)); + schema.subschemas().else_schema = Some(Box::new(input.else_schema)); + + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-invocation/src/task/instruction/nonce.rs b/homestar-invocation/src/task/instruction/nonce.rs index 2f807e93..a821a111 100644 --- a/homestar-invocation/src/task/instruction/nonce.rs +++ b/homestar-invocation/src/task/instruction/nonce.rs @@ -3,14 +3,20 @@ //! 
[Instruction]: super::Instruction use crate::{Error, Unit}; +use const_format::formatcp; use enum_as_inner::EnumAsInner; use generic_array::{ typenum::consts::{U12, U16}, GenericArray, }; use libipld::{multibase::Base::Base32HexLower, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; -use std::fmt; +use std::{borrow::Cow, fmt, module_path}; use uuid::Uuid; type Nonce96 = GenericArray; @@ -88,6 +94,30 @@ impl TryFrom<&Ipld> for Nonce { } } +impl JsonSchema for Nonce { + fn schema_name() -> String { + "nonce".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::Nonce", module_path!())) + } + + fn json_schema(_gen: &mut SchemaGenerator) -> Schema { + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + metadata: Some(Box::new(Metadata { + description: Some( + "A 12-byte or 16-byte nonce. 
Use empty string for no nonce.".to_string(), + ), + ..Default::default() + })), + ..Default::default() + }; + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-invocation/src/task/result.rs b/homestar-invocation/src/task/result.rs index f552d066..fa610a7b 100644 --- a/homestar-invocation/src/task/result.rs +++ b/homestar-invocation/src/task/result.rs @@ -16,7 +16,14 @@ use diesel::{ use libipld::Ipld; #[cfg(feature = "diesel")] use libipld::{cbor::DagCborCodec, prelude::Codec}; +use schemars::{ + gen::SchemaGenerator, + schema::{ArrayValidation, InstanceType, Metadata, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; +use serde_json::json; +use std::borrow::Cow; const OK: &str = "ok"; const ERR: &str = "error"; @@ -160,6 +167,47 @@ where } } +impl JsonSchema for Result { + fn schema_name() -> String { + "out".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed("homestar-invocation::task::Result") + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let out_result = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + enum_values: Some(vec![json!(OK), json!(ERR), json!(JUST)]), + ..Default::default() + }; + + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Computation result".to_string()), + description: Some( + "Result tuple with ok/err/just result and associated output".to_string(), + ), + ..Default::default() + })), + array: Some(Box::new(ArrayValidation { + items: Some(SingleOrVec::Vec(vec![ + Schema::Object(out_result), + gen.subschema_for::(), + ])), + min_items: Some(2), + max_items: Some(2), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} + #[cfg(test)] mod test { use super::*; diff --git a/homestar-runtime/Cargo.toml b/homestar-runtime/Cargo.toml index 057c2d0a..d80dc2b6 100644 
--- a/homestar-runtime/Cargo.toml +++ b/homestar-runtime/Cargo.toml @@ -12,6 +12,7 @@ documentation = "https://docs.rs/homestar-runtime" repository = "https://github.com/ipvm-wg/homestar/tree/main/homestar-runtime" authors = { workspace = true } autotests = false +default-run = "homestar" [lib] path = "src/lib.rs" @@ -48,10 +49,11 @@ config = { version = "0.13", default-features = false, features = ["toml"] } console-subscriber = { version = "0.2", default-features = false, features = [ "parking_lot", ], optional = true } -const_format = "0.2" +const_format = { workspace = true } crossbeam = "0.8" dagga = "0.2" dashmap = "5.5" +derive-getters = "0.3" diesel = { version = "2.1", default-features = false, features = [ "sqlite", "r2d2", @@ -132,13 +134,12 @@ reqwest = { version = "0.11", default-features = false, features = [ "blocking", "json", ] } +schemars = { workspace = true } sec1 = { version = "0.7", default-features = false, features = ["pem"] } semver = { version = "1.0", default-features = false } serde = { workspace = true } serde_ipld_dagcbor = { workspace = true } -serde_json = { version = "1.0", default-features = false, features = [ - "raw_value", -] } +serde_json = { workspace = true } serde_with = { version = "3.5", default-features = false, features = [ "base64", "macros", diff --git a/homestar-runtime/README.md b/homestar-runtime/README.md index 1e3d0e23..005bf049 100644 --- a/homestar-runtime/README.md +++ b/homestar-runtime/README.md @@ -32,3 +32,17 @@ and failure modes, etc. For more information, please go to our [Homestar Readme][homestar-readme]. [homestar-readme]: https://github.com/ipvm-wg/homestar/blob/main/README.md + +## API + +The runtime provides a JSON-RPC API to run workflows, request node information, health, and metrics, and to subscribe to network notifications. The OpenRPC API is documented in [api.json][api] and is available on the `rpc_discover` endpoint. 
+ +JSON Schemas for [workflow][workflow-schema], [receipt][receipt-schema], [network notifications][network-schema], [health checks][health-schema], [node info][node-info-schema], and [metrics][metrics-schema] are also available inidividually. + +[api]: ./schemas/api.json +[health-schema]: ./schemas/health.json +[metrics-schema]: ./schemas/metrics.json +[network-schema]: ./schemas/network.json +[node-info-schema]: ./schemas/node_info.json +[receipt-schema]: ./schemas/receipt.json +[workflow-schema]: ./schemas/workflow.json diff --git a/homestar-runtime/schemas/api.json b/homestar-runtime/schemas/api.json new file mode 100644 index 00000000..01b88381 --- /dev/null +++ b/homestar-runtime/schemas/api.json @@ -0,0 +1,1683 @@ +{ + "openrpc": "1.2.6", + "info": { + "title": "homestar", + "description": "", + "version": "0.1.1", + "contact": { + "name": null, + "email": null, + "url": "" + }, + "license": { + "name": "", + "url": null + } + }, + "externalDocs": { + "description": null, + "url": "https://docs.everywhere.computer/homestar/what-is-homestar/" + }, + "methods": [ + { + "name": "rpc.discover", + "description": "OpenRPC schema as a description of this service", + "paramStructure": "either", + "params": [], + "result": { + "name": "OpenRPC Schema", + "schema": { + "$ref": "https://github.com/ipvm-wg/homestar/blob/main/homestar-runtime/schemas/docs/api.json" + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, + { + "name": "health", + "paramStructure": "either", + "params": [], + "result": { + "name": "health", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "health", + "description": "Health status of the server and database connection.", + "type": "object", + "required": [ + "healthy" + ], + "properties": { + "healthy": { + "description": "Health status.", + "type": "boolean" + } + } + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, + { + "name": "metrics", + 
"paramStructure": "either", + "params": [], + "result": { + "name": "metrics", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Metrics data", + "description": "Prometheus metrics data", + "type": "object", + "required": [ + "metrics" + ], + "properties": { + "metrics": { + "type": "array", + "items": { + "$ref": "#/definitions/metric" + } + } + }, + "definitions": { + "metric": { + "title": "Metric family", + "description": "A prometheus gauge, summary, or histogram metric", + "type": "object", + "if": { + "properties": { + "metric_type": { + "type": "string", + "const": "gauge" + } + } + }, + "then": { + "properties": { + "data": { + "title": "Gauge data", + "description": "A gauge metric", + "type": "object", + "required": [ + "type", + "value" + ], + "properties": { + "labels": { + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "type": { + "type": "string", + "const": "metric" + }, + "value": { + "type": "string" + } + } + } + } + }, + "else": false, + "required": [ + "data", + "help", + "metric_name", + "metric_type" + ], + "properties": { + "help": { + "type": "string" + }, + "metric_name": { + "type": "string" + }, + "metric_type": { + "title": "Metric type", + "type": "string", + "enum": [ + "gauge", + "histogram", + "summary" + ] + } + } + } + } + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, + { + "name": "node", + "paramStructure": "either", + "params": [], + "result": { + "name": "node_info", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "node_info", + "description": "Node information.", + "type": "object", + "required": [ + "dynamic", + "static" + ], + "properties": { + "dynamic": { + "description": "Dynamic node information available through events at runtime.", + "allOf": [ + { + "$ref": "#/definitions/dynamic" + } + ] + }, + "static": { + "description": "Static node information available at startup.", + 
"allOf": [ + { + "$ref": "#/definitions/static" + } + ] + } + }, + "definitions": { + "dynamic": { + "description": "Dynamic node information available through events at runtime.", + "type": "object", + "required": [ + "connections", + "listeners" + ], + "properties": { + "connections": { + "description": "Peers and their addresses that are connected to the node", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "listeners": { + "description": "Listen addresses for the node", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "static": { + "description": "Static node information available at startup.", + "type": "object", + "required": [ + "peer_id" + ], + "properties": { + "peer_id": { + "description": "The peer ID of the node", + "type": "string" + } + } + } + } + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, + { + "name": "subscribe_network_events", + "paramStructure": "either", + "params": [], + "result": { + "name": "subscription_id", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "String", + "type": "string" + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "x-messages": { + "name": "network subscription messages", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "network", + "description": "Network notification type.", + "oneOf": [ + { + "description": "Listening on new address notification.", + "type": "object", + "required": [ + "new_listen_addr" + ], + "properties": { + "new_listen_addr": { + "$ref": "#/definitions/new_listen_addr" + } + }, + "additionalProperties": false + }, + { + "description": "Connection established notification.", + "type": "object", + "required": [ + "connection_established" + ], + "properties": { + "connection_established": { + "$ref": "#/definitions/connection_established" + } + }, + "additionalProperties": false + }, + { + "description": "Connection closed 
notification.", + "type": "object", + "required": [ + "connection_closed" + ], + "properties": { + "connection_closed": { + "$ref": "#/definitions/connection_closed" + } + }, + "additionalProperties": false + }, + { + "description": "Outgoing conenction error notification.", + "type": "object", + "required": [ + "outgoing_connection_error" + ], + "properties": { + "outgoing_connection_error": { + "$ref": "#/definitions/outgoing_connection_error" + } + }, + "additionalProperties": false + }, + { + "description": "Incoming conenction error notification.", + "type": "object", + "required": [ + "incoming_connection_error" + ], + "properties": { + "incoming_connection_error": { + "$ref": "#/definitions/incoming_connection_error" + } + }, + "additionalProperties": false + }, + { + "description": "mDNS discovered notification.", + "type": "object", + "required": [ + "discovered_mdns" + ], + "properties": { + "discovered_mdns": { + "$ref": "#/definitions/discovered_mdns" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous client discovered notification.", + "type": "object", + "required": [ + "discovered_rendezvous" + ], + "properties": { + "discovered_rendezvous": { + "$ref": "#/definitions/discovered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous client discovered notification.", + "type": "object", + "required": [ + "registered_rendezvous" + ], + "properties": { + "registered_rendezvous": { + "$ref": "#/definitions/registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous discover served notification.", + "type": "object", + "required": [ + "discover_served_rendezvous" + ], + "properties": { + "discover_served_rendezvous": { + "$ref": "#/definitions/registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous peer registered notification.", + "type": "object", + "required": [ + "peer_registered_rendezvous" + ], + 
"properties": { + "peer_registered_rendezvous": { + "$ref": "#/definitions/peer_registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Published receipt pubsub notification.", + "type": "object", + "required": [ + "published_receipt_pubsub" + ], + "properties": { + "published_receipt_pubsub": { + "$ref": "#/definitions/published_receipt_pubsub" + } + }, + "additionalProperties": false + }, + { + "description": "Received receipt pubsub notification.", + "type": "object", + "required": [ + "received_receipt_pubsub" + ], + "properties": { + "received_receipt_pubsub": { + "$ref": "#/definitions/received_receipt_pubsub" + } + }, + "additionalProperties": false + }, + { + "description": "Put receipt DHT notification.", + "type": "object", + "required": [ + "put_receipt_dht" + ], + "properties": { + "put_receipt_dht": { + "$ref": "#/definitions/put_receipt_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Got receipt DHT notification.", + "type": "object", + "required": [ + "got_receipt_dht" + ], + "properties": { + "got_receipt_dht": { + "$ref": "#/definitions/got_receipt_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Put workflow info DHT notification.", + "type": "object", + "required": [ + "put_workflow_info_dht" + ], + "properties": { + "put_workflow_info_dht": { + "$ref": "#/definitions/put_workflow_info_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Put workflow info DHT notification.", + "type": "object", + "required": [ + "got_workflow_info_dht" + ], + "properties": { + "got_workflow_info_dht": { + "$ref": "#/definitions/got_workflow_info_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Receipt quorum success notification.", + "type": "object", + "required": [ + "receipt_quorum_success_dht" + ], + "properties": { + "receipt_quorum_success_dht": { + "$ref": "#/definitions/receipt_quorum_success_dht" + } + }, + 
"additionalProperties": false + }, + { + "description": "Receipt quorum failure notification.", + "type": "object", + "required": [ + "receipt_quorum_failure_dht" + ], + "properties": { + "receipt_quorum_failure_dht": { + "$ref": "#/definitions/receipt_quorum_failure_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Workflow info quorum success notification.", + "type": "object", + "required": [ + "workflow_info_quorum_success_dht" + ], + "properties": { + "workflow_info_quorum_success_dht": { + "$ref": "#/definitions/workflow_info_quorum_success_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Workflow info quorum failure notification.", + "type": "object", + "required": [ + "workflow_info_quorum_failure_dht" + ], + "properties": { + "workflow_info_quorum_failure_dht": { + "$ref": "#/definitions/workflow_info_quorum_failure_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Sent workflow info notification.", + "type": "object", + "required": [ + "sent_workflow_info" + ], + "properties": { + "sent_workflow_info": { + "$ref": "#/definitions/sent_workflow_info" + } + }, + "additionalProperties": false + }, + { + "description": "Received workflow info notification.", + "type": "object", + "required": [ + "received_workflow_info" + ], + "properties": { + "received_workflow_info": { + "$ref": "#/definitions/received_workflow_info" + } + }, + "additionalProperties": false + } + ], + "definitions": { + "connection_closed": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "connection_established": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": 
"integer", + "format": "int64" + } + } + }, + "discovered_mdns": { + "type": "object", + "required": [ + "peers", + "timestamp" + ], + "properties": { + "peers": { + "description": "Peers discovered by peer ID and multiaddress", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "discovered_rendezvous": { + "type": "object", + "required": [ + "peers", + "server", + "timestamp" + ], + "properties": { + "peers": { + "description": "Peers discovered by peer ID and multiaddresses", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "server": { + "description": "Server that fulfilled the discovery request", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "got_receipt_dht": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "publisher": { + "description": "Receipt publisher peer ID", + "type": [ + "string", + "null" + ] + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "got_workflow_info_dht": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + 
"minimum": 0.0 + }, + "publisher": { + "description": "Workflow info publisher peer ID", + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "incoming_connection_error": { + "type": "object", + "required": [ + "error", + "timestamp" + ], + "properties": { + "error": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "new_listen_addr": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "outgoing_connection_error": { + "type": "object", + "required": [ + "error", + "timestamp" + ], + "properties": { + "error": { + "type": "string" + }, + "peer_id": { + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "peer_registered_rendezvous": { + "type": "object", + "required": [ + "addresses", + "peer_id", + "timestamp" + ], + "properties": { + "addresses": { + "description": "Multiaddresses for peer", + "type": "array", + "items": { + "type": "string" + } + }, + "peer_id": { + "description": "Peer registered", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "published_receipt_pubsub": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "put_receipt_dht": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": 
"integer", + "format": "int64" + } + } + }, + "put_workflow_info_dht": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "receipt_quorum_failure_dht": { + "type": "object", + "required": [ + "cid", + "connected_peer_count", + "quorum", + "stored_to_peers", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "connected_peer_count": { + "description": "Number of connected peers", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "quorum": { + "description": "Number of peers required for quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "stored_to_peers": { + "description": "Peers participating in quorum", + "type": "array", + "items": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "receipt_quorum_success_dht": { + "type": "object", + "required": [ + "cid", + "quorum", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "quorum": { + "description": "Number of peers participating in quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "received_receipt_pubsub": { + "type": 
"object", + "required": [ + "cid", + "publisher", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "publisher": { + "description": "Receipt publisher peer ID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "received_workflow_info": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "provider": { + "description": "Workflow info provider peer ID", + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "registered_rendezvous": { + "type": "object", + "required": [ + "server", + "timestamp" + ], + "properties": { + "server": { + "description": "Server that accepted registration", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "sent_workflow_info": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "requestor", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + 
"format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "requestor": { + "description": "Peer that requested workflow info", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "workflow_info_quorum_failure_dht": { + "type": "object", + "required": [ + "cid", + "connected_peer_count", + "quorum", + "stored_to_peers", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "connected_peer_count": { + "description": "Number of connected peers", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "quorum": { + "description": "Number of peers required for quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "stored_to_peers": { + "description": "Peers participating in quorum", + "type": "array", + "items": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "workflow_info_quorum_success_dht": { + "type": "object", + "required": [ + "cid", + "quorum", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "quorum": { + "description": "Number of peers participating in quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + } + } + }, + "required": true, + "deprecated": false + } + }, + { + "name": "unsubscribe_network_events", + "paramStructure": "either", + "params": [], + "result": { + "name": "unsubscribe result", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Boolean", + "type": "boolean" + }, + "required": true, + "deprecated": false + }, + "deprecated": false + }, + { 
+ "name": "subscribe_run_workflow", + "paramStructure": "by-name", + "params": [ + { + "name": "tasks", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Workflow", + "description": "Workflow composed of tasks", + "type": "object", + "required": [ + "tasks" + ], + "properties": { + "tasks": { + "type": "array", + "items": { + "$ref": "#/definitions/task" + } + } + }, + "definitions": { + "await_result": { + "title": "Await result", + "description": "Branches of a promise that is awaited", + "oneOf": [ + { + "type": "object", + "properties": { + "await/ok": { + "$ref": "#/definitions/pointer" + } + } + }, + { + "type": "object", + "properties": { + "await/error": { + "$ref": "#/definitions/pointer" + } + } + }, + { + "type": "object", + "properties": { + "await/*": { + "$ref": "#/definitions/pointer" + } + } + } + ] + }, + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": "IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + }, + 
"resources": { + "description": "Resource configuration for fuel quota, memory allowance, and timeout", + "type": "object", + "properties": { + "fuel": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "memory": { + "description": "Memory in bytes", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "time": { + "description": "Timeout in milliseconds", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + } + }, + "run": { + "title": "Run instruction", + "description": "An instruction that runs a function from a resource, executor that will run the function, inputs to the executor, and optional nonce", + "type": "object", + "if": { + "properties": { + "op": { + "type": "string", + "const": "wasm/run" + } + } + }, + "then": { + "properties": { + "input": { + "type": "object", + "required": [ + "args", + "func" + ], + "properties": { + "args": { + "description": "Arguments to the function. May await a result from another task.", + "type": "array", + "items": [ + { + "$ref": "#/definitions/ipld" + }, + { + "$ref": "#/definitions/await_result" + } + ] + }, + "func": { + "description": "The function to call on the Wasm resource", + "type": "string" + } + } + } + } + }, + "else": false, + "required": [ + "input", + "nnc", + "op", + "rsc" + ], + "properties": { + "nnc": { + "description": "A 12-byte or 16-byte nonce. 
Use empty string for no nonce.", + "type": "string" + }, + "op": { + "description": "Function executor", + "type": "string", + "enum": [ + "wasm/run" + ] + }, + "rsc": { + "type": "string", + "format": "uri" + } + } + }, + "task": { + "description": "Contains a run instruction, configuration, optional reference to receipt that caused task to run, and authorization", + "type": "object", + "required": [ + "meta", + "prf", + "run" + ], + "properties": { + "cause": { + "title": "Receipt reference", + "anyOf": [ + { + "$ref": "#/definitions/pointer" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Task Configuration", + "allOf": [ + { + "$ref": "#/definitions/resources" + } + ] + }, + "prf": { + "title": "UCAN Authorization", + "allOf": [ + { + "$ref": "#/definitions/prf" + } + ] + }, + "run": { + "title": "Run instruction", + "allOf": [ + { + "$ref": "#/definitions/run" + } + ] + } + } + } + } + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "subscription_id", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "String", + "type": "string" + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "x-messages": { + "name": "workflow subscription messages", + "summary": "receipt notifications from a running workflow", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Receipt notification", + "description": "A receipt notification associated with a running workflow", + "type": "object", + "required": [ + "receipt", + "receipt_cid" + ], + "properties": { + "metadata": { + "title": "Metadata", + "description": "Workflow metadata to contextualize the receipt", + "type": "object", + "required": [ + "name", + "receipt", + "receipt_cid" + ], + "properties": { + "name": { + "type": "string" + }, + "replayed": { + "type": "boolean" + }, + "workflow": { + "$ref": "#/definitions/ipld_link" + } + } + }, + "receipt": { + "$ref": "#/definitions/receipt" + }, + 
"receipt_cid": { + "$ref": "#/definitions/ipld_link" + } + }, + "definitions": { + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": "IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "iss": { + "title": "Issuer", + "description": "Principal that issued the receipt", + "type": "string" + }, + "out": { + "title": "Computation result", + "description": "Result tuple with ok/err/just result and associated output", + "type": "object", + "items": [ + { + "type": "object", + "enum": [ + "ok", + "error", + "just" + ] + }, + { + "$ref": "#/definitions/ipld" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + }, + "receipt": { + "title": "Receipt", + "description": "A computed receipt", + "type": "object", + "required": [ + "meta", + "out", + "prf", + "ran" + ], + "properties": { + "iss": { + "anyOf": [ + { + "$ref": "#/definitions/iss" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Receipt metadata", + "description": "Receipt metadata including the 
operation that produced the receipt", + "type": "object", + "required": [ + "op" + ], + "properties": { + "op": { + "type": "string" + } + } + }, + "out": { + "$ref": "#/definitions/out" + }, + "prf": { + "$ref": "#/definitions/prf" + }, + "ran": { + "$ref": "#/definitions/pointer" + } + } + } + } + }, + "required": true, + "deprecated": false + } + }, + { + "name": "unsubscribe_run_workflow", + "paramStructure": "either", + "params": [], + "result": { + "name": "unsubscribe result", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Boolean", + "type": "boolean" + }, + "required": true, + "deprecated": false + }, + "deprecated": false + } + ] +} \ No newline at end of file diff --git a/homestar-runtime/schemas/health.json b/homestar-runtime/schemas/health.json new file mode 100644 index 00000000..c4debee7 --- /dev/null +++ b/homestar-runtime/schemas/health.json @@ -0,0 +1,15 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "health", + "description": "Health status of the server and database connection.", + "type": "object", + "required": [ + "healthy" + ], + "properties": { + "healthy": { + "description": "Health status.", + "type": "boolean" + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/metrics.json b/homestar-runtime/schemas/metrics.json new file mode 100644 index 00000000..564c638b --- /dev/null +++ b/homestar-runtime/schemas/metrics.json @@ -0,0 +1,87 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Metrics data", + "description": "Prometheus metrics data", + "type": "object", + "required": [ + "metrics" + ], + "properties": { + "metrics": { + "type": "array", + "items": { + "$ref": "#/definitions/metric" + } + } + }, + "definitions": { + "metric": { + "title": "Metric family", + "description": "A prometheus gauge, summary, or histogram metric", + "type": "object", + "if": { + "properties": { + "metric_type": { + "type": "string", + "const": "gauge" + 
} + } + }, + "then": { + "properties": { + "data": { + "title": "Gauge data", + "description": "A gauge metric", + "type": "object", + "required": [ + "type", + "value" + ], + "properties": { + "labels": { + "type": [ + "object", + "null" + ], + "additionalProperties": { + "type": "string" + } + }, + "type": { + "type": "string", + "const": "metric" + }, + "value": { + "type": "string" + } + } + } + } + }, + "else": false, + "required": [ + "data", + "help", + "metric_name", + "metric_type" + ], + "properties": { + "help": { + "type": "string" + }, + "metric_name": { + "type": "string" + }, + "metric_type": { + "title": "Metric type", + "type": "string", + "enum": [ + "gauge", + "histogram", + "summary" + ] + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/network.json b/homestar-runtime/schemas/network.json new file mode 100644 index 00000000..4fdc9d8f --- /dev/null +++ b/homestar-runtime/schemas/network.json @@ -0,0 +1,912 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "network", + "description": "Network notification type.", + "oneOf": [ + { + "description": "Listening on new address notification.", + "type": "object", + "required": [ + "new_listen_addr" + ], + "properties": { + "new_listen_addr": { + "$ref": "#/definitions/new_listen_addr" + } + }, + "additionalProperties": false + }, + { + "description": "Connection established notification.", + "type": "object", + "required": [ + "connection_established" + ], + "properties": { + "connection_established": { + "$ref": "#/definitions/connection_established" + } + }, + "additionalProperties": false + }, + { + "description": "Connection closed notification.", + "type": "object", + "required": [ + "connection_closed" + ], + "properties": { + "connection_closed": { + "$ref": "#/definitions/connection_closed" + } + }, + "additionalProperties": false + }, + { + "description": "Outgoing conenction error notification.", + "type": "object", + "required": [ + 
"outgoing_connection_error" + ], + "properties": { + "outgoing_connection_error": { + "$ref": "#/definitions/outgoing_connection_error" + } + }, + "additionalProperties": false + }, + { + "description": "Incoming conenction error notification.", + "type": "object", + "required": [ + "incoming_connection_error" + ], + "properties": { + "incoming_connection_error": { + "$ref": "#/definitions/incoming_connection_error" + } + }, + "additionalProperties": false + }, + { + "description": "mDNS discovered notification.", + "type": "object", + "required": [ + "discovered_mdns" + ], + "properties": { + "discovered_mdns": { + "$ref": "#/definitions/discovered_mdns" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous client discovered notification.", + "type": "object", + "required": [ + "discovered_rendezvous" + ], + "properties": { + "discovered_rendezvous": { + "$ref": "#/definitions/discovered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous client discovered notification.", + "type": "object", + "required": [ + "registered_rendezvous" + ], + "properties": { + "registered_rendezvous": { + "$ref": "#/definitions/registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous discover served notification.", + "type": "object", + "required": [ + "discover_served_rendezvous" + ], + "properties": { + "discover_served_rendezvous": { + "$ref": "#/definitions/registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Rendezvous peer registered notification.", + "type": "object", + "required": [ + "peer_registered_rendezvous" + ], + "properties": { + "peer_registered_rendezvous": { + "$ref": "#/definitions/peer_registered_rendezvous" + } + }, + "additionalProperties": false + }, + { + "description": "Published receipt pubsub notification.", + "type": "object", + "required": [ + "published_receipt_pubsub" + ], + "properties": { + 
"published_receipt_pubsub": { + "$ref": "#/definitions/published_receipt_pubsub" + } + }, + "additionalProperties": false + }, + { + "description": "Received receipt pubsub notification.", + "type": "object", + "required": [ + "received_receipt_pubsub" + ], + "properties": { + "received_receipt_pubsub": { + "$ref": "#/definitions/received_receipt_pubsub" + } + }, + "additionalProperties": false + }, + { + "description": "Put receipt DHT notification.", + "type": "object", + "required": [ + "put_receipt_dht" + ], + "properties": { + "put_receipt_dht": { + "$ref": "#/definitions/put_receipt_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Got receipt DHT notification.", + "type": "object", + "required": [ + "got_receipt_dht" + ], + "properties": { + "got_receipt_dht": { + "$ref": "#/definitions/got_receipt_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Put workflow info DHT notification.", + "type": "object", + "required": [ + "put_workflow_info_dht" + ], + "properties": { + "put_workflow_info_dht": { + "$ref": "#/definitions/put_workflow_info_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Put workflow info DHT notification.", + "type": "object", + "required": [ + "got_workflow_info_dht" + ], + "properties": { + "got_workflow_info_dht": { + "$ref": "#/definitions/got_workflow_info_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Receipt quorum success notification.", + "type": "object", + "required": [ + "receipt_quorum_success_dht" + ], + "properties": { + "receipt_quorum_success_dht": { + "$ref": "#/definitions/receipt_quorum_success_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Receipt quorum failure notification.", + "type": "object", + "required": [ + "receipt_quorum_failure_dht" + ], + "properties": { + "receipt_quorum_failure_dht": { + "$ref": "#/definitions/receipt_quorum_failure_dht" + } + }, + "additionalProperties": false + }, + { 
+ "description": "Workflow info quorum success notification.", + "type": "object", + "required": [ + "workflow_info_quorum_success_dht" + ], + "properties": { + "workflow_info_quorum_success_dht": { + "$ref": "#/definitions/workflow_info_quorum_success_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Workflow info quorum failure notification.", + "type": "object", + "required": [ + "workflow_info_quorum_failure_dht" + ], + "properties": { + "workflow_info_quorum_failure_dht": { + "$ref": "#/definitions/workflow_info_quorum_failure_dht" + } + }, + "additionalProperties": false + }, + { + "description": "Sent workflow info notification.", + "type": "object", + "required": [ + "sent_workflow_info" + ], + "properties": { + "sent_workflow_info": { + "$ref": "#/definitions/sent_workflow_info" + } + }, + "additionalProperties": false + }, + { + "description": "Received workflow info notification.", + "type": "object", + "required": [ + "received_workflow_info" + ], + "properties": { + "received_workflow_info": { + "$ref": "#/definitions/received_workflow_info" + } + }, + "additionalProperties": false + } + ], + "definitions": { + "connection_closed": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "connection_established": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "discovered_mdns": { + "type": "object", + "required": [ + "peers", + "timestamp" + ], + "properties": { + "peers": { + "description": "Peers discovered by peer ID and multiaddress", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "timestamp": { + 
"type": "integer", + "format": "int64" + } + } + }, + "discovered_rendezvous": { + "type": "object", + "required": [ + "peers", + "server", + "timestamp" + ], + "properties": { + "peers": { + "description": "Peers discovered by peer ID and multiaddresses", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "server": { + "description": "Server that fulfilled the discovery request", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "got_receipt_dht": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "publisher": { + "description": "Receipt publisher peer ID", + "type": [ + "string", + "null" + ] + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "got_workflow_info_dht": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "publisher": { + "description": "Workflow info publisher peer ID", + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "incoming_connection_error": { + "type": "object", + "required": [ + "error", + "timestamp" + ], + "properties": { + "error": { 
+ "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "new_listen_addr": { + "type": "object", + "required": [ + "address", + "peer_id", + "timestamp" + ], + "properties": { + "address": { + "type": "string" + }, + "peer_id": { + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "outgoing_connection_error": { + "type": "object", + "required": [ + "error", + "timestamp" + ], + "properties": { + "error": { + "type": "string" + }, + "peer_id": { + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "peer_registered_rendezvous": { + "type": "object", + "required": [ + "addresses", + "peer_id", + "timestamp" + ], + "properties": { + "addresses": { + "description": "Multiaddresses for peer", + "type": "array", + "items": { + "type": "string" + } + }, + "peer_id": { + "description": "Peer registered", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "published_receipt_pubsub": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "put_receipt_dht": { + "type": "object", + "required": [ + "cid", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "put_workflow_info_dht": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + 
"type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "receipt_quorum_failure_dht": { + "type": "object", + "required": [ + "cid", + "connected_peer_count", + "quorum", + "stored_to_peers", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "connected_peer_count": { + "description": "Number of connected peers", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "quorum": { + "description": "Number of peers required for quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "stored_to_peers": { + "description": "Peers participating in quorum", + "type": "array", + "items": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "receipt_quorum_success_dht": { + "type": "object", + "required": [ + "cid", + "quorum", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "quorum": { + "description": "Number of peers participating in quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "received_receipt_pubsub": { + "type": "object", + "required": [ + "cid", + "publisher", + "ran", + "timestamp" + ], + "properties": { + "cid": { + "description": "Receipt CID", + "type": "string" + }, + "publisher": { + "description": "Receipt publisher peer ID", + "type": "string" + }, + "ran": { + "description": "Ran receipt CID", + "type": "string" + }, + "timestamp": { + 
"type": "integer", + "format": "int64" + } + } + }, + "received_workflow_info": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "provider": { + "description": "Workflow info provider peer ID", + "type": [ + "string", + "null" + ] + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "registered_rendezvous": { + "type": "object", + "required": [ + "server", + "timestamp" + ], + "properties": { + "server": { + "description": "Server that accepted registration", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "sent_workflow_info": { + "type": "object", + "required": [ + "cid", + "num_tasks", + "progress", + "progress_count", + "requestor", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "name": { + "description": "Optional workflow name", + "type": [ + "string", + "null" + ] + }, + "num_tasks": { + "description": "Number of tasks in workflow", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "progress": { + "description": "Completed task CIDs", + "type": "array", + "items": { + "type": "string" + } + }, + "progress_count": { + "description": "Number of workflow tasks completed", + "type": "integer", + "format": "uint32", + "minimum": 0.0 + }, + "requestor": { + "description": "Peer 
that requested workflow info", + "type": "string" + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "workflow_info_quorum_failure_dht": { + "type": "object", + "required": [ + "cid", + "connected_peer_count", + "quorum", + "stored_to_peers", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "connected_peer_count": { + "description": "Number of connected peers", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "quorum": { + "description": "Number of peers required for quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "stored_to_peers": { + "description": "Peers participating in quorum", + "type": "array", + "items": { + "type": "string" + } + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + }, + "workflow_info_quorum_success_dht": { + "type": "object", + "required": [ + "cid", + "quorum", + "timestamp" + ], + "properties": { + "cid": { + "description": "Workflow info CID", + "type": "string" + }, + "quorum": { + "description": "Number of peers participating in quorum", + "type": "integer", + "format": "uint", + "minimum": 0.0 + }, + "timestamp": { + "type": "integer", + "format": "int64" + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/node_info.json b/homestar-runtime/schemas/node_info.json new file mode 100644 index 00000000..b810c3c0 --- /dev/null +++ b/homestar-runtime/schemas/node_info.json @@ -0,0 +1,67 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "node_info", + "description": "Node information.", + "type": "object", + "required": [ + "dynamic", + "static" + ], + "properties": { + "dynamic": { + "description": "Dynamic node information available through events at runtime.", + "allOf": [ + { + "$ref": "#/definitions/dynamic" + } + ] + }, + "static": { + "description": "Static node information available at startup.", + "allOf": [ + { + "$ref": 
"#/definitions/static" + } + ] + } + }, + "definitions": { + "dynamic": { + "description": "Dynamic node information available through events at runtime.", + "type": "object", + "required": [ + "connections", + "listeners" + ], + "properties": { + "connections": { + "description": "Peers and their addresses that are connected to the node", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "listeners": { + "description": "Listen addresses for the node", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "static": { + "description": "Static node information available at startup.", + "type": "object", + "required": [ + "peer_id" + ], + "properties": { + "peer_id": { + "description": "The peer ID of the node", + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/receipt.json b/homestar-runtime/schemas/receipt.json new file mode 100644 index 00000000..b0af3812 --- /dev/null +++ b/homestar-runtime/schemas/receipt.json @@ -0,0 +1,146 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Receipt", + "description": "A computed receipt", + "type": "object", + "required": [ + "meta", + "out", + "prf", + "ran" + ], + "properties": { + "iss": { + "anyOf": [ + { + "$ref": "#/definitions/iss" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Receipt metadata", + "description": "Receipt metadata including the operation that produced the receipt", + "type": "object", + "required": [ + "op" + ], + "properties": { + "op": { + "type": "string" + } + } + }, + "out": { + "$ref": "#/definitions/out" + }, + "prf": { + "$ref": "#/definitions/prf" + }, + "ran": { + "$ref": "#/definitions/pointer" + } + }, + "definitions": { + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + 
{ + "type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": "IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "iss": { + "title": "Issuer", + "description": "Principal that issued the receipt", + "type": "string" + }, + "out": { + "title": "Computation result", + "description": "Result tuple with ok/err/just result and associated output", + "type": "object", + "items": [ + { + "type": "object", + "enum": [ + "ok", + "error", + "just" + ] + }, + { + "$ref": "#/definitions/ipld" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/receipt_notification.json b/homestar-runtime/schemas/receipt_notification.json new file mode 100644 index 00000000..bc5fbd9b --- /dev/null +++ b/homestar-runtime/schemas/receipt_notification.json @@ -0,0 +1,184 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Receipt notification", + "description": "A receipt notification associated with a running workflow", + "type": "object", + "required": [ + "receipt", + "receipt_cid" + ], + "properties": { + "metadata": { + "title": "Metadata", + "description": "Workflow metadata to contextualize the receipt", + "type": "object", + "required": [ + 
"name", + "receipt", + "receipt_cid" + ], + "properties": { + "name": { + "type": "string" + }, + "replayed": { + "type": "boolean" + }, + "workflow": { + "$ref": "#/definitions/ipld_link" + } + } + }, + "receipt": { + "$ref": "#/definitions/receipt" + }, + "receipt_cid": { + "$ref": "#/definitions/ipld_link" + } + }, + "definitions": { + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": "IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "iss": { + "title": "Issuer", + "description": "Principal that issued the receipt", + "type": "string" + }, + "out": { + "title": "Computation result", + "description": "Result tuple with ok/err/just result and associated output", + "type": "object", + "items": [ + { + "type": "object", + "enum": [ + "ok", + "error", + "just" + ] + }, + { + "$ref": "#/definitions/ipld" + } + ], + "maxItems": 2, + "minItems": 2 + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + }, + "receipt": { + "title": "Receipt", + "description": "A computed receipt", + "type": "object", + "required": 
[ + "meta", + "out", + "prf", + "ran" + ], + "properties": { + "iss": { + "anyOf": [ + { + "$ref": "#/definitions/iss" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Receipt metadata", + "description": "Receipt metadata including the operation that produced the receipt", + "type": "object", + "required": [ + "op" + ], + "properties": { + "op": { + "type": "string" + } + } + }, + "out": { + "$ref": "#/definitions/out" + }, + "prf": { + "$ref": "#/definitions/prf" + }, + "ran": { + "$ref": "#/definitions/pointer" + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/schemas/workflow.json b/homestar-runtime/schemas/workflow.json new file mode 100644 index 00000000..b55c23ea --- /dev/null +++ b/homestar-runtime/schemas/workflow.json @@ -0,0 +1,266 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Workflow", + "description": "Workflow composed of tasks", + "type": "object", + "required": [ + "tasks" + ], + "properties": { + "tasks": { + "type": "array", + "items": { + "$ref": "#/definitions/task" + } + } + }, + "definitions": { + "await_result": { + "title": "Await result", + "description": "Branches of a promise that is awaited", + "oneOf": [ + { + "type": "object", + "properties": { + "await/ok": { + "$ref": "#/definitions/pointer" + } + } + }, + { + "type": "object", + "properties": { + "await/error": { + "$ref": "#/definitions/pointer" + } + } + }, + { + "type": "object", + "properties": { + "await/*": { + "$ref": "#/definitions/pointer" + } + } + } + ] + }, + "ipld": { + "title": "Ipld", + "description": "DAG-JSON encoded IPLD: https://github.com/ipld/ipld/blob/master/specs/codecs/dag-json/spec.md", + "oneOf": [ + { + "type": "null" + }, + { + "type": "boolean" + }, + { + "type": "number" + }, + { + "type": "string" + }, + { + "$ref": "#/definitions/ipld_bytes" + }, + { + "type": "array" + }, + { + "type": "object" + }, + { + "$ref": "#/definitions/ipld_link" + } + ] + }, + "ipld_bytes": { + "title": 
"IPLD bytes", + "description": "Base64 encoded binary", + "type": "object", + "properties": { + "/": { + "type": "object", + "properties": { + "bytes": { + "type": "string" + } + } + } + } + }, + "ipld_link": { + "title": "IPLD link", + "description": "CID link that points to some IPLD data", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "pointer": { + "description": "CID reference to an invocation, task, instruction, or receipt", + "type": "object", + "properties": { + "/": { + "type": "string" + } + } + }, + "prf": { + "description": "CIDs referencing UCAN proofs", + "type": [ + "array" + ], + "items": { + "type": "string" + } + }, + "resources": { + "description": "Resource configuration for fuel quota, memory allowance, and timeout", + "type": "object", + "properties": { + "fuel": { + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "memory": { + "description": "Memory in bytes", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + }, + "time": { + "description": "Timeout in milliseconds", + "type": [ + "integer", + "null" + ], + "format": "uint64", + "minimum": 0.0 + } + } + }, + "run": { + "title": "Run instruction", + "description": "An instruction that runs a function from a resource, executor that will run the function, inputs to the executor, and optional nonce", + "type": "object", + "if": { + "properties": { + "op": { + "type": "string", + "const": "wasm/run" + } + } + }, + "then": { + "properties": { + "input": { + "type": "object", + "required": [ + "args", + "func" + ], + "properties": { + "args": { + "description": "Arguments to the function. 
May await a result from another task.", + "type": "array", + "items": [ + { + "$ref": "#/definitions/ipld" + }, + { + "$ref": "#/definitions/await_result" + } + ] + }, + "func": { + "description": "The function to call on the Wasm resource", + "type": "string" + } + } + } + } + }, + "else": false, + "required": [ + "input", + "nnc", + "op", + "rsc" + ], + "properties": { + "nnc": { + "description": "A 12-byte or 16-byte nonce. Use empty string for no nonce.", + "type": "string" + }, + "op": { + "description": "Function executor", + "type": "string", + "enum": [ + "wasm/run" + ] + }, + "rsc": { + "type": "string", + "format": "uri" + } + } + }, + "task": { + "description": "Contains a run instruction, configuration, optional reference to receipt that caused task to run, and authorization", + "type": "object", + "required": [ + "meta", + "prf", + "run" + ], + "properties": { + "cause": { + "title": "Receipt reference", + "anyOf": [ + { + "$ref": "#/definitions/pointer" + }, + { + "type": "null" + } + ] + }, + "meta": { + "title": "Task Configuration", + "allOf": [ + { + "$ref": "#/definitions/resources" + } + ] + }, + "prf": { + "title": "UCAN Authorization", + "allOf": [ + { + "$ref": "#/definitions/prf" + } + ] + }, + "run": { + "title": "Run instruction", + "allOf": [ + { + "$ref": "#/definitions/run" + } + ] + } + } + } + } +} \ No newline at end of file diff --git a/homestar-runtime/src/db.rs b/homestar-runtime/src/db.rs index 428d03d2..4a8acf4e 100644 --- a/homestar-runtime/src/db.rs +++ b/homestar-runtime/src/db.rs @@ -1,6 +1,7 @@ //! (Default) sqlite database integration and setup. use crate::{ + db::utils::Health, settings, workflow::{self, StoredReceipt}, Receipt, @@ -123,9 +124,9 @@ pub trait Database: Send + Sync + Clone { } /// Check if the database is up. 
- fn health_check(conn: &mut Connection) -> Result<(), diesel::result::Error> { + fn health_check(conn: &mut Connection) -> Result { diesel::sql_query("SELECT 1").execute(conn)?; - Ok(()) + Ok(Health { healthy: true }) } /// Commit a receipt to the database, updating two tables diff --git a/homestar-runtime/src/db/utils.rs b/homestar-runtime/src/db/utils.rs index 091de85c..656744a3 100644 --- a/homestar-runtime/src/db/utils.rs +++ b/homestar-runtime/src/db/utils.rs @@ -1,6 +1,8 @@ //! Utility functions Database interaction. use chrono::NaiveDateTime; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; /// Trait for converting nanoseconds to a timestamp. #[allow(dead_code)] @@ -15,3 +17,11 @@ impl Timestamp for i64 { NaiveDateTime::from_timestamp_opt(seconds, nanos as u32) } } + +/// Health status of the server and database connection. +#[derive(Debug, Serialize, Deserialize, JsonSchema)] +#[schemars(rename = "health")] +pub struct Health { + /// Health status. + pub healthy: bool, +} diff --git a/homestar-runtime/src/event_handler/event.rs b/homestar-runtime/src/event_handler/event.rs index c54d6c92..1094c175 100644 --- a/homestar-runtime/src/event_handler/event.rs +++ b/homestar-runtime/src/event_handler/event.rs @@ -5,7 +5,7 @@ use super::swarm_event::FoundEvent; use super::EventHandler; #[cfg(feature = "websocket-notify")] use crate::event_handler::{ - notification::{self, emit_receipt, EventNotificationTyp, SwarmNotification}, + notification::{self, emit_receipt, NetworkNotification}, swarm_event::{ReceiptEvent, WorkflowInfoEvent}, }; #[cfg(feature = "ipfs")] @@ -31,8 +31,6 @@ use libp2p::{ rendezvous::Namespace, PeerId, }; -#[cfg(feature = "websocket-notify")] -use maplit::btreemap; use std::{ collections::{HashMap, HashSet}, num::NonZeroUsize, @@ -176,39 +174,49 @@ impl Event { #[cfg(feature = "websocket-notify")] #[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] Event::StoredRecord(event) => match event { - 
FoundEvent::Receipt(ReceiptEvent { + FoundEvent::Receipt(ReceiptEvent { peer_id, receipt }) => { + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::GotReceiptDht(notification::GotReceiptDht::new( + peer_id, + receipt.cid(), + receipt.ran(), + )), + ) + } + FoundEvent::Workflow(WorkflowInfoEvent { peer_id, - receipt, - notification_type, - }) => notification::emit_event( + workflow_info, + workflow_source, + }) => notification::emit_network_event( event_handler.ws_evt_sender(), - notification_type, - btreemap! { - "publisher" => peer_id.map_or(Ipld::Null, |peer_id| Ipld::String(peer_id.to_string())), - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) + match workflow_source { + notification::WorkflowInfoSource::Dht => { + NetworkNotification::GotWorkflowInfoDht( + notification::GotWorkflowInfoDht::new( + peer_id, + workflow_info.cid(), + workflow_info.name, + workflow_info.num_tasks, + workflow_info.progress, + workflow_info.progress_count, + ), + ) + } + notification::WorkflowInfoSource::RequestResponse => { + NetworkNotification::ReceivedWorkflowInfo( + notification::ReceivedWorkflowInfo::new( + peer_id, + workflow_info.cid(), + workflow_info.name, + workflow_info.num_tasks, + workflow_info.progress, + workflow_info.progress_count, + ), + ) + } }, ), - FoundEvent::Workflow(WorkflowInfoEvent { - peer_id, - workflow_info, - notification_type, - }) => { - if let Some(peer_label) = notification_type.workflow_info_source_label() { - notification::emit_event( - event_handler.ws_evt_sender(), - notification_type, - btreemap! 
{ - peer_label => peer_id.map_or(Ipld::Null, |peer_id| Ipld::String(peer_id.to_string())), - "cid" => Ipld::String(workflow_info.cid().to_string()), - "name" => workflow_info.name.map_or(Ipld::Null, |name| Ipld::String(name.to_string())), - "numTasks" => Ipld::Integer(workflow_info.num_tasks as i128), - "progress" => Ipld::List(workflow_info.progress.iter().map(|cid| Ipld::String(cid.to_string())).collect()), - "progressCount" => Ipld::Integer(workflow_info.progress_count as i128), - }, - ) - } - } }, Event::OutboundRequest(PeerRequest { peer, @@ -401,16 +409,12 @@ impl Captured { ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::PublishedReceiptPubsub, + NetworkNotification::PublishedReceiptPubsub( + notification::PublishedReceiptPubsub::new(receipt.cid(), receipt.ran()), ), - btreemap! { - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) - }, - ); + ) } Err(err) => { warn!( @@ -467,16 +471,13 @@ impl Captured { ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::PutReceiptDht, - ), - btreemap! 
{ - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) - }, - ); + NetworkNotification::PutReceiptDht(notification::PutReceiptDht::new( + receipt.cid(), + receipt.ran(), + )), + ) }, ); @@ -512,18 +513,17 @@ impl Captured { ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::PutWorkflowInfoDht, + NetworkNotification::PutWorkflowInfoDht( + notification::PutWorkflowInfoDht::new( + self.workflow.cid(), + self.workflow.name.to_owned(), + self.workflow.num_tasks, + self.workflow.progress.to_owned(), + self.workflow.progress_count, + ), ), - btreemap! { - "cid" => Ipld::String(self.workflow.cid().to_string()), - "name" => self.workflow.name.as_ref().map_or(Ipld::Null, |name| Ipld::String(name.to_string())), - "numTasks" => Ipld::Integer(self.workflow.num_tasks as i128), - "progress" => Ipld::List(self.workflow.progress.iter().map(|cid| Ipld::String(cid.to_string())).collect()), - "progressCount" => Ipld::Integer(self.workflow.progress_count as i128), - }, ) }, ); @@ -610,16 +610,15 @@ impl Replay { ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::PublishedReceiptPubsub, + NetworkNotification::PublishedReceiptPubsub( + notification::PublishedReceiptPubsub::new( + receipt.cid(), + receipt.ran(), + ), ), - btreemap! 
{ - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) - }, - ); + ) }) .map_err(|err| { warn!( @@ -709,16 +708,28 @@ where { #[cfg(not(feature = "ipfs"))] async fn handle_event(self, event_handler: &mut EventHandler) { - if let Err(err) = self.handle_info(event_handler).await { - error!(subject = "handle.err", - category = "handle_event", - error=?err, - "error storing event") + match self { + #[cfg(feature = "websocket-notify")] + Event::ReplayReceipts(replay) => { + if let Err(err) = replay.notify(event_handler) { + error!(subject = "replay.err", + category = "handle_event", + error=?err, + "error replaying and notifying receipts") + } + } + event => { + if let Err(err) = event.handle_info(event_handler).await { + error!(subject = "event.err", + category = "handle_event", + error=?err, + "error storing event") + } + } } } #[cfg(feature = "ipfs")] - #[cfg_attr(docsrs, doc(cfg(feature = "ipfs")))] #[allow(unused_variables)] async fn handle_event(self, event_handler: &mut EventHandler, ipfs: IpfsCli) { match self { diff --git a/homestar-runtime/src/event_handler/notification.rs b/homestar-runtime/src/event_handler/notification.rs index 209ca367..c03df12e 100644 --- a/homestar-runtime/src/event_handler/notification.rs +++ b/homestar-runtime/src/event_handler/notification.rs @@ -8,22 +8,22 @@ use crate::{ receipt::metadata::{WORKFLOW_KEY, WORKFLOW_NAME_KEY}, Receipt, }; -use anyhow::anyhow; -use chrono::prelude::Utc; use homestar_invocation::{ipld::DagJson, Receipt as InvocationReceipt}; -use libipld::{serde::from_ipld, Ipld}; -use serde::{Deserialize, Serialize}; -use std::{collections::BTreeMap, fmt, str::FromStr}; +use libipld::Ipld; use tracing::{debug, warn}; +pub(crate) mod network; pub(crate) mod receipt; -pub(crate) mod swarm; +pub(crate) use network::{ + ConnectionClosed, ConnectionEstablished, DiscoverServedRendezvous, DiscoveredMdns, + DiscoveredRendezvous, GotReceiptDht, GotWorkflowInfoDht, 
IncomingConnectionError, + NetworkNotification, NewListenAddr, OutgoingConnectionError, PeerRegisteredRendezvous, + PublishedReceiptPubsub, PutReceiptDht, PutWorkflowInfoDht, ReceiptQuorumFailureDht, + ReceiptQuorumSuccessDht, ReceivedReceiptPubsub, ReceivedWorkflowInfo, RegisteredRendezvous, + SentWorkflowInfo, WorkflowInfoQuorumFailureDht, WorkflowInfoQuorumSuccessDht, + WorkflowInfoSource, +}; pub(crate) use receipt::ReceiptNotification; -pub(crate) use swarm::SwarmNotification; - -const TYPE_KEY: &str = "type"; -const DATA_KEY: &str = "data"; -const TIMESTAMP_KEY: &str = "timestamp"; /// Send receipt notification as bytes. pub(crate) fn emit_receipt( @@ -66,210 +66,32 @@ pub(crate) fn emit_receipt( } } -/// Send event notification as bytes. -pub(crate) fn emit_event( +/// Send network event notification as bytes. +pub(crate) fn emit_network_event( notifier: Notifier, - ty: EventNotificationTyp, - data: BTreeMap<&str, Ipld>, + notification: NetworkNotification, ) { let header = Header::new( SubscriptionTyp::EventSub(SUBSCRIBE_NETWORK_EVENTS_ENDPOINT.to_string()), None, ); - let notification = EventNotification::new(ty, data); if let Ok(json) = notification.to_json() { - let _ = notifier.notify(Message::new(header, json)); + if let Err(err) = notifier.notify(Message::new(header, json)) { + debug!( + subject = "notification.err", + category = "notification", + err=?err, + "unable to send notification {:?}", + notification, + ) + }; } else { - warn!( + debug!( subject = "notification.err", category = "notification", - "unable to serialize event notification as bytes: {}", - notification.typ - ); - } -} - -/// Notification sent to clients. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub(crate) struct EventNotification { - typ: EventNotificationTyp, - data: Ipld, - timestamp: i64, -} - -impl EventNotification { - pub(crate) fn new(typ: EventNotificationTyp, data: BTreeMap<&str, Ipld>) -> Self { - let data = data - .iter() - .map(|(key, val)| (key.to_string(), val.to_owned())) - .collect(); - - Self { - typ, - data: Ipld::Map(data), - timestamp: Utc::now().timestamp_millis(), - } - } -} - -impl DagJson for EventNotification where Ipld: From {} - -impl From for Ipld { - fn from(notification: EventNotification) -> Self { - Ipld::Map(BTreeMap::from([ - ("type".into(), notification.typ.into()), - ("data".into(), notification.data), - ("timestamp".into(), notification.timestamp.into()), - ])) - } -} - -impl TryFrom for EventNotification { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - let map = from_ipld::>(ipld)?; - - let typ: EventNotificationTyp = map - .get(TYPE_KEY) - .ok_or_else(|| anyhow!("missing {TYPE_KEY}"))? - .to_owned() - .try_into()?; - - let data = map - .get(DATA_KEY) - .ok_or_else(|| anyhow!("missing {DATA_KEY}"))? - .to_owned(); - - let timestamp = from_ipld( - map.get(TIMESTAMP_KEY) - .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? - .to_owned(), - )?; - - Ok(EventNotification { - typ, - data, - timestamp, - }) - } -} - -/// Types of notification sent to clients. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub(crate) enum EventNotificationTyp { - SwarmNotification(SwarmNotification), -} - -impl EventNotificationTyp { - pub(crate) fn workflow_info_source_label<'a>(&self) -> Option<&'a str> { - match &self { - EventNotificationTyp::SwarmNotification(SwarmNotification::ReceivedWorkflowInfo) => { - Some("provider") - } - EventNotificationTyp::SwarmNotification(SwarmNotification::GotWorkflowInfoDht) => { - Some("publisher") - } - _ => None, - } - } -} - -impl fmt::Display for EventNotificationTyp { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - EventNotificationTyp::SwarmNotification(subtype) => { - write!(f, "swarm notification: {}", subtype) - } - } - } -} - -impl DagJson for EventNotificationTyp where Ipld: From {} - -impl From for Ipld { - fn from(typ: EventNotificationTyp) -> Self { - match typ { - EventNotificationTyp::SwarmNotification(subtype) => { - Ipld::String(format!("network:{}", subtype)) - } - } - } -} - -impl TryFrom for EventNotificationTyp { - type Error = anyhow::Error; - - fn try_from(ipld: Ipld) -> Result { - if let Some((ty, subtype)) = from_ipld::(ipld)?.split_once(':') { - match ty { - "network" => Ok(EventNotificationTyp::SwarmNotification( - SwarmNotification::from_str(subtype)?, - )), - _ => Err(anyhow!("Missing event notification type: {}", ty)), - } - } else { - Err(anyhow!( - "Event notification type missing colon delimiter between type and subtype." - )) - } - } -} - -#[cfg(test)] -mod test { - use super::*; - use libp2p::PeerId; - use maplit::btreemap; - - #[test] - fn notification_bytes_rountrip() { - let peer_id = PeerId::random().to_string(); - let address: String = "/ip4/127.0.0.1/tcp/7000".to_string(); - - let notification = EventNotification::new( - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished), - btreemap! 
{ - "peerId" => Ipld::String(peer_id.clone()), - "address" => Ipld::String(address.clone()) - }, - ); - let bytes = notification.to_json().unwrap(); - - let parsed = EventNotification::from_json(bytes.as_ref()).unwrap(); - let data: BTreeMap = from_ipld(parsed.data).unwrap(); - - assert_eq!( - parsed.typ, - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished) - ); - assert_eq!(data.get("peerId").unwrap(), &peer_id); - assert_eq!(data.get("address").unwrap(), &address); - } - - #[test] - fn notification_json_string_rountrip() { - let peer_id = PeerId::random().to_string(); - let address: String = "/ip4/127.0.0.1/tcp/7000".to_string(); - - let notification = EventNotification::new( - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished), - btreemap! { - "peerId" => Ipld::String(peer_id.clone()), - "address" => Ipld::String(address.clone()), - }, - ); - let json_string = notification.to_json_string().unwrap(); - - let parsed = EventNotification::from_json_string(json_string).unwrap(); - let data: BTreeMap = from_ipld(parsed.data).unwrap(); - - assert_eq!( - parsed.typ, - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished) + "unable to serialize event notification as bytes: {:?}", + notification ); - assert_eq!(data.get("peerId").unwrap(), &peer_id); - assert_eq!(data.get("address").unwrap(), &address); } } diff --git a/homestar-runtime/src/event_handler/notification/network.rs b/homestar-runtime/src/event_handler/notification/network.rs new file mode 100644 index 00000000..7281ed09 --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network.rs @@ -0,0 +1,844 @@ +//! Notification types for [swarm] events. +//! +//! 
[swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; + +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Ipld}; +use schemars::JsonSchema; +use std::{collections::BTreeMap, fmt}; + +pub(crate) mod connection; +pub(crate) mod dht; +pub(crate) mod mdns; +pub(crate) mod pubsub; +pub(crate) mod rendezvous; +pub(crate) mod req_resp; +pub(crate) use connection::{ + ConnectionClosed, ConnectionEstablished, IncomingConnectionError, NewListenAddr, + OutgoingConnectionError, +}; +pub(crate) use dht::{ + GotReceiptDht, GotWorkflowInfoDht, PutReceiptDht, PutWorkflowInfoDht, ReceiptQuorumFailureDht, + ReceiptQuorumSuccessDht, WorkflowInfoQuorumFailureDht, WorkflowInfoQuorumSuccessDht, +}; +pub(crate) use mdns::DiscoveredMdns; +pub(crate) use pubsub::{PublishedReceiptPubsub, ReceivedReceiptPubsub}; +pub(crate) use rendezvous::{ + DiscoverServedRendezvous, DiscoveredRendezvous, PeerRegisteredRendezvous, RegisteredRendezvous, +}; +pub(crate) use req_resp::{ReceivedWorkflowInfo, SentWorkflowInfo}; + +/// Network notification type. +#[derive(Debug, Clone, JsonSchema)] +#[schemars(rename = "network")] +pub enum NetworkNotification { + /// Listening on new address notification. + #[schemars(rename = "new_listen_addr")] + NewListenAddr(NewListenAddr), + /// Connection established notification. + #[schemars(rename = "connection_established")] + ConnnectionEstablished(ConnectionEstablished), + /// Connection closed notification. + #[schemars(rename = "connection_closed")] + ConnnectionClosed(ConnectionClosed), + /// Outgoing connection error notification. + #[schemars(rename = "outgoing_connection_error")] + OutgoingConnectionError(OutgoingConnectionError), + /// Incoming connection error notification. + #[schemars(rename = "incoming_connection_error")] + IncomingConnectionError(IncomingConnectionError), + /// mDNS discovered notification. + #[schemars(rename = "discovered_mdns")] + DiscoveredMdns(DiscoveredMdns), + /// Rendezvous client discovered notification. 
+ #[schemars(rename = "discovered_rendezvous")] + DiscoveredRendezvous(DiscoveredRendezvous), + /// Rendezvous client registered notification. + #[schemars(rename = "registered_rendezvous")] + RegisteredRendezvous(RegisteredRendezvous), + /// Rendezvous discover served notification. + #[schemars(rename = "discover_served_rendezvous")] + DiscoverServedRendezvous(DiscoverServedRendezvous), + /// Rendezvous peer registered notification. + #[schemars(rename = "peer_registered_rendezvous")] + PeerRegisteredRendezvous(PeerRegisteredRendezvous), + /// Published receipt pubsub notification. + #[schemars(rename = "published_receipt_pubsub")] + PublishedReceiptPubsub(PublishedReceiptPubsub), + /// Received receipt pubsub notification. + #[schemars(rename = "received_receipt_pubsub")] + ReceivedReceiptPubsub(ReceivedReceiptPubsub), + /// Put receipt DHT notification. + #[schemars(rename = "put_receipt_dht")] + PutReceiptDht(PutReceiptDht), + /// Got receipt DHT notification. + #[schemars(rename = "got_receipt_dht")] + GotReceiptDht(GotReceiptDht), + /// Put workflow info DHT notification. + #[schemars(rename = "put_workflow_info_dht")] + PutWorkflowInfoDht(PutWorkflowInfoDht), + /// Got workflow info DHT notification. + #[schemars(rename = "got_workflow_info_dht")] + GotWorkflowInfoDht(GotWorkflowInfoDht), + /// Receipt quorum success notification. + #[schemars(rename = "receipt_quorum_success_dht")] + ReceiptQuorumSuccessDht(ReceiptQuorumSuccessDht), + /// Receipt quorum failure notification. + #[schemars(rename = "receipt_quorum_failure_dht")] + ReceiptQuorumFailureDht(ReceiptQuorumFailureDht), + /// Workflow info quorum success notification. + #[schemars(rename = "workflow_info_quorum_success_dht")] + WorkflowInfoQuorumSuccessDht(WorkflowInfoQuorumSuccessDht), + /// Workflow info quorum failure notification. + #[schemars(rename = "workflow_info_quorum_failure_dht")] + WorkflowInfoQuorumFailureDht(WorkflowInfoQuorumFailureDht), + /// Sent workflow info notification. 
+ #[schemars(rename = "sent_workflow_info")] + SentWorkflowInfo(SentWorkflowInfo), + /// Received workflow info notification. + #[schemars(rename = "received_workflow_info")] + ReceivedWorkflowInfo(ReceivedWorkflowInfo), +} + +#[derive(Debug, Clone, PartialEq)] +pub(crate) enum WorkflowInfoSource { + Dht, + RequestResponse, +} + +impl fmt::Display for NetworkNotification { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + NetworkNotification::NewListenAddr(_) => write!(f, "new_listen_addr"), + NetworkNotification::ConnnectionEstablished(_) => write!(f, "connection_established"), + NetworkNotification::ConnnectionClosed(_) => write!(f, "connection_closed"), + NetworkNotification::OutgoingConnectionError(_) => { + write!(f, "outgoing_connection_error") + } + NetworkNotification::IncomingConnectionError(_) => { + write!(f, "incoming_connection_error") + } + NetworkNotification::DiscoveredMdns(_) => write!(f, "discovered_mdns"), + NetworkNotification::DiscoveredRendezvous(_) => write!(f, "discovered_rendezvous"), + NetworkNotification::RegisteredRendezvous(_) => write!(f, "registered_rendezvous"), + NetworkNotification::DiscoverServedRendezvous(_) => { + write!(f, "discover_served_rendezvous") + } + NetworkNotification::PeerRegisteredRendezvous(_) => { + write!(f, "peer_registered_rendezvous") + } + NetworkNotification::PublishedReceiptPubsub(_) => write!(f, "published_receipt_pubsub"), + NetworkNotification::ReceivedReceiptPubsub(_) => write!(f, "received_receipt_pubsub"), + NetworkNotification::PutReceiptDht(_) => write!(f, "put_receipt_dht"), + NetworkNotification::GotReceiptDht(_) => write!(f, "got_receipt_dht"), + NetworkNotification::PutWorkflowInfoDht(_) => write!(f, "put_workflow_info_dht"), + NetworkNotification::GotWorkflowInfoDht(_) => write!(f, "got_workflow_info_dht"), + NetworkNotification::ReceiptQuorumSuccessDht(_) => { + write!(f, "receipt_quorum_success_dht") + } + NetworkNotification::ReceiptQuorumFailureDht(_) => { + 
write!(f, "receipt_quorum_failure_dht") + } + NetworkNotification::WorkflowInfoQuorumSuccessDht(_) => { + write!(f, "workflow_info_quorum_success_dht") + } + NetworkNotification::WorkflowInfoQuorumFailureDht(_) => { + write!(f, "workflow_info_quorum_failure_dht") + } + NetworkNotification::SentWorkflowInfo(_) => { + write!(f, "sent_workflow_info") + } + NetworkNotification::ReceivedWorkflowInfo(_) => { + write!(f, "received_workflow_info") + } + } + } +} + +impl DagJson for NetworkNotification {} + +impl From for Ipld { + fn from(notification: NetworkNotification) -> Self { + match notification { + NetworkNotification::NewListenAddr(n) => { + Ipld::Map(BTreeMap::from([("new_listen_addr".into(), n.into())])) + } + NetworkNotification::ConnnectionEstablished(n) => Ipld::Map(BTreeMap::from([( + "connection_established".into(), + n.into(), + )])), + NetworkNotification::ConnnectionClosed(n) => { + Ipld::Map(BTreeMap::from([("connection_closed".into(), n.into())])) + } + NetworkNotification::OutgoingConnectionError(n) => Ipld::Map(BTreeMap::from([( + "outgoing_connection_error".into(), + n.into(), + )])), + NetworkNotification::IncomingConnectionError(n) => Ipld::Map(BTreeMap::from([( + "incoming_connection_error".into(), + n.into(), + )])), + NetworkNotification::DiscoveredMdns(n) => { + Ipld::Map(BTreeMap::from([("discovered_mdns".into(), n.into())])) + } + NetworkNotification::DiscoveredRendezvous(n) => { + Ipld::Map(BTreeMap::from([("discovered_rendezvous".into(), n.into())])) + } + NetworkNotification::RegisteredRendezvous(n) => { + Ipld::Map(BTreeMap::from([("registered_rendezvous".into(), n.into())])) + } + NetworkNotification::DiscoverServedRendezvous(n) => Ipld::Map(BTreeMap::from([( + "discover_served_rendezvous".into(), + n.into(), + )])), + NetworkNotification::PeerRegisteredRendezvous(n) => Ipld::Map(BTreeMap::from([( + "peer_registered_rendezvous".into(), + n.into(), + )])), + NetworkNotification::PublishedReceiptPubsub(n) => Ipld::Map(BTreeMap::from([( + 
"published_receipt_pubsub".into(), + n.into(), + )])), + NetworkNotification::ReceivedReceiptPubsub(n) => Ipld::Map(BTreeMap::from([( + "received_receipt_pubsub".into(), + n.into(), + )])), + NetworkNotification::PutReceiptDht(n) => { + Ipld::Map(BTreeMap::from([("put_receipt_dht".into(), n.into())])) + } + NetworkNotification::GotReceiptDht(n) => { + Ipld::Map(BTreeMap::from([("got_receipt_dht".into(), n.into())])) + } + NetworkNotification::PutWorkflowInfoDht(n) => { + Ipld::Map(BTreeMap::from([("put_workflow_info_dht".into(), n.into())])) + } + NetworkNotification::GotWorkflowInfoDht(n) => { + Ipld::Map(BTreeMap::from([("got_workflow_info_dht".into(), n.into())])) + } + NetworkNotification::ReceiptQuorumSuccessDht(n) => Ipld::Map(BTreeMap::from([( + "receipt_quorum_success_dht".into(), + n.into(), + )])), + NetworkNotification::ReceiptQuorumFailureDht(n) => Ipld::Map(BTreeMap::from([( + "receipt_quorum_failure_dht".into(), + n.into(), + )])), + NetworkNotification::WorkflowInfoQuorumSuccessDht(n) => Ipld::Map(BTreeMap::from([( + "workflow_info_quorum_success_dht".into(), + n.into(), + )])), + NetworkNotification::WorkflowInfoQuorumFailureDht(n) => Ipld::Map(BTreeMap::from([( + "workflow_info_quorum_failure_dht".into(), + n.into(), + )])), + NetworkNotification::SentWorkflowInfo(n) => { + Ipld::Map(BTreeMap::from([("sent_workflow_info".into(), n.into())])) + } + NetworkNotification::ReceivedWorkflowInfo(n) => Ipld::Map(BTreeMap::from([( + "received_workflow_info".into(), + n.into(), + )])), + } + } +} + +impl TryFrom for NetworkNotification { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + if let Some((key, val)) = map.first_key_value() { + match key.as_str() { + "new_listen_addr" => Ok(NetworkNotification::NewListenAddr( + NewListenAddr::try_from(val.to_owned())?, + )), + "connection_established" => Ok(NetworkNotification::ConnnectionEstablished( + ConnectionEstablished::try_from(val.to_owned())?, + )), 
+ "connection_closed" => Ok(NetworkNotification::ConnnectionClosed( + ConnectionClosed::try_from(val.to_owned())?, + )), + "outgoing_connection_error" => Ok(NetworkNotification::OutgoingConnectionError( + OutgoingConnectionError::try_from(val.to_owned())?, + )), + "incoming_connection_error" => Ok(NetworkNotification::IncomingConnectionError( + IncomingConnectionError::try_from(val.to_owned())?, + )), + "discovered_mdns" => Ok(NetworkNotification::DiscoveredMdns( + DiscoveredMdns::try_from(val.to_owned())?, + )), + "discovered_rendezvous" => Ok(NetworkNotification::DiscoveredRendezvous( + DiscoveredRendezvous::try_from(val.to_owned())?, + )), + "registered_rendezvous" => Ok(NetworkNotification::RegisteredRendezvous( + RegisteredRendezvous::try_from(val.to_owned())?, + )), + "discover_served_rendezvous" => Ok(NetworkNotification::DiscoverServedRendezvous( + DiscoverServedRendezvous::try_from(val.to_owned())?, + )), + "peer_registered_rendezvous" => Ok(NetworkNotification::PeerRegisteredRendezvous( + PeerRegisteredRendezvous::try_from(val.to_owned())?, + )), + "published_receipt_pubsub" => Ok(NetworkNotification::PublishedReceiptPubsub( + PublishedReceiptPubsub::try_from(val.to_owned())?, + )), + "received_receipt_pubsub" => Ok(NetworkNotification::ReceivedReceiptPubsub( + ReceivedReceiptPubsub::try_from(val.to_owned())?, + )), + "put_receipt_dht" => Ok(NetworkNotification::PutReceiptDht( + PutReceiptDht::try_from(val.to_owned())?, + )), + "got_receipt_dht" => Ok(NetworkNotification::GotReceiptDht( + GotReceiptDht::try_from(val.to_owned())?, + )), + "put_workflow_info_dht" => Ok(NetworkNotification::PutWorkflowInfoDht( + PutWorkflowInfoDht::try_from(val.to_owned())?, + )), + "got_workflow_info_dht" => Ok(NetworkNotification::GotWorkflowInfoDht( + GotWorkflowInfoDht::try_from(val.to_owned())?, + )), + "receipt_quorum_success_dht" => Ok(NetworkNotification::ReceiptQuorumSuccessDht( + ReceiptQuorumSuccessDht::try_from(val.to_owned())?, + )), + 
"receipt_quorum_failure_dht" => Ok(NetworkNotification::ReceiptQuorumFailureDht( + ReceiptQuorumFailureDht::try_from(val.to_owned())?, + )), + "workflow_info_quorum_success_dht" => { + Ok(NetworkNotification::WorkflowInfoQuorumSuccessDht( + WorkflowInfoQuorumSuccessDht::try_from(val.to_owned())?, + )) + } + "workflow_info_quorum_failure_dht" => { + Ok(NetworkNotification::WorkflowInfoQuorumFailureDht( + WorkflowInfoQuorumFailureDht::try_from(val.to_owned())?, + )) + } + "sent_workflow_info" => Ok(NetworkNotification::SentWorkflowInfo( + SentWorkflowInfo::try_from(val.to_owned())?, + )), + "received_workflow_info" => Ok(NetworkNotification::ReceivedWorkflowInfo( + ReceivedWorkflowInfo::try_from(val.to_owned())?, + )), + _ => Err(anyhow!("Unknown network notification tag type")), + } + } else { + Err(anyhow!("Network notification was an empty map")) + } + } +} + +#[cfg(test)] +mod test { + use super::*; + + use connection::NewListenAddr; + use faststr::FastStr; + use homestar_invocation::test_utils::cid::generate_cid; + use libipld::Cid; + use libp2p::{ + swarm::{DialError, ListenError}, + Multiaddr, PeerId, + }; + + use rand::thread_rng; + use std::str::FromStr; + + #[derive(Clone, Debug)] + struct Fixtures { + address: Multiaddr, + addresses: Vec, + cid: Cid, + connected_peer_count: usize, + name: FastStr, + num_tasks: u32, + peer_id: PeerId, + peers: Vec, + peers_map: BTreeMap, + peers_map_vec_addr: BTreeMap>, + progress: Vec, + progress_count: u32, + quorum: usize, + ran: Cid, + } + + fn generate_fixtures() -> Fixtures { + Fixtures { + address: Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + addresses: vec![ + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), + ], + cid: generate_cid(&mut thread_rng()), + connected_peer_count: 1, + name: FastStr::new("Strong Bad"), + num_tasks: 1, + peer_id: PeerId::random(), + peers: vec![PeerId::random(), PeerId::random()], + peers_map: 
BTreeMap::from([ + ( + PeerId::random(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap(), + ), + ( + PeerId::random(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), + ), + ]), + peers_map_vec_addr: BTreeMap::from([ + ( + PeerId::random(), + vec![Multiaddr::from_str("/ip4/127.0.0.1/tcp/7000").unwrap()], + ), + ( + PeerId::random(), + vec![ + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7001").unwrap(), + Multiaddr::from_str("/ip4/127.0.0.1/tcp/7002").unwrap(), + ], + ), + ]), + progress: vec![generate_cid(&mut thread_rng())], + progress_count: 1, + quorum: 3, + ran: generate_cid(&mut thread_rng()), + } + } + + fn generate_notifications(fixtures: Fixtures) -> Vec<(i64, NetworkNotification)> { + let Fixtures { + address, + addresses, + cid, + connected_peer_count, + name, + num_tasks, + peer_id, + peers, + peers_map, + peers_map_vec_addr, + progress, + progress_count, + quorum, + ran, + } = fixtures; + + let new_listen_addr = NewListenAddr::new(peer_id, address.clone()); + let connection_established = ConnectionEstablished::new(peer_id, address.clone()); + let connection_closed = ConnectionClosed::new(peer_id, address.clone()); + let outgoing_connection_error = + OutgoingConnectionError::new(Some(peer_id), DialError::NoAddresses); + let incoming_connection_error = IncomingConnectionError::new(ListenError::Aborted); + let discovered_mdns = DiscoveredMdns::new(peers_map); + let discovered_rendezvous = DiscoveredRendezvous::new(peer_id, peers_map_vec_addr); + let registered_rendezvous = RegisteredRendezvous::new(peer_id); + let discover_served_rendezvous = DiscoverServedRendezvous::new(peer_id); + let peer_registered_rendezvous = PeerRegisteredRendezvous::new(peer_id, addresses); + let published_receipt_pubsub = PublishedReceiptPubsub::new(cid, ran.to_string()); + let received_receipt_pubsub = ReceivedReceiptPubsub::new(peer_id, cid, ran.to_string()); + let put_receipt_dht = PutReceiptDht::new(cid, ran.to_string()); + let got_receipt_dht = 
GotReceiptDht::new(Some(peer_id), cid, ran.to_string()); + let put_workflow_info_dht = PutWorkflowInfoDht::new( + cid, + Some(name.clone()), + num_tasks, + progress.clone(), + progress_count, + ); + let got_workflow_info_dht = GotWorkflowInfoDht::new( + Some(peer_id), + cid, + Some(name.clone()), + num_tasks, + progress.clone(), + progress_count, + ); + let receipt_quorum_success_dht = + ReceiptQuorumSuccessDht::new(FastStr::new(cid.to_string()), quorum); + let receipt_quorum_failure_dht = ReceiptQuorumFailureDht::new( + FastStr::new(cid.to_string()), + quorum, + connected_peer_count, + peers.clone(), + ); + let workflow_info_quorum_success_dht = + WorkflowInfoQuorumSuccessDht::new(FastStr::new(cid.to_string()), quorum); + let workflow_info_quorum_failure_dht = WorkflowInfoQuorumFailureDht::new( + FastStr::new(cid.to_string()), + quorum, + connected_peer_count, + peers, + ); + let sent_workflow_info = SentWorkflowInfo::new( + peer_id, + cid, + Some(name.clone()), + num_tasks, + progress.clone(), + progress_count, + ); + let received_workflow_info = ReceivedWorkflowInfo::new( + Some(peer_id), + cid, + Some(name), + num_tasks, + progress, + progress_count, + ); + + vec![ + ( + new_listen_addr.timestamp().to_owned(), + NetworkNotification::NewListenAddr(new_listen_addr), + ), + ( + connection_established.timestamp().to_owned(), + NetworkNotification::ConnnectionEstablished(connection_established), + ), + ( + connection_closed.timestamp().to_owned(), + NetworkNotification::ConnnectionClosed(connection_closed), + ), + ( + outgoing_connection_error.timestamp().to_owned(), + NetworkNotification::OutgoingConnectionError(outgoing_connection_error), + ), + ( + incoming_connection_error.timestamp().to_owned(), + NetworkNotification::IncomingConnectionError(incoming_connection_error), + ), + ( + discovered_mdns.timestamp().to_owned(), + NetworkNotification::DiscoveredMdns(discovered_mdns), + ), + ( + discovered_rendezvous.timestamp().to_owned(), + 
NetworkNotification::DiscoveredRendezvous(discovered_rendezvous), + ), + ( + registered_rendezvous.timestamp().to_owned(), + NetworkNotification::RegisteredRendezvous(registered_rendezvous), + ), + ( + discover_served_rendezvous.timestamp().to_owned(), + NetworkNotification::DiscoverServedRendezvous(discover_served_rendezvous), + ), + ( + peer_registered_rendezvous.timestamp().to_owned(), + NetworkNotification::PeerRegisteredRendezvous(peer_registered_rendezvous), + ), + ( + published_receipt_pubsub.timestamp().to_owned(), + NetworkNotification::PublishedReceiptPubsub(published_receipt_pubsub), + ), + ( + received_receipt_pubsub.timestamp().to_owned(), + NetworkNotification::ReceivedReceiptPubsub(received_receipt_pubsub), + ), + ( + put_receipt_dht.timestamp().to_owned(), + NetworkNotification::PutReceiptDht(put_receipt_dht), + ), + ( + got_receipt_dht.timestamp().to_owned(), + NetworkNotification::GotReceiptDht(got_receipt_dht), + ), + ( + put_workflow_info_dht.timestamp().to_owned(), + NetworkNotification::PutWorkflowInfoDht(put_workflow_info_dht), + ), + ( + got_workflow_info_dht.timestamp().to_owned(), + NetworkNotification::GotWorkflowInfoDht(got_workflow_info_dht), + ), + ( + receipt_quorum_success_dht.timestamp().to_owned(), + NetworkNotification::ReceiptQuorumSuccessDht(receipt_quorum_success_dht), + ), + ( + receipt_quorum_failure_dht.timestamp().to_owned(), + NetworkNotification::ReceiptQuorumFailureDht(receipt_quorum_failure_dht), + ), + ( + workflow_info_quorum_success_dht.timestamp().to_owned(), + NetworkNotification::WorkflowInfoQuorumSuccessDht(workflow_info_quorum_success_dht), + ), + ( + workflow_info_quorum_failure_dht.timestamp().to_owned(), + NetworkNotification::WorkflowInfoQuorumFailureDht(workflow_info_quorum_failure_dht), + ), + ( + sent_workflow_info.timestamp().to_owned(), + NetworkNotification::SentWorkflowInfo(sent_workflow_info), + ), + ( + received_workflow_info.timestamp().to_owned(), + 
NetworkNotification::ReceivedWorkflowInfo(received_workflow_info), + ), + ] + } + + fn check_notification(timestamp: &i64, notification: NetworkNotification, fixtures: Fixtures) { + let Fixtures { + address, + addresses, + cid, + connected_peer_count, + name, + num_tasks, + peer_id, + peers, + peers_map, + peers_map_vec_addr, + progress, + progress_count, + quorum, + ran, + } = fixtures; + + match notification { + NetworkNotification::NewListenAddr(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.peer_id()).unwrap(), peer_id); + assert_eq!(Multiaddr::from_str(n.address()).unwrap(), address); + } + NetworkNotification::ConnnectionEstablished(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.peer_id()).unwrap(), peer_id); + assert_eq!(Multiaddr::from_str(n.address()).unwrap(), address); + } + NetworkNotification::ConnnectionClosed(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.peer_id()).unwrap(), peer_id); + assert_eq!(Multiaddr::from_str(n.address()).unwrap(), address); + } + NetworkNotification::OutgoingConnectionError(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!( + n.peer_id().as_ref().map(|p| PeerId::from_str(&p).unwrap()), + Some(peer_id) + ); + assert_eq!(n.error().to_string(), DialError::NoAddresses.to_string()); + } + NetworkNotification::IncomingConnectionError(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(n.error().to_string(), ListenError::Aborted.to_string()); + } + NetworkNotification::DiscoveredMdns(n) => { + assert_eq!(n.timestamp(), timestamp); + + for peer in n.peers() { + assert_eq!( + Multiaddr::from_str(&peer.1).unwrap(), + peers_map[&PeerId::from_str(&peer.0).unwrap()] + ) + } + } + NetworkNotification::DiscoveredRendezvous(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.server()).unwrap(), peer_id); + + for peer in n.peers() { + assert_eq!( + peer.1 + .iter() + .map(|address| 
Multiaddr::from_str(address).unwrap()) + .collect::>(), + peers_map_vec_addr[&PeerId::from_str(&peer.0).unwrap()] + ) + } + } + NetworkNotification::RegisteredRendezvous(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.server()).unwrap(), peer_id); + } + NetworkNotification::DiscoverServedRendezvous(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.enquirer()).unwrap(), peer_id); + } + NetworkNotification::PeerRegisteredRendezvous(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.peer_id()).unwrap(), peer_id); + assert_eq!( + n.addresses() + .iter() + .map(|address| Multiaddr::from_str(address).unwrap()) + .collect::>(), + addresses + ); + } + NetworkNotification::PublishedReceiptPubsub(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(Cid::from_str(n.ran()).unwrap(), ran); + } + NetworkNotification::ReceivedReceiptPubsub(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(n.publisher()).unwrap(), peer_id); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(Cid::from_str(n.ran()).unwrap(), ran); + } + NetworkNotification::PutReceiptDht(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(Cid::from_str(n.ran()).unwrap(), ran); + } + NetworkNotification::GotReceiptDht(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!( + n.publisher() + .as_ref() + .map(|p| PeerId::from_str(&p).unwrap()), + Some(peer_id) + ); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(Cid::from_str(n.ran()).unwrap(), ran); + } + NetworkNotification::PutWorkflowInfoDht(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(n.name().as_ref().map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks(), &num_tasks); + assert_eq!( + n.progress() + .iter() 
+ .map(|cid| Cid::from_str(&cid).unwrap()) + .collect::>(), + progress + ); + assert_eq!(n.progress_count(), &progress_count); + } + NetworkNotification::GotWorkflowInfoDht(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!( + n.publisher() + .as_ref() + .map(|p| PeerId::from_str(&p).unwrap()), + Some(peer_id) + ); + assert_eq!(Cid::from_str(&n.cid()).unwrap(), cid); + assert_eq!(n.name().as_ref().map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks(), &num_tasks); + assert_eq!( + n.progress() + .iter() + .map(|cid| Cid::from_str(&cid).unwrap()) + .collect::>(), + progress + ); + assert_eq!(n.progress_count(), &progress_count); + } + NetworkNotification::ReceiptQuorumSuccessDht(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(FastStr::new(n.cid()), FastStr::new(cid.to_string())); + assert_eq!(n.quorum(), &quorum); + } + NetworkNotification::ReceiptQuorumFailureDht(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(FastStr::new(n.cid()), FastStr::new(cid.to_string())); + assert_eq!(n.quorum(), &quorum); + assert_eq!(n.connected_peer_count(), &connected_peer_count); + assert_eq!( + n.stored_to_peers() + .iter() + .map(|p| PeerId::from_str(p).unwrap()) + .collect::>(), + peers + ); + } + NetworkNotification::WorkflowInfoQuorumSuccessDht(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(FastStr::new(n.cid()), FastStr::new(cid.to_string())); + assert_eq!(n.quorum(), &quorum); + } + NetworkNotification::WorkflowInfoQuorumFailureDht(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(FastStr::new(n.cid()), FastStr::new(cid.to_string())); + assert_eq!(n.quorum(), &quorum); + assert_eq!(n.connected_peer_count(), &connected_peer_count); + assert_eq!( + n.stored_to_peers() + .iter() + .map(|p| PeerId::from_str(p).unwrap()) + .collect::>(), + peers + ); + } + NetworkNotification::SentWorkflowInfo(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!(PeerId::from_str(&n.requestor()).unwrap(), peer_id); 
+ assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(n.name().as_ref().map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks(), &num_tasks); + assert_eq!( + n.progress() + .iter() + .map(|cid| Cid::from_str(&cid).unwrap()) + .collect::>(), + progress + ); + assert_eq!(n.progress_count(), &progress_count); + } + NetworkNotification::ReceivedWorkflowInfo(n) => { + assert_eq!(n.timestamp(), timestamp); + assert_eq!( + n.provider().as_ref().map(|p| PeerId::from_str(&p).unwrap()), + Some(peer_id) + ); + assert_eq!(Cid::from_str(n.cid()).unwrap(), cid); + assert_eq!(n.name().as_ref().map(|name| FastStr::new(name)), Some(name)); + assert_eq!(n.num_tasks(), &num_tasks); + assert_eq!( + n.progress() + .iter() + .map(|cid| Cid::from_str(&cid).unwrap()) + .collect::>(), + progress + ); + assert_eq!(n.progress_count(), &progress_count); + } + } + } + + #[test] + fn notification_bytes_rountrip() { + let fixtures = generate_fixtures(); + + // Generate notifications and convert them to bytes + let notifications: Vec<(i64, Vec)> = generate_notifications(fixtures.clone()) + .into_iter() + .map(|(timestamp, notification)| (timestamp, notification.to_json().unwrap())) + .collect(); + + // Convert notifications back and check them + for (timestamp, bytes) in notifications { + check_notification( + ×tamp, + NetworkNotification::from_json(bytes.as_ref()).unwrap(), + fixtures.clone(), + ) + } + } + + #[test] + fn notification_json_string_rountrip() { + let fixtures = generate_fixtures(); + + // Generate notifications and convert them to JSON strings + let notifications: Vec<(i64, String)> = generate_notifications(fixtures.clone()) + .into_iter() + .map(|(timestamp, notification)| (timestamp, notification.to_json_string().unwrap())) + .collect(); + + // Convert notifications back and check them + for (timestamp, json) in notifications { + check_notification( + ×tamp, + NetworkNotification::from_json_string(json).unwrap(), + fixtures.clone(), + ) + } + } +} diff 
--git a/homestar-runtime/src/event_handler/notification/network/connection.rs b/homestar-runtime/src/event_handler/notification/network/connection.rs new file mode 100644 index 00000000..59ddc31a --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/connection.rs @@ -0,0 +1,325 @@ +//! Notification types for [swarm] connection events. +//! +//! [swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Ipld}; +use libp2p::{ + swarm::{DialError, ListenError}, + Multiaddr, PeerId, +}; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const ADDRESS_KEY: &str = "address"; +const ERROR_KEY: &str = "error"; +const PEER_KEY: &str = "peer_id"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "new_listen_addr")] +pub struct NewListenAddr { + timestamp: i64, + peer_id: String, + address: String, +} + +impl NewListenAddr { + pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> NewListenAddr { + NewListenAddr { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + address: address.to_string(), + } + } +} + +impl DagJson for NewListenAddr {} + +impl From for Ipld { + fn from(notification: NewListenAddr) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + (ADDRESS_KEY.into(), notification.address.into()), + ])) + } +} + +impl TryFrom for NewListenAddr { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = from_ipld( + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? 
+ .to_owned(), + )?; + + let address = from_ipld( + map.get(ADDRESS_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? + .to_owned(), + )?; + + Ok(NewListenAddr { + timestamp, + peer_id, + address, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "connection_established")] +pub struct ConnectionEstablished { + timestamp: i64, + peer_id: String, + address: String, +} + +impl ConnectionEstablished { + pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> ConnectionEstablished { + ConnectionEstablished { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + address: address.to_string(), + } + } +} + +impl DagJson for ConnectionEstablished {} + +impl From for Ipld { + fn from(notification: ConnectionEstablished) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + (ADDRESS_KEY.into(), notification.address.into()), + ])) + } +} + +impl TryFrom for ConnectionEstablished { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = from_ipld( + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? + .to_owned(), + )?; + + let address = from_ipld( + map.get(ADDRESS_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ConnectionEstablished { + timestamp, + peer_id, + address, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "connection_closed")] +pub struct ConnectionClosed { + timestamp: i64, + peer_id: String, + address: String, +} + +impl ConnectionClosed { + pub(crate) fn new(peer_id: PeerId, address: Multiaddr) -> ConnectionClosed { + ConnectionClosed { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + address: address.to_string(), + } + } +} + +impl DagJson for ConnectionClosed {} + +impl From for Ipld { + fn from(notification: ConnectionClosed) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + (ADDRESS_KEY.into(), notification.address.into()), + ])) + } +} + +impl TryFrom for ConnectionClosed { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = from_ipld( + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? + .to_owned(), + )?; + + let address = from_ipld( + map.get(ADDRESS_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESS_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ConnectionClosed { + timestamp, + peer_id, + address, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "outgoing_connection_error")] +pub struct OutgoingConnectionError { + timestamp: i64, + peer_id: Option, + error: String, +} + +impl OutgoingConnectionError { + pub(crate) fn new(peer_id: Option, error: DialError) -> OutgoingConnectionError { + OutgoingConnectionError { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.map(|p| p.to_string()), + error: error.to_string(), + } + } +} + +impl DagJson for OutgoingConnectionError {} + +impl From for Ipld { + fn from(notification: OutgoingConnectionError) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PEER_KEY.into(), + notification + .peer_id + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (ERROR_KEY.into(), notification.error.into()), + ])) + } +} + +impl TryFrom for OutgoingConnectionError { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = map + .get(PEER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let error = from_ipld( + map.get(ERROR_KEY) + .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? 
+ .to_owned(), + )?; + + Ok(OutgoingConnectionError { + timestamp, + peer_id, + error, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "incoming_connection_error")] +pub struct IncomingConnectionError { + timestamp: i64, + error: String, +} + +impl IncomingConnectionError { + pub(crate) fn new(error: ListenError) -> IncomingConnectionError { + IncomingConnectionError { + timestamp: Utc::now().timestamp_millis(), + error: error.to_string(), + } + } +} + +impl DagJson for IncomingConnectionError {} + +impl From for Ipld { + fn from(notification: IncomingConnectionError) -> Self { + Ipld::Map(BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (ERROR_KEY.into(), notification.error.into()), + ])) + } +} + +impl TryFrom for IncomingConnectionError { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let error = from_ipld( + map.get(ERROR_KEY) + .ok_or_else(|| anyhow!("missing {ERROR_KEY}"))? + .to_owned(), + )?; + + Ok(IncomingConnectionError { timestamp, error }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/network/dht.rs b/homestar-runtime/src/event_handler/notification/network/dht.rs new file mode 100644 index 00000000..abac34d2 --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/dht.rs @@ -0,0 +1,788 @@ +//! Notification types for [swarm] DHT events. +//! +//! 
[swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use faststr::FastStr; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Cid, Ipld}; +use libp2p::PeerId; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const CID_KEY: &str = "cid"; +const CONNECTED_PEER_COUNT_KEY: &str = "connected_peer_count"; +const NAME_KEY: &str = "name"; +const NUM_TASKS_KEY: &str = "num_tasks"; +const PROGRESS_KEY: &str = "progress"; +const PROGRESS_COUNT_KEY: &str = "progress_count"; +const PUBLISHER_KEY: &str = "publisher"; +const QUORUM_KEY: &str = "quorum"; +const RAN_KEY: &str = "ran"; +const STORED_TO_PEERS_KEY: &str = "stored_to_peers"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "put_receipt_dht")] +pub struct PutReceiptDht { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl PutReceiptDht { + pub(crate) fn new(cid: Cid, ran: String) -> PutReceiptDht { + PutReceiptDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for PutReceiptDht {} + +impl From for Ipld { + fn from(notification: PutReceiptDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PutReceiptDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
+ .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? + .to_owned(), + )?; + + Ok(PutReceiptDht { + timestamp, + cid, + ran, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "got_receipt_dht")] +pub struct GotReceiptDht { + timestamp: i64, + #[schemars(description = "Receipt publisher peer ID")] + publisher: Option, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl GotReceiptDht { + pub(crate) fn new(publisher: Option, cid: Cid, ran: String) -> GotReceiptDht { + GotReceiptDht { + timestamp: Utc::now().timestamp_millis(), + publisher: publisher.map(|p| p.to_string()), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for GotReceiptDht {} + +impl From for Ipld { + fn from(notification: GotReceiptDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PUBLISHER_KEY.into(), + notification + .publisher + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for GotReceiptDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let publisher = map + .get(PUBLISHER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
+ .to_owned(), + )?; + + Ok(GotReceiptDht { + timestamp, + publisher, + cid, + ran, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "put_workflow_info_dht")] +pub struct PutWorkflowInfoDht { + timestamp: i64, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl PutWorkflowInfoDht { + pub(crate) fn new( + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> PutWorkflowInfoDht { + PutWorkflowInfoDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for PutWorkflowInfoDht {} + +impl From for Ipld { + fn from(notification: PutWorkflowInfoDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PutWorkflowInfoDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+ .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? + .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? + .to_owned(), + )?; + + Ok(PutWorkflowInfoDht { + timestamp, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "got_workflow_info_dht")] +pub struct GotWorkflowInfoDht { + timestamp: i64, + #[schemars(description = "Workflow info publisher peer ID")] + publisher: Option, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl GotWorkflowInfoDht { + pub(crate) fn new( + publisher: Option, + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> GotWorkflowInfoDht { + GotWorkflowInfoDht { + timestamp: Utc::now().timestamp_millis(), + publisher: publisher.map(|p| p.to_string()), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for GotWorkflowInfoDht {} + +impl From for Ipld { + fn from(notification: GotWorkflowInfoDht) -> 
Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PUBLISHER_KEY.into(), + notification + .publisher + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for GotWorkflowInfoDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let publisher = map + .get(PUBLISHER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? + .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? 
+ .to_owned(), + )?; + + Ok(GotWorkflowInfoDht { + timestamp, + publisher, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "receipt_quorum_success_dht")] +pub struct ReceiptQuorumSuccessDht { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Number of peers participating in quorum")] + quorum: usize, +} + +impl ReceiptQuorumSuccessDht { + pub(crate) fn new(cid: FastStr, quorum: usize) -> ReceiptQuorumSuccessDht { + ReceiptQuorumSuccessDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + } + } +} + +impl DagJson for ReceiptQuorumSuccessDht {} + +impl From for Ipld { + fn from(notification: ReceiptQuorumSuccessDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceiptQuorumSuccessDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ReceiptQuorumSuccessDht { + timestamp, + cid, + quorum, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "receipt_quorum_failure_dht")] +pub struct ReceiptQuorumFailureDht { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Number of peers required for quorum")] + quorum: usize, + #[schemars(description = "Number of connected peers")] + connected_peer_count: usize, + #[schemars(description = "Peers participating in quorum")] + stored_to_peers: Vec, +} + +impl ReceiptQuorumFailureDht { + pub(crate) fn new( + cid: FastStr, + quorum: usize, + connected_peer_count: usize, + stored_to_peers: Vec, + ) -> ReceiptQuorumFailureDht { + ReceiptQuorumFailureDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + connected_peer_count, + stored_to_peers: stored_to_peers.iter().map(|p| p.to_string()).collect(), + } + } +} + +impl DagJson for ReceiptQuorumFailureDht {} + +impl From for Ipld { + fn from(notification: ReceiptQuorumFailureDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ( + CONNECTED_PEER_COUNT_KEY.into(), + notification.connected_peer_count.into(), + ), + ( + STORED_TO_PEERS_KEY.into(), + Ipld::List( + notification + .stored_to_peers + .iter() + .map(|p| Ipld::String(p.to_string())) + .collect(), + ), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceiptQuorumFailureDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
+ .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? + .to_owned(), + )?; + + let connected_peer_count = from_ipld( + map.get(CONNECTED_PEER_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {CONNECTED_PEER_COUNT_KEY}"))? + .to_owned(), + )?; + + let stored_to_peers = from_ipld( + map.get(STORED_TO_PEERS_KEY) + .ok_or_else(|| anyhow!("missing {STORED_TO_PEERS_KEY}"))? + .to_owned(), + )?; + + Ok(ReceiptQuorumFailureDht { + timestamp, + cid, + quorum, + connected_peer_count, + stored_to_peers, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "workflow_info_quorum_success_dht")] +pub struct WorkflowInfoQuorumSuccessDht { + timestamp: i64, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Number of peers participating in quorum")] + quorum: usize, +} + +impl WorkflowInfoQuorumSuccessDht { + pub(crate) fn new(cid: FastStr, quorum: usize) -> WorkflowInfoQuorumSuccessDht { + WorkflowInfoQuorumSuccessDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + } + } +} + +impl DagJson for WorkflowInfoQuorumSuccessDht {} + +impl From for Ipld { + fn from(notification: WorkflowInfoQuorumSuccessDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for WorkflowInfoQuorumSuccessDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? 
+ .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? + .to_owned(), + )?; + + Ok(WorkflowInfoQuorumSuccessDht { + timestamp, + cid, + quorum, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "workflow_info_quorum_failure_dht")] +pub struct WorkflowInfoQuorumFailureDht { + timestamp: i64, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Number of peers required for quorum")] + quorum: usize, + #[schemars(description = "Number of connected peers")] + connected_peer_count: usize, + #[schemars(description = "Peers participating in quorum")] + stored_to_peers: Vec, +} + +impl WorkflowInfoQuorumFailureDht { + pub(crate) fn new( + cid: FastStr, + quorum: usize, + connected_peer_count: usize, + stored_to_peers: Vec, + ) -> WorkflowInfoQuorumFailureDht { + WorkflowInfoQuorumFailureDht { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + quorum, + connected_peer_count, + stored_to_peers: stored_to_peers.iter().map(|p| p.to_string()).collect(), + } + } +} + +impl DagJson for WorkflowInfoQuorumFailureDht {} + +impl From for Ipld { + fn from(notification: WorkflowInfoQuorumFailureDht) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (QUORUM_KEY.into(), notification.quorum.into()), + ( + CONNECTED_PEER_COUNT_KEY.into(), + notification.connected_peer_count.into(), + ), + ( + STORED_TO_PEERS_KEY.into(), + Ipld::List( + notification + .stored_to_peers + .iter() + .map(|p| Ipld::String(p.to_string())) + .collect(), + ), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for WorkflowInfoQuorumFailureDht { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+ .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let quorum = from_ipld( + map.get(QUORUM_KEY) + .ok_or_else(|| anyhow!("missing {QUORUM_KEY}"))? + .to_owned(), + )?; + + let connected_peer_count = from_ipld( + map.get(CONNECTED_PEER_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {CONNECTED_PEER_COUNT_KEY}"))? + .to_owned(), + )?; + + let stored_to_peers = from_ipld( + map.get(STORED_TO_PEERS_KEY) + .ok_or_else(|| anyhow!("missing {STORED_TO_PEERS_KEY}"))? + .to_owned(), + )?; + + Ok(WorkflowInfoQuorumFailureDht { + timestamp, + cid, + quorum, + connected_peer_count, + stored_to_peers, + }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/network/mdns.rs b/homestar-runtime/src/event_handler/notification/network/mdns.rs new file mode 100644 index 00000000..36938a9f --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/mdns.rs @@ -0,0 +1,76 @@ +//! Notification types for [swarm] mDNS events. +//! +//! 
[swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Ipld}; +use libp2p::{Multiaddr, PeerId}; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const PEERS_KEY: &str = "peers"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "discovered_mdns")] +pub struct DiscoveredMdns { + timestamp: i64, + #[schemars(description = "Peers discovered by peer ID and multiaddress")] + peers: BTreeMap, +} + +impl DiscoveredMdns { + pub(crate) fn new(peers: BTreeMap) -> DiscoveredMdns { + DiscoveredMdns { + timestamp: Utc::now().timestamp_millis(), + peers: peers + .iter() + .map(|(peer_id, address)| (peer_id.to_string(), address.to_string())) + .collect(), + } + } +} + +impl DagJson for DiscoveredMdns {} + +impl From for Ipld { + fn from(notification: DiscoveredMdns) -> Self { + let peers: BTreeMap = notification + .peers + .into_iter() + .map(|(peer_id, address)| (peer_id, address.into())) + .collect(); + + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEERS_KEY.into(), peers.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for DiscoveredMdns { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peers = from_ipld::>( + map.get(PEERS_KEY) + .ok_or_else(|| anyhow!("missing {PEERS_KEY}"))? 
+ .to_owned(), + )?; + + Ok(DiscoveredMdns { timestamp, peers }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/network/pubsub.rs b/homestar-runtime/src/event_handler/notification/network/pubsub.rs new file mode 100644 index 00000000..01d0adbc --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/pubsub.rs @@ -0,0 +1,160 @@ +//! Notification types for [swarm] gossipsub events. +//! +//! [swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Cid, Ipld}; +use libp2p::PeerId; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const CID_KEY: &str = "cid"; +const PUBLISHER_KEY: &str = "publisher"; +const RAN_KEY: &str = "ran"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "published_receipt_pubsub")] +pub struct PublishedReceiptPubsub { + timestamp: i64, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl PublishedReceiptPubsub { + pub(crate) fn new(cid: Cid, ran: String) -> PublishedReceiptPubsub { + PublishedReceiptPubsub { + timestamp: Utc::now().timestamp_millis(), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for PublishedReceiptPubsub {} + +impl From for Ipld { + fn from(notification: PublishedReceiptPubsub) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PublishedReceiptPubsub { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+ .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? + .to_owned(), + )?; + + Ok(PublishedReceiptPubsub { + timestamp, + cid, + ran, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "received_receipt_pubsub")] +pub struct ReceivedReceiptPubsub { + timestamp: i64, + #[schemars(description = "Receipt publisher peer ID")] + publisher: String, + #[schemars(description = "Receipt CID")] + cid: String, + #[schemars(description = "Ran receipt CID")] + ran: String, +} + +impl ReceivedReceiptPubsub { + pub(crate) fn new(publisher: PeerId, cid: Cid, ran: String) -> ReceivedReceiptPubsub { + ReceivedReceiptPubsub { + timestamp: Utc::now().timestamp_millis(), + publisher: publisher.to_string(), + cid: cid.to_string(), + ran, + } + } +} + +impl DagJson for ReceivedReceiptPubsub {} + +impl From for Ipld { + fn from(notification: ReceivedReceiptPubsub) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PUBLISHER_KEY.into(), notification.publisher.into()), + (CID_KEY.into(), notification.cid.into()), + (RAN_KEY.into(), notification.ran.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for ReceivedReceiptPubsub { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let publisher = from_ipld( + map.get(PUBLISHER_KEY) + .ok_or_else(|| anyhow!("missing {PUBLISHER_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let ran = from_ipld( + map.get(RAN_KEY) + .ok_or_else(|| anyhow!("missing {RAN_KEY}"))? 
+ .to_owned(), + )?; + + Ok(ReceivedReceiptPubsub { + timestamp, + publisher, + cid, + ran, + }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/network/rendezvous.rs b/homestar-runtime/src/event_handler/notification/network/rendezvous.rs new file mode 100644 index 00000000..b1803828 --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/rendezvous.rs @@ -0,0 +1,300 @@ +//! Notification types for [swarm] rendezvous events. +//! +//! [swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Ipld}; +use libp2p::{Multiaddr, PeerId}; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const ADDRESSES_KEY: &str = "addresses"; +const ENQUIRER_KEY: &str = "enquirer"; +const PEER_KEY: &str = "peer_id"; +const PEERS_KEY: &str = "peers"; +const SERVER_KEY: &str = "server"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "discovered_rendezvous")] +pub struct DiscoveredRendezvous { + timestamp: i64, + #[schemars(description = "Server that fulfilled the discovery request")] + server: String, + #[schemars(description = "Peers discovered by peer ID and multiaddresses")] + peers: BTreeMap>, +} + +impl DiscoveredRendezvous { + pub(crate) fn new( + server: PeerId, + peers: BTreeMap>, + ) -> DiscoveredRendezvous { + DiscoveredRendezvous { + timestamp: Utc::now().timestamp_millis(), + server: server.to_string(), + peers: peers + .iter() + .map(|(peer_id, addresses)| { + ( + peer_id.to_string(), + addresses + .iter() + .map(|address| address.to_string()) + .collect(), + ) + }) + .collect(), + } + } +} + +impl DagJson for DiscoveredRendezvous {} + +impl From for Ipld { + fn from(notification: DiscoveredRendezvous) -> Self { + let peers: BTreeMap = notification + .peers + .into_iter() + .map(|(peer_id, addresses)| { + ( + peer_id, + Ipld::List( + 
addresses + .iter() + .map(|address| Ipld::String(address.to_owned())) + .collect(), + ), + ) + }) + .collect(); + + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (SERVER_KEY.into(), notification.server.into()), + (PEERS_KEY.into(), peers.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for DiscoveredRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let server = from_ipld( + map.get(SERVER_KEY) + .ok_or_else(|| anyhow!("missing {SERVER_KEY}"))? + .to_owned(), + )?; + + let peers = from_ipld::>>( + map.get(PEERS_KEY) + .ok_or_else(|| anyhow!("missing {PEERS_KEY}"))? + .to_owned(), + )?; + + Ok(DiscoveredRendezvous { + timestamp, + server, + peers, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "registered_rendezvous")] +pub struct RegisteredRendezvous { + timestamp: i64, + #[schemars(description = "Server that accepted registration")] + server: String, +} + +impl RegisteredRendezvous { + pub(crate) fn new(server: PeerId) -> RegisteredRendezvous { + RegisteredRendezvous { + timestamp: Utc::now().timestamp_millis(), + server: server.to_string(), + } + } +} + +impl DagJson for RegisteredRendezvous {} + +impl From for Ipld { + fn from(notification: RegisteredRendezvous) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (SERVER_KEY.into(), notification.server.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for RegisteredRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? 
+ .to_owned(), + )?; + + let server = from_ipld( + map.get(SERVER_KEY) + .ok_or_else(|| anyhow!("missing {SERVER_KEY}"))? + .to_owned(), + )?; + + Ok(RegisteredRendezvous { timestamp, server }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "registered_rendezvous")] +pub struct DiscoverServedRendezvous { + timestamp: i64, + #[schemars(description = "Peer that requested discovery")] + enquirer: String, +} + +impl DiscoverServedRendezvous { + pub(crate) fn new(enquirer: PeerId) -> DiscoverServedRendezvous { + DiscoverServedRendezvous { + timestamp: Utc::now().timestamp_millis(), + enquirer: enquirer.to_string(), + } + } +} + +impl DagJson for DiscoverServedRendezvous {} + +impl From for Ipld { + fn from(notification: DiscoverServedRendezvous) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (ENQUIRER_KEY.into(), notification.enquirer.into()), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for DiscoverServedRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let enquirer = from_ipld( + map.get(ENQUIRER_KEY) + .ok_or_else(|| anyhow!("missing {ENQUIRER_KEY}"))? 
+ .to_owned(), + )?; + + Ok(DiscoverServedRendezvous { + timestamp, + enquirer, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "peer_registered_rendezvous")] +pub struct PeerRegisteredRendezvous { + timestamp: i64, + #[schemars(description = "Peer registered")] + peer_id: String, + #[schemars(description = "Multiaddresses for peer")] + addresses: Vec, +} + +impl PeerRegisteredRendezvous { + pub(crate) fn new(peer_id: PeerId, addresses: Vec) -> PeerRegisteredRendezvous { + PeerRegisteredRendezvous { + timestamp: Utc::now().timestamp_millis(), + peer_id: peer_id.to_string(), + addresses: addresses + .iter() + .map(|address| address.to_string()) + .collect(), + } + } +} + +impl DagJson for PeerRegisteredRendezvous {} + +impl From for Ipld { + fn from(notification: PeerRegisteredRendezvous) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (PEER_KEY.into(), notification.peer_id.into()), + ( + ADDRESSES_KEY.into(), + Ipld::List( + notification + .addresses + .iter() + .map(|address| Ipld::String(address.to_owned())) + .collect(), + ), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for PeerRegisteredRendezvous { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let peer_id = from_ipld( + map.get(PEER_KEY) + .ok_or_else(|| anyhow!("missing {PEER_KEY}"))? + .to_owned(), + )?; + + let addresses = from_ipld( + map.get(ADDRESSES_KEY) + .ok_or_else(|| anyhow!("missing {ADDRESSES_KEY}"))? 
+ .to_owned(), + )?; + + Ok(PeerRegisteredRendezvous { + timestamp, + peer_id, + addresses, + }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/network/req_resp.rs b/homestar-runtime/src/event_handler/notification/network/req_resp.rs new file mode 100644 index 00000000..59f8aaab --- /dev/null +++ b/homestar-runtime/src/event_handler/notification/network/req_resp.rs @@ -0,0 +1,304 @@ +//! Notification types for [swarm] request_reponse events. +//! +//! [swarm]: libp2p::swarm::Swarm + +use anyhow::anyhow; +use chrono::prelude::Utc; +use derive_getters::Getters; +use faststr::FastStr; +use homestar_invocation::ipld::DagJson; +use libipld::{serde::from_ipld, Cid, Ipld}; +use libp2p::PeerId; +use schemars::JsonSchema; +use std::collections::BTreeMap; + +const CID_KEY: &str = "cid"; +const NAME_KEY: &str = "name"; +const NUM_TASKS_KEY: &str = "num_tasks"; +const PROGRESS_KEY: &str = "progress"; +const PROGRESS_COUNT_KEY: &str = "progress_count"; +const PROVIDER_KEY: &str = "provider"; +const REQUESTOR_KEY: &str = "requestor"; +const TIMESTAMP_KEY: &str = "timestamp"; + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "sent_workflow_info")] +pub struct SentWorkflowInfo { + timestamp: i64, + #[schemars(description = "Peer that requested workflow info")] + requestor: String, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl SentWorkflowInfo { + pub(crate) fn new( + requestor: PeerId, + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> SentWorkflowInfo { + SentWorkflowInfo { + requestor: requestor.to_string(), + timestamp: Utc::now().timestamp_millis(), + cid: 
cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for SentWorkflowInfo {} + +impl From for Ipld { + fn from(notification: SentWorkflowInfo) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + (REQUESTOR_KEY.into(), notification.requestor.into()), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + Ipld::Map(map) + } +} + +impl TryFrom for SentWorkflowInfo { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let requestor = from_ipld( + map.get(REQUESTOR_KEY) + .ok_or_else(|| anyhow!("missing {REQUESTOR_KEY}"))? + .to_owned(), + )?; + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? + .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? 
+ .to_owned(), + )?; + + Ok(SentWorkflowInfo { + timestamp, + requestor, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} + +#[derive(Debug, Clone, Getters, JsonSchema)] +#[schemars(rename = "received_workflow_info")] +pub struct ReceivedWorkflowInfo { + timestamp: i64, + #[schemars(description = "Workflow info provider peer ID")] + provider: Option, + #[schemars(description = "Workflow info CID")] + cid: String, + #[schemars(description = "Optional workflow name")] + name: Option, + #[schemars(description = "Number of tasks in workflow")] + num_tasks: u32, + #[schemars(description = "Completed task CIDs")] + progress: Vec, + #[schemars(description = "Number of workflow tasks completed")] + progress_count: u32, +} + +impl ReceivedWorkflowInfo { + pub(crate) fn new( + provider: Option, + cid: Cid, + name: Option, + num_tasks: u32, + progress: Vec, + progress_count: u32, + ) -> ReceivedWorkflowInfo { + ReceivedWorkflowInfo { + timestamp: Utc::now().timestamp_millis(), + provider: provider.map(|p| p.to_string()), + cid: cid.to_string(), + name: name.map(|n| n.into()), + num_tasks, + progress: progress.iter().map(|cid| cid.to_string()).collect(), + progress_count, + } + } +} + +impl DagJson for ReceivedWorkflowInfo {} + +impl From for Ipld { + fn from(notification: ReceivedWorkflowInfo) -> Self { + let map: BTreeMap = BTreeMap::from([ + (TIMESTAMP_KEY.into(), notification.timestamp.into()), + ( + PROVIDER_KEY.into(), + notification + .provider + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (CID_KEY.into(), notification.cid.into()), + ( + NAME_KEY.into(), + notification + .name + .map(|peer_id| peer_id.into()) + .unwrap_or(Ipld::Null), + ), + (NUM_TASKS_KEY.into(), notification.num_tasks.into()), + ( + PROGRESS_KEY.into(), + Ipld::List( + notification + .progress + .iter() + .map(|cid| Ipld::String(cid.to_string())) + .collect(), + ), + ), + ( + PROGRESS_COUNT_KEY.into(), + notification.progress_count.into(), + ), + ]); + + 
Ipld::Map(map) + } +} + +impl TryFrom for ReceivedWorkflowInfo { + type Error = anyhow::Error; + + fn try_from(ipld: Ipld) -> Result { + let map = from_ipld::>(ipld)?; + + let timestamp = from_ipld( + map.get(TIMESTAMP_KEY) + .ok_or_else(|| anyhow!("missing {TIMESTAMP_KEY}"))? + .to_owned(), + )?; + + let provider = map + .get(PROVIDER_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let cid = from_ipld( + map.get(CID_KEY) + .ok_or_else(|| anyhow!("missing {CID_KEY}"))? + .to_owned(), + )?; + + let name = map + .get(NAME_KEY) + .and_then(|ipld| match ipld { + Ipld::Null => None, + ipld => Some(ipld), + }) + .and_then(|ipld| from_ipld(ipld.to_owned()).ok()); + + let num_tasks = from_ipld( + map.get(NUM_TASKS_KEY) + .ok_or_else(|| anyhow!("missing {NUM_TASKS_KEY}"))? + .to_owned(), + )?; + + let progress = from_ipld::>( + map.get(PROGRESS_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_KEY}"))? + .to_owned(), + )?; + + let progress_count = from_ipld( + map.get(PROGRESS_COUNT_KEY) + .ok_or_else(|| anyhow!("missing {PROGRESS_COUNT_KEY}"))? + .to_owned(), + )?; + + Ok(ReceivedWorkflowInfo { + timestamp, + provider, + cid, + name, + num_tasks, + progress, + progress_count, + }) + } +} diff --git a/homestar-runtime/src/event_handler/notification/receipt.rs b/homestar-runtime/src/event_handler/notification/receipt.rs index 770d223e..44ee4b10 100644 --- a/homestar-runtime/src/event_handler/notification/receipt.rs +++ b/homestar-runtime/src/event_handler/notification/receipt.rs @@ -1,11 +1,25 @@ //! Notification receipts. 
-use homestar_invocation::{ipld::DagJson, Receipt}; +use const_format::formatcp; +use homestar_invocation::{ + ipld::{schema, DagJson}, + Receipt, +}; use libipld::{ipld, Cid, Ipld}; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet}, + module_path, +}; /// A [Receipt] that is sent out for websocket notifications. #[derive(Debug, Clone, PartialEq)] -pub(crate) struct ReceiptNotification(Ipld); +pub struct ReceiptNotification(Ipld); impl ReceiptNotification { /// Obtain a reference to the inner Ipld value. @@ -45,3 +59,67 @@ impl From for ReceiptNotification { ReceiptNotification(ipld) } } + +impl JsonSchema for ReceiptNotification { + fn schema_name() -> String { + "receipt_notification".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::ReceiptNotification", module_path!())) + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let metadata_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Metadata".to_string()), + description: Some("Workflow metadata to contextualize the receipt".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("name".to_owned(), ::json_schema(gen)), + ("replayed".to_owned(), ::json_schema(gen)), + ( + "workflow".to_owned(), + gen.subschema_for::(), + ), + ]), + required: BTreeSet::from([ + "name".to_string(), + "receipt".to_string(), + "receipt_cid".to_string(), + ]), + ..Default::default() + })), + ..Default::default() + }; + + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Receipt notification".to_string()), + description: Some( + "A receipt notification associated 
with a running workflow".to_string(), + ), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("metadata".to_owned(), Schema::Object(metadata_schema)), + ("receipt".to_owned(), gen.subschema_for::>()), + ( + "receipt_cid".to_owned(), + gen.subschema_for::(), + ), + ]), + required: BTreeSet::from(["receipt".to_string(), "receipt_cid".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} diff --git a/homestar-runtime/src/event_handler/notification/swarm.rs b/homestar-runtime/src/event_handler/notification/swarm.rs deleted file mode 100644 index 517fe3ba..00000000 --- a/homestar-runtime/src/event_handler/notification/swarm.rs +++ /dev/null @@ -1,108 +0,0 @@ -// Notification types for [swarm] events. -// -// [swarm]: libp2p_swarm::Swarm - -use anyhow::anyhow; -use serde::{Deserialize, Serialize}; -use std::{fmt, str::FromStr}; - -// Swarm notification types sent to clients -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub(crate) enum SwarmNotification { - ConnnectionEstablished, - ConnnectionClosed, - ListeningOn, - OutgoingConnectionError, - IncomingConnectionError, - PublishedReceiptPubsub, - ReceivedReceiptPubsub, - GotReceiptDht, - PutReceiptDht, - GotWorkflowInfoDht, - PutWorkflowInfoDht, - ReceiptQuorumSuccess, - ReceiptQuorumFailure, - WorkflowInfoQuorumSuccess, - WorkflowInfoQuorumFailure, - SentWorkflowInfo, - ReceivedWorkflowInfo, -} - -impl fmt::Display for SwarmNotification { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - SwarmNotification::ConnnectionEstablished => write!(f, "connectionEstablished"), - SwarmNotification::ConnnectionClosed => write!(f, "connectionClosed"), - SwarmNotification::ListeningOn => write!(f, "listeningOn"), - SwarmNotification::OutgoingConnectionError => { - write!(f, "outgoingConnectionError") - } - SwarmNotification::IncomingConnectionError => { - write!(f, "incomingConnectionError") - 
} - SwarmNotification::ReceivedReceiptPubsub => { - write!(f, "receivedReceiptPubsub") - } - SwarmNotification::PublishedReceiptPubsub => { - write!(f, "publishedReceiptPubsub") - } - SwarmNotification::PutReceiptDht => { - write!(f, "putReceiptDht") - } - SwarmNotification::GotReceiptDht => { - write!(f, "gotReceiptDht") - } - SwarmNotification::PutWorkflowInfoDht => { - write!(f, "putWorkflowInfoDht") - } - SwarmNotification::GotWorkflowInfoDht => { - write!(f, "gotWorkflowInfoDht") - } - SwarmNotification::ReceiptQuorumSuccess => { - write!(f, "receiptQuorumSuccess") - } - SwarmNotification::ReceiptQuorumFailure => { - write!(f, "receiptQuorumFailure") - } - SwarmNotification::WorkflowInfoQuorumSuccess => { - write!(f, "workflowInfoQuorumSuccess") - } - SwarmNotification::WorkflowInfoQuorumFailure => { - write!(f, "workflowInfoQuorumFailure") - } - SwarmNotification::SentWorkflowInfo => { - write!(f, "sentWorkflowInfo") - } - SwarmNotification::ReceivedWorkflowInfo => { - write!(f, "receivedWorkflowInfo") - } - } - } -} - -impl FromStr for SwarmNotification { - type Err = anyhow::Error; - - fn from_str(ty: &str) -> Result { - match ty { - "connectionEstablished" => Ok(Self::ConnnectionEstablished), - "connectionClosed" => Ok(Self::ConnnectionClosed), - "listeningOn" => Ok(Self::ListeningOn), - "outgoingConnectionError" => Ok(Self::OutgoingConnectionError), - "incomingConnectionError" => Ok(Self::IncomingConnectionError), - "receivedReceiptPubsub" => Ok(Self::ReceivedReceiptPubsub), - "publishedReceiptPubsub" => Ok(Self::PublishedReceiptPubsub), - "putReciptDht" => Ok(Self::PutReceiptDht), - "gotReceiptDht" => Ok(Self::GotReceiptDht), - "putWorkflowInfoDht" => Ok(Self::PutWorkflowInfoDht), - "gotWorkflowInfoDht" => Ok(Self::GotWorkflowInfoDht), - "receiptQuorumSuccess" => Ok(Self::ReceiptQuorumSuccess), - "receiptQuorumFailure" => Ok(Self::ReceiptQuorumFailure), - "workflowInfoQuorumSuccess" => Ok(Self::WorkflowInfoQuorumSuccess), - "workflowInfoQuorumFailure" => 
Ok(Self::WorkflowInfoQuorumFailure), - "sentWorkflowInfo" => Ok(Self::SentWorkflowInfo), - "receivedWorkflowInfo" => Ok(Self::ReceivedWorkflowInfo), - _ => Err(anyhow!("Missing swarm notification type: {}", ty)), - } - } -} diff --git a/homestar-runtime/src/event_handler/swarm_event.rs b/homestar-runtime/src/event_handler/swarm_event.rs index c5cc0982..9e04dfa6 100644 --- a/homestar-runtime/src/event_handler/swarm_event.rs +++ b/homestar-runtime/src/event_handler/swarm_event.rs @@ -2,7 +2,7 @@ use super::EventHandler; #[cfg(feature = "websocket-notify")] -use crate::event_handler::notification::{self, EventNotificationTyp, SwarmNotification}; +use crate::event_handler::notification::{self, NetworkNotification}; #[cfg(feature = "ipfs")] use crate::network::IpfsCli; use crate::{ @@ -26,7 +26,7 @@ use anyhow::{anyhow, Result}; use async_trait::async_trait; use libipld::Cid; #[cfg(feature = "websocket-notify")] -use libipld::Ipld; +use libp2p::Multiaddr; use libp2p::{ gossipsub, identify, kad, kad::{AddProviderOk, BootstrapOk, GetProvidersOk, GetRecordOk, PutRecordOk, QueryResult}, @@ -38,7 +38,7 @@ use libp2p::{ PeerId, StreamProtocol, }; #[cfg(feature = "websocket-notify")] -use maplit::btreemap; +use std::collections::BTreeMap; use std::collections::{HashMap, HashSet}; use tracing::{debug, error, info, warn}; @@ -74,9 +74,6 @@ pub(crate) enum FoundEvent { pub(crate) struct ReceiptEvent { pub(crate) peer_id: Option, pub(crate) receipt: Receipt, - #[cfg(feature = "websocket-notify")] - #[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] - pub(crate) notification_type: EventNotificationTyp, } /// [FoundEvent] variant for workflow info found on the DHT. 
@@ -86,7 +83,7 @@ pub(crate) struct WorkflowInfoEvent { pub(crate) workflow_info: workflow::Info, #[cfg(feature = "websocket-notify")] #[cfg_attr(docsrs, doc(cfg(feature = "websocket-notify")))] - pub(crate) notification_type: EventNotificationTyp, + pub(crate) workflow_source: notification::WorkflowInfoSource, } #[async_trait] @@ -304,6 +301,25 @@ async fn handle_swarm_event( } } + #[cfg(feature = "websocket-notify")] + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::DiscoveredRendezvous( + notification::DiscoveredRendezvous::new( + rendezvous_node, + registrations + .iter() + .map(|registration| { + ( + registration.record.peer_id(), + registration.record.addresses().to_owned(), + ) + }) + .collect::>>(), + ), + ), + ); + // Discover peers again at discovery interval event_handler .cache @@ -358,6 +374,14 @@ async fn handle_swarm_event( "registered self with rendezvous node" ); + #[cfg(feature = "websocket-notify")] + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::RegisteredRendezvous( + notification::RegisteredRendezvous::new(rendezvous_node), + ), + ); + event_handler .cache .insert( @@ -415,12 +439,22 @@ async fn handle_swarm_event( } SwarmEvent::Behaviour(ComposedEvent::RendezvousServer(rendezvous_server_event)) => { match rendezvous_server_event { - rendezvous::server::Event::DiscoverServed { enquirer, .. } => debug!( - subject = "libp2p.rendezvous.server.discover", - category = "handle_swarm_event", - peer_id = enquirer.to_string(), - "served rendezvous discover request to peer" - ), + rendezvous::server::Event::DiscoverServed { enquirer, .. 
} => { + debug!( + subject = "libp2p.rendezvous.server.discover", + category = "handle_swarm_event", + peer_id = enquirer.to_string(), + "served rendezvous discover request to peer" + ); + + #[cfg(feature = "websocket-notify")] + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::DiscoverServedRendezvous( + notification::DiscoverServedRendezvous::new(enquirer), + ), + ); + } rendezvous::server::Event::DiscoverNotServed { enquirer, error } => { warn!(subject = "libp2p.rendezvous.server.discover.err", category = "handle_swarm_event", @@ -428,13 +462,25 @@ async fn handle_swarm_event( err=?error, "did not serve rendezvous discover request") } - rendezvous::server::Event::PeerRegistered { peer, .. } => { + rendezvous::server::Event::PeerRegistered { peer, registration } => { debug!( subject = "libp2p.rendezvous.server.peer_registered", category = "handle_swarm_event", peer_id = peer.to_string(), + addresses = ?registration.record.addresses(), "registered peer through rendezvous" - ) + ); + + #[cfg(feature = "websocket-notify")] + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::PeerRegisteredRendezvous( + notification::PeerRegisteredRendezvous::new( + peer, + registration.record.addresses().to_owned(), + ), + ), + ); } rendezvous::server::Event::PeerNotRegistered { peer, @@ -486,17 +532,16 @@ async fn handle_swarm_event( .map(|conn| Db::store_receipt(receipt.clone(), conn)); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::ReceivedReceiptPubsub, + NetworkNotification::ReceivedReceiptPubsub( + notification::ReceivedReceiptPubsub::new( + propagation_source, + receipt.cid(), + receipt.ran(), + ), ), - btreemap! 
{ - "publisher" => Ipld::String(propagation_source.to_string()), - "cid" => Ipld::String(receipt.cid().to_string()), - "ran" => Ipld::String(receipt.ran().to_string()) - }, - ); + ) } Err(err) => debug!(subject = "libp2p.gossipsub.err", category = "handle_swarm_event", @@ -592,11 +637,6 @@ async fn handle_swarm_event( FoundEvent::Receipt(ReceiptEvent { peer_id, receipt: receipt.clone(), - #[cfg(feature = "websocket-notify")] - notification_type: - EventNotificationTyp::SwarmNotification( - SwarmNotification::GotReceiptDht, - ), }), )); @@ -624,10 +664,7 @@ async fn handle_swarm_event( peer_id, workflow_info: workflow_info.clone(), #[cfg(feature = "websocket-notify")] - notification_type: - EventNotificationTyp::SwarmNotification( - SwarmNotification::GotWorkflowInfoDht, - ), + workflow_source: notification::WorkflowInfoSource::Dht, }), )); @@ -726,25 +763,23 @@ async fn handle_swarm_event( #[cfg(feature = "websocket-notify")] match key.capsule_tag { - CapsuleTag::Receipt => notification::emit_event( + CapsuleTag::Receipt => notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::ReceiptQuorumSuccess, + NetworkNotification::ReceiptQuorumSuccessDht( + notification::ReceiptQuorumSuccessDht::new( + key.cid, + event_handler.quorum.receipt, + ), ), - btreemap! { - "cid" => Ipld::String(key.cid.to_string()), - "quorum" => Ipld::Integer(event_handler.quorum.receipt as i128), - }, ), - CapsuleTag::Workflow => notification::emit_event( + CapsuleTag::Workflow => notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::WorkflowInfoQuorumSuccess, + NetworkNotification::WorkflowInfoQuorumSuccessDht( + notification::WorkflowInfoQuorumSuccessDht::new( + key.cid, + event_handler.quorum.workflow, + ), ), - btreemap! 
{ - "cid" => Ipld::String(key.cid.to_string()), - "quorum" => Ipld::Integer(event_handler.quorum.workflow as i128), - }, ), } } @@ -768,29 +803,27 @@ async fn handle_swarm_event( #[cfg(feature = "websocket-notify")] if let kad::PutRecordError::QuorumFailed { success, .. } = err { match key.capsule_tag { - CapsuleTag::Receipt => notification::emit_event( + CapsuleTag::Receipt => notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::ReceiptQuorumFailure, + NetworkNotification::ReceiptQuorumFailureDht( + notification::ReceiptQuorumFailureDht::new( + key.cid, + event_handler.quorum.receipt, + event_handler.connections.peers.len(), + success, + ), ), - btreemap! { - "cid" => Ipld::String(key.cid.to_string()), - "quorum" => Ipld::Integer(event_handler.quorum.receipt as i128), - "connectedPeers" => Ipld::Integer(event_handler.connections.peers.len() as i128), - "storedToPeers" => Ipld::List(success.iter().map(|cid| Ipld::String(cid.to_string())).collect()) - }, ), - CapsuleTag::Workflow => notification::emit_event( + CapsuleTag::Workflow => notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::WorkflowInfoQuorumFailure, + NetworkNotification::WorkflowInfoQuorumFailureDht( + notification::WorkflowInfoQuorumFailureDht::new( + key.cid, + event_handler.quorum.workflow, + event_handler.connections.peers.len(), + success, + ), ), - btreemap! 
{ - "cid" => Ipld::String(key.cid.to_string()), - "quorum" => Ipld::Integer(event_handler.quorum.workflow as i128), - "connectedPeers" => Ipld::Integer(event_handler.connections.peers.len() as i128), - "storedToPeers" => Ipld::List(success.iter().map(|cid| Ipld::String(cid.to_string())).collect()) - }, ), } } @@ -889,19 +922,18 @@ async fn handle_swarm_event( ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification( - SwarmNotification::SentWorkflowInfo, + NetworkNotification::SentWorkflowInfo( + notification::SentWorkflowInfo::new( + peer, + workflow_info.cid(), + workflow_info.name, + workflow_info.num_tasks, + workflow_info.progress, + workflow_info.progress_count, + ), ), - btreemap! { - "requestor" => Ipld::String(peer.to_string()), - "cid" => Ipld::String(workflow_info.cid().to_string()), - "name" => workflow_info.name.as_ref().map_or(Ipld::Null, |name| Ipld::String(name.to_string())), - "numTasks" => Ipld::Integer(workflow_info.num_tasks as i128), - "progress" => Ipld::List(workflow_info.progress.iter().map(|cid| Ipld::String(cid.to_string())).collect()), - "progressCount" => Ipld::Integer(workflow_info.progress_count as i128), - }, ) } else { let _ = event_handler @@ -967,9 +999,8 @@ async fn handle_swarm_event( peer_id, workflow_info: workflow_info.clone(), #[cfg(feature = "websocket-notify")] - notification_type: EventNotificationTyp::SwarmNotification( - SwarmNotification::ReceivedWorkflowInfo, - ), + workflow_source: + notification::WorkflowInfoSource::RequestResponse, }), )); @@ -1020,7 +1051,7 @@ async fn handle_swarm_event( } SwarmEvent::Behaviour(ComposedEvent::Mdns(mdns::Event::Discovered(list))) => { - for (peer_id, multiaddr) in list { + for (peer_id, multiaddr) in list.clone() { debug!( subject = "libp2p.mdns.discovered", category = "handle_swarm_event", @@ -1045,6 +1076,16 @@ async fn handle_swarm_event( ) } } + + 
#[cfg(feature = "websocket-notify")] + notification::emit_network_event( + event_handler.ws_evt_sender(), + NetworkNotification::DiscoveredMdns(notification::DiscoveredMdns::new( + list.iter() + .map(|peer| (peer.0, peer.1.to_owned())) + .collect::>(), + )), + ) } SwarmEvent::Behaviour(ComposedEvent::Mdns(mdns::Event::Expired(list))) => { let behaviour = event_handler.swarm.behaviour_mut(); @@ -1081,13 +1122,11 @@ async fn handle_swarm_event( ); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::ListeningOn), - btreemap! { - "peerId" => Ipld::String(local_peer.to_string()), - "address" => Ipld::String(address.to_string()) - }, + NetworkNotification::NewListenAddr(notification::NewListenAddr::new( + local_peer, address, + )), ); // Init bootstrapping of the DHT @@ -1154,14 +1193,15 @@ async fn handle_swarm_event( .insert(peer_id, endpoint.clone()); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionEstablished), - btreemap! { - "peerId" => Ipld::String(peer_id.to_string()), - "address" => Ipld::String(endpoint.get_remote_address().to_string()) - }, - ); + NetworkNotification::ConnnectionEstablished( + notification::ConnectionEstablished::new( + peer_id, + endpoint.get_remote_address().to_owned(), + ), + ), + ) } SwarmEvent::ConnectionClosed { peer_id, @@ -1233,14 +1273,13 @@ async fn handle_swarm_event( } #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::ConnnectionClosed), - btreemap! 
{ - "peerId" => Ipld::String(peer_id.to_string()), - "address" => Ipld::String(endpoint.get_remote_address().to_string()) - }, - ); + NetworkNotification::ConnnectionClosed(notification::ConnectionClosed::new( + peer_id, + endpoint.get_remote_address().to_owned(), + )), + ) } SwarmEvent::OutgoingConnectionError { connection_id, @@ -1300,14 +1339,12 @@ async fn handle_swarm_event( } #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::OutgoingConnectionError), - btreemap! { - "peerId" => peer_id.map_or(Ipld::Null, |p| Ipld::String(p.to_string())), - "error" => Ipld::String(error.to_string()) - }, - ); + NetworkNotification::OutgoingConnectionError( + notification::OutgoingConnectionError::new(peer_id, error), + ), + ) } SwarmEvent::IncomingConnectionError { connection_id, @@ -1324,13 +1361,12 @@ async fn handle_swarm_event( "incoming connection error"); #[cfg(feature = "websocket-notify")] - notification::emit_event( + notification::emit_network_event( event_handler.ws_evt_sender(), - EventNotificationTyp::SwarmNotification(SwarmNotification::IncomingConnectionError), - btreemap! 
{ - "error" => Ipld::String(error.to_string()) - }, - ); + NetworkNotification::IncomingConnectionError( + notification::IncomingConnectionError::new(error), + ), + ) } SwarmEvent::ListenerError { listener_id, error } => { error!(subject = "libp2p.listener.err", diff --git a/homestar-runtime/src/lib.rs b/homestar-runtime/src/lib.rs index 6a431ea1..fa01e995 100644 --- a/homestar-runtime/src/lib.rs +++ b/homestar-runtime/src/lib.rs @@ -68,14 +68,17 @@ pub mod test_utils; mod worker; pub mod workflow; -pub use db::Db; +pub use db::{utils::Health, Db}; pub(crate) mod libp2p; pub use logger::*; pub(crate) mod metrics; +#[cfg(feature = "websocket-notify")] +pub use event_handler::notification::{network::NetworkNotification, receipt::ReceiptNotification}; #[allow(unused_imports)] pub(crate) use event_handler::EventHandler; +pub use network::webserver::PrometheusData; pub use receipt::{Receipt, RECEIPT_TAG, VERSION_KEY}; -pub use runner::Runner; +pub use runner::{NodeInfo, Runner}; pub(crate) use scheduler::TaskScheduler; pub use settings::Settings; pub(crate) use worker::Worker; diff --git a/homestar-runtime/src/network/webserver.rs b/homestar-runtime/src/network/webserver.rs index 3ddd4585..0f69f9ed 100644 --- a/homestar-runtime/src/network/webserver.rs +++ b/homestar-runtime/src/network/webserver.rs @@ -43,6 +43,7 @@ mod rpc; #[cfg(feature = "websocket-notify")] pub(crate) use notifier::Notifier; +pub use prom::PrometheusData; #[cfg(feature = "websocket-notify")] pub(crate) use rpc::SUBSCRIBE_NETWORK_EVENTS_ENDPOINT; use rpc::{Context, JsonRpc}; @@ -250,6 +251,10 @@ impl Server { rpc::METRICS_ENDPOINT, )?) .layer(ProxyGetRequestLayer::new("/node", rpc::NODE_INFO_ENDPOINT)?) + .layer(ProxyGetRequestLayer::new( + "/rpc_discover", + rpc::DISCOVER_ENDPOINT, + )?) 
.layer(cors) .layer(SetSensitiveRequestHeadersLayer::new(once(AUTHORIZATION))) .timeout(self.webserver_timeout); diff --git a/homestar-runtime/src/network/webserver/prom.rs b/homestar-runtime/src/network/webserver/prom.rs index 45cd228e..b6471424 100644 --- a/homestar-runtime/src/network/webserver/prom.rs +++ b/homestar-runtime/src/network/webserver/prom.rs @@ -2,11 +2,22 @@ /// /// Influenced by https://crates.io/crates/prom2jsonrs/0.1.0. use anyhow::{anyhow, bail, Result}; +use const_format::formatcp; use dyn_clone::DynClone; use once_cell::sync::Lazy; use regex::Regex; +use schemars::{ + gen::SchemaGenerator, + schema::{InstanceType, Metadata, ObjectValidation, Schema, SchemaObject, SingleOrVec}, + JsonSchema, +}; use serde::{Deserialize, Serialize}; -use std::collections::HashMap; +use serde_json::json; +use std::{ + borrow::Cow, + collections::{BTreeMap, BTreeSet, HashMap}, + module_path, +}; #[allow(dead_code)] const HISTOGRAM_TYPE: &str = "HISTOGRAM"; @@ -38,9 +49,11 @@ static MULTI_NEWLINE: Lazy<&Regex> = Lazy::new(|| { type Labels = HashMap; type Value = String; -#[derive(Clone, Serialize)] +#[derive(Clone, Serialize, JsonSchema)] /// A parsed representation of the prometheus metrics data -pub(crate) struct PrometheusData { +#[allow(missing_debug_implementations)] +#[schemars(title = "Metrics data", description = "Prometheus metrics data")] +pub struct PrometheusData { metrics: Vec, } @@ -76,6 +89,45 @@ struct Metric { value: Value, } +impl JsonSchema for Metric { + fn schema_name() -> String { + "gauge".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::Metric", module_path!())) + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + let type_schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + const_value: Some(json!("metric")), + ..Default::default() + }; + + let schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + 
metadata: Some(Box::new(Metadata { + title: Some("Gauge data".to_string()), + description: Some("A gauge metric".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("type".to_string(), Schema::Object(type_schema)), + ("labels".to_string(), >::json_schema(gen)), + ("value".to_string(), ::json_schema(gen)), + ]), + required: BTreeSet::from(["type".to_string(), "value".to_string()]), + ..Default::default() + })), + ..Default::default() + }; + + schema.into() + } +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] struct Summary { labels: Option, @@ -92,8 +144,9 @@ struct Histogram { sum: Value, } -#[derive(Debug, Clone, PartialEq, Serialize)] +#[derive(Debug, Clone, PartialEq, Serialize, JsonSchema)] #[serde(rename_all = "lowercase")] +#[schemars(title = "Metric type")] enum MetricType { Gauge, Histogram, @@ -108,6 +161,88 @@ struct MetricFamily { data: Vec>, } +impl JsonSchema for MetricFamily { + fn schema_name() -> String { + "metric".to_owned() + } + + fn schema_id() -> Cow<'static, str> { + Cow::Borrowed(formatcp!("{}::MetricFamily", module_path!())) + } + + fn json_schema(gen: &mut SchemaGenerator) -> Schema { + struct DataConditional { + if_schema: Schema, + then_schema: Schema, + else_schema: Schema, + } + + fn data_conditional(gen: &mut SchemaGenerator) -> DataConditional { + let if_schema = SchemaObject { + instance_type: None, + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([( + "metric_type".to_owned(), + Schema::Object(SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::String.into())), + const_value: Some(json!("gauge")), + ..Default::default() + }), + )]), + ..Default::default() + })), + ..Default::default() + }; + + let then_schema = SchemaObject { + instance_type: None, + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([("data".to_string(), ::json_schema(gen))]), + ..Default::default() + })), + 
..Default::default() + }; + + DataConditional { + if_schema: Schema::Object(if_schema), + then_schema: Schema::Object(then_schema), + else_schema: Schema::Bool(false), + } + } + + let mut schema = SchemaObject { + instance_type: Some(SingleOrVec::Single(InstanceType::Object.into())), + metadata: Some(Box::new(Metadata { + title: Some("Metric family".to_string()), + description: Some("A prometheus gauge, summary, or histogram metric".to_string()), + ..Default::default() + })), + object: Some(Box::new(ObjectValidation { + properties: BTreeMap::from([ + ("metric_type".to_string(), ::json_schema(gen)), + ("metric_name".to_string(), ::json_schema(gen)), + ("help".to_string(), ::json_schema(gen)), + ]), + required: BTreeSet::from([ + "metric_type".to_string(), + "metric_name".to_string(), + "help".to_string(), + "data".to_string(), + ]), + ..Default::default() + })), + ..Default::default() + }; + + let data = data_conditional(gen); + schema.subschemas().if_schema = Some(Box::new(data.if_schema)); + schema.subschemas().then_schema = Some(Box::new(data.then_schema)); + schema.subschemas().else_schema = Some(Box::new(data.else_schema)); + + schema.into() + } +} + #[typetag::serde(tag = "type")] trait MetricLike: DynClone { fn parse_from_string(s: &str) -> Result<(Value, Option)> diff --git a/homestar-runtime/src/network/webserver/rpc.rs b/homestar-runtime/src/network/webserver/rpc.rs index 2832733b..4cdc1512 100644 --- a/homestar-runtime/src/network/webserver/rpc.rs +++ b/homestar-runtime/src/network/webserver/rpc.rs @@ -6,7 +6,10 @@ use super::notifier::{self, Header, Notifier, SubscriptionTyp}; use super::{listener, prom::PrometheusData, Message}; #[cfg(feature = "websocket-notify")] use crate::channel::AsyncChannel; -use crate::{db::Database, runner::WsSender}; +use crate::{ + db::Database, + runner::{NodeInfo, WsSender}, +}; #[cfg(feature = "websocket-notify")] use anyhow::anyhow; use anyhow::Result; @@ -41,6 +44,11 @@ use tracing::debug; #[allow(unused_imports)] use 
tracing::{error, warn}; +/// OpenRPC API document +const API_SCHEMA_DOC: &str = include_str!("../../../schemas/api.json"); + +/// OpenRPC API discovery endpoint. +pub(crate) const DISCOVER_ENDPOINT: &str = "rpc_discover"; /// Health endpoint. pub(crate) const HEALTH_ENDPOINT: &str = "health"; /// Metrics endpoint for prometheus / openmetrics polling. @@ -152,11 +160,13 @@ where async fn register(ctx: Context) -> Result>> { let mut module = RpcModule::new(ctx); + module.register_method(DISCOVER_ENDPOINT, |_, _| serde_json::json!(API_SCHEMA_DOC))?; + module.register_async_method(HEALTH_ENDPOINT, |_, ctx| async move { match ctx.db.conn() { Ok(mut conn) => { - if DB::health_check(&mut conn).is_ok() { - Ok(serde_json::json!({"healthy": true})) + if let Ok(health) = DB::health_check(&mut conn) { + Ok(serde_json::json!(health)) } else { Err(internal_err("database query is unreachable".to_string())) } @@ -190,8 +200,7 @@ where .map_err(|err| internal_err(err.to_string()))?; if let Ok(Message::AckNodeInfo((static_info, dyn_info))) = rx.recv_async().await { - Ok(serde_json::json!({ - "nodeInfo": {"static": static_info, "dynamic": dyn_info}})) + Ok(serde_json::json!(NodeInfo::new(static_info, dyn_info))) } else { error!( subject = "call.node", diff --git a/homestar-runtime/src/runner.rs b/homestar-runtime/src/runner.rs index fa287c49..a38e5e67 100644 --- a/homestar-runtime/src/runner.rs +++ b/homestar-runtime/src/runner.rs @@ -47,6 +47,7 @@ pub(crate) mod file; mod nodeinfo; pub mod response; pub(crate) use error::Error; +pub use nodeinfo::NodeInfo; pub(crate) use nodeinfo::{DynamicNodeInfo, StaticNodeInfo}; /// Name of the thread used for the [Runner] / runtime. diff --git a/homestar-runtime/src/runner/nodeinfo.rs b/homestar-runtime/src/runner/nodeinfo.rs index 5b8f1804..64faa3a5 100644 --- a/homestar-runtime/src/runner/nodeinfo.rs +++ b/homestar-runtime/src/runner/nodeinfo.rs @@ -1,14 +1,36 @@ //! Node information. 
use libp2p::{Multiaddr, PeerId}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, fmt}; use tabled::Tabled; +/// Node information. +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[schemars(rename = "node_info")] +pub struct NodeInfo { + /// Static node information available at startup. + #[serde(rename = "static")] + pub(crate) stat: StaticNodeInfo, + /// Dynamic node information available through events + /// at runtime. + pub(crate) dynamic: DynamicNodeInfo, +} + +impl NodeInfo { + /// Create an instance of [NodeInfo]. + pub(crate) fn new(stat: StaticNodeInfo, dynamic: DynamicNodeInfo) -> Self { + Self { stat, dynamic } + } +} + /// Static node information available at startup. -#[derive(Debug, Clone, Serialize, Deserialize, Tabled)] +#[derive(Debug, Clone, Serialize, Deserialize, Tabled, JsonSchema)] +#[schemars(rename = "static")] pub(crate) struct StaticNodeInfo { /// The [PeerId] of a node. + #[schemars(with = "String", description = "The peer ID of the node")] pub(crate) peer_id: PeerId, } @@ -33,11 +55,17 @@ impl StaticNodeInfo { /// Dynamic node information available through events /// at runtime. -#[derive(Debug, Default, Clone, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, Serialize, Deserialize, JsonSchema)] +#[schemars(rename = "dynamic")] pub(crate) struct DynamicNodeInfo { /// Listeners for the node. + #[schemars(with = "Vec", description = "Listen addresses for the node")] pub(crate) listeners: Vec, /// Connections for the node. 
+ #[schemars( + with = "HashMap", + description = "Peers and their addresses that are connected to the node" + )] pub(crate) connections: HashMap, } diff --git a/homestar-runtime/tests/cli.rs b/homestar-runtime/tests/cli.rs index 8752b8a0..6a5e6bfc 100644 --- a/homestar-runtime/tests/cli.rs +++ b/homestar-runtime/tests/cli.rs @@ -125,6 +125,7 @@ fn test_server_integration() -> Result<()> { let config = make_config!(toml); Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-db") .arg(&proc_info.db_path) @@ -132,6 +133,7 @@ fn test_server_integration() -> Result<()> { .failure(); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) diff --git a/homestar-runtime/tests/network.rs b/homestar-runtime/tests/network.rs index 24d09298..54aaef7c 100644 --- a/homestar-runtime/tests/network.rs +++ b/homestar-runtime/tests/network.rs @@ -1,27 +1,27 @@ use crate::{ make_config, utils::{ - check_for_line_with, kill_homestar, listen_addr, multiaddr, retrieve_output, - wait_for_socket_connection_v6, ChildGuard, ProcInfo, BIN_NAME, ED25519MULTIHASH, - SECP256K1MULTIHASH, + check_for_line_with, kill_homestar, listen_addr, retrieve_output, + wait_for_socket_connection, wait_for_socket_connection_v6, ChildGuard, ProcInfo, BIN_NAME, + ED25519MULTIHASH, SECP256K1MULTIHASH, }, }; use anyhow::Result; -use libp2p::Multiaddr; use once_cell::sync::Lazy; use std::{ path::PathBuf, process::{Command, Stdio}, - time::Duration, }; +#[cfg(feature = "websocket-notify")] +mod connection; #[cfg(all(feature = "websocket-notify", feature = "test-utils"))] mod dht; #[cfg(feature = "websocket-notify")] mod gossip; +#[cfg(feature = "websocket-notify")] mod mdns; #[cfg(feature = "websocket-notify")] -mod notification; mod rendezvous; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); @@ -57,6 +57,7 @@ fn test_libp2p_generates_peer_id_integration() -> Result<()> { let config = 
make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) @@ -112,6 +113,7 @@ fn test_libp2p_listens_on_address_integration() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) @@ -173,6 +175,7 @@ fn test_rpc_listens_on_address_integration() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) @@ -230,6 +233,7 @@ fn test_websocket_listens_on_address_integration() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) @@ -258,46 +262,37 @@ fn test_websocket_listens_on_address_integration() -> Result<()> { #[test] #[serial_test::parallel] -fn test_libp2p_connect_known_peers_integration() -> Result<()> { - let proc_info1 = ProcInfo::new().unwrap(); - let proc_info2 = ProcInfo::new().unwrap(); - - let rpc_port1 = proc_info1.rpc_port; - let rpc_port2 = proc_info2.rpc_port; - let metrics_port1 = proc_info1.metrics_port; - let metrics_port2 = proc_info2.metrics_port; - let ws_port1 = proc_info1.ws_port; - let ws_port2 = proc_info2.ws_port; - let listen_addr1 = listen_addr(proc_info1.listen_port); - let listen_addr2 = listen_addr(proc_info2.listen_port); - let node_addra = multiaddr(proc_info1.listen_port, ED25519MULTIHASH); - let node_addrb = multiaddr(proc_info2.listen_port, SECP256K1MULTIHASH); - let toml1 = format!( +fn test_node_info_endpoint_integration() -> Result<()> { + let proc_info = ProcInfo::new().unwrap(); + + let rpc_port = proc_info.rpc_port; + let metrics_port = proc_info.metrics_port; + let ws_port = proc_info.ws_port; + let listen_addr = listen_addr(proc_info.listen_port); + + let toml = format!( 
r#" [node] [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" }} [node.network.libp2p] - listen_address = "{listen_addr1}" - node_addresses = ["{node_addrb}"] - bootstrap_interval = 1 + listen_address = "{listen_addr}" [node.network.libp2p.mdns] enable = false [node.network.libp2p.rendezvous] enable_client = false [node.network.metrics] - port = {metrics_port1} + port = {metrics_port} [node.network.rpc] - port = {rpc_port1} + port = {rpc_port} [node.network.webserver] - port = {ws_port1} + port = {ws_port} "# ); + let config1 = make_config!(toml); - let config1 = make_config!(toml1); - // Start two nodes configured to listen at 127.0.0.1 each with their own port. - // The nodes are configured to dial each other through the node_addresses config. let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -306,193 +301,67 @@ fn test_libp2p_connect_known_peers_integration() -> Result<()> { .arg("-c") .arg(config1.filename()) .arg("--db") - .arg(&proc_info1.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard1 = ChildGuard::new(homestar_proc1); - - if wait_for_socket_connection_v6(rpc_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let toml2 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" }} - [node.network.libp2p] - listen_address = "{listen_addr2}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port2} - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# - ); - - let config2 = make_config!(toml2); - - let homestar_proc2 = 
Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) + .arg(&proc_info.db_path) .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard2 = ChildGuard::new(homestar_proc2); + let _proc_guard1 = ChildGuard::new(homestar_proc1); - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { + if wait_for_socket_connection(ws_port, 1000).is_err() { panic!("Homestar server/runtime failed to start in time"); } tokio_test::block_on(async { // Check node endpoint to match - let http_url = format!("http://localhost:{}", ws_port2); + let http_url = format!("http://localhost:{}", ws_port); let http_resp = reqwest::get(format!("{}/node", http_url)).await.unwrap(); assert_eq!(http_resp.status(), 200); let http_resp = http_resp.json::().await.unwrap(); - assert!(http_resp["nodeInfo"]["dynamic"]["connections"] - .as_object() - .unwrap() - .get(ED25519MULTIHASH) - .unwrap() - .as_str() - .unwrap() - .parse::() - .is_ok()); - let static_info = http_resp["nodeInfo"]["static"].as_object().unwrap(); - let listeners = http_resp["nodeInfo"]["dynamic"]["listeners"] - .as_array() - .unwrap(); - assert_eq!(static_info.get("peer_id").unwrap(), SECP256K1MULTIHASH); - assert_eq!(listeners, &[listen_addr2.to_string()]); + assert_eq!( + http_resp, + serde_json::json!({ + "static": {"peer_id": ED25519MULTIHASH}, + "dynamic": {"listeners": [format!("{listen_addr}")], "connections": {}} + }) + ); }); - // Collect logs for five seconds then kill proceses. - let dead_proc1 = kill_homestar(proc_guard1.take(), Some(Duration::from_secs(5))); - let dead_proc2 = kill_homestar(proc_guard2.take(), Some(Duration::from_secs(5))); - - // Retrieve logs. - let stdout1 = retrieve_output(dead_proc1); - let stdout2 = retrieve_output(dead_proc2); - - // Check that node bootsrapped itself on the 1 second delay. 
- let bootstrapped = check_for_line_with( - stdout1.clone(), - vec!["successfully bootstrapped node", ED25519MULTIHASH], - ); - - // Check node two was added to the Kademlia table - let two_added_to_dht = check_for_line_with( - stdout1.clone(), - vec![ - "added configured node to kademlia routing table", - SECP256K1MULTIHASH, - ], - ); - - // Check that DHT routing table was updated with node two - let two_in_dht_routing_table = check_for_line_with( - stdout1.clone(), - vec![ - "kademlia routing table updated with peer", - SECP256K1MULTIHASH, - ], - ); - - // Check that node one connected to node two. - let one_connected_to_two = check_for_line_with( - stdout1, - vec!["peer connection established", SECP256K1MULTIHASH], - ); - - assert!(bootstrapped); - assert!(one_connected_to_two); - assert!(two_in_dht_routing_table); - assert!(two_added_to_dht); - - // Check node one was added to the Kademlia table - let one_addded_to_dht = check_for_line_with( - stdout2.clone(), - vec![ - "added configured node to kademlia routing table", - ED25519MULTIHASH, - ], - ); - - // Check that DHT routing table was updated with node one - let one_in_dht_routing_table = check_for_line_with( - stdout2.clone(), - vec!["kademlia routing table updated with peer", ED25519MULTIHASH], - ); - - // Check that node two connected to node one. 
- let two_connected_to_one = check_for_line_with( - stdout2, - vec!["peer connection established", ED25519MULTIHASH], - ); - - assert!(one_addded_to_dht); - assert!(one_in_dht_routing_table); - assert!(two_connected_to_one); - Ok(()) } #[test] #[serial_test::parallel] -fn test_libp2p_disconnect_known_peers_integration() -> Result<()> { - let proc_info1 = ProcInfo::new().unwrap(); - let proc_info2 = ProcInfo::new().unwrap(); - - let rpc_port1 = proc_info1.rpc_port; - let rpc_port2 = proc_info2.rpc_port; - let metrics_port1 = proc_info1.metrics_port; - let metrics_port2 = proc_info2.metrics_port; - let ws_port1 = proc_info1.ws_port; - let ws_port2 = proc_info2.ws_port; - let listen_addr1 = listen_addr(proc_info1.listen_port); - let listen_addr2 = listen_addr(proc_info2.listen_port); - let node_addra = multiaddr(proc_info1.listen_port, ED25519MULTIHASH); - let node_addrb = multiaddr(proc_info2.listen_port, SECP256K1MULTIHASH); - let toml1 = format!( +fn test_discovery_endpoint_integration() -> Result<()> { + let proc_info = ProcInfo::new().unwrap(); + + let rpc_port = proc_info.rpc_port; + let metrics_port = proc_info.metrics_port; + let ws_port = proc_info.ws_port; + let listen_addr = listen_addr(proc_info.listen_port); + + let toml = format!( r#" [node] [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" }} [node.network.libp2p] - listen_address = "{listen_addr1}" - node_addresses = ["{node_addrb}"] + listen_address = "{listen_addr}" [node.network.libp2p.mdns] enable = false [node.network.libp2p.rendezvous] enable_client = false [node.network.metrics] - port = {metrics_port1} + port = {metrics_port} [node.network.rpc] - port = {rpc_port1} + port = {rpc_port} [node.network.webserver] - port = {ws_port1} + port = {ws_port} "# ); + let config1 = make_config!(toml); - let config1 = make_config!(toml1); - // Start two nodes configured to listen at 127.0.0.1 each with their own port. 
- // The nodes are configured to dial each other through the node_addresses config. let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -501,81 +370,28 @@ fn test_libp2p_disconnect_known_peers_integration() -> Result<()> { .arg("-c") .arg(config1.filename()) .arg("--db") - .arg(&proc_info1.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard1 = ChildGuard::new(homestar_proc1); - - if wait_for_socket_connection_v6(rpc_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let toml2 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" }} - [node.network.libp2p] - listen_address = "{listen_addr2}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port2} - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# - ); - - let config2 = make_config!(toml2); - - let homestar_proc2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(proc_info2.db_path.clone()) + .arg(&proc_info.db_path) .stdout(Stdio::piped()) .spawn() .unwrap(); - let proc_guard2 = ChildGuard::new(homestar_proc2); + let _proc_guard1 = ChildGuard::new(homestar_proc1); - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { + if wait_for_socket_connection(ws_port, 1000).is_err() { panic!("Homestar server/runtime failed to start in time"); } - // Kill node two after seven seconds. 
- let _ = kill_homestar(proc_guard2.take(), Some(Duration::from_secs(7))); - - // Collect logs for eight seconds then kill node one. - let dead_proc1 = kill_homestar(proc_guard1.take(), Some(Duration::from_secs(8))); - - // Retrieve logs. - let stdout = retrieve_output(dead_proc1); - - // Check that node two disconnected from node one. - let two_disconnected_from_one = check_for_line_with( - stdout.clone(), - vec!["peer connection closed", SECP256K1MULTIHASH], - ); - - // Check that node two was not removed from the Kademlia table. - let two_removed_from_dht_table = check_for_line_with( - stdout.clone(), - vec!["removed peer from kademlia table", SECP256K1MULTIHASH], - ); + tokio_test::block_on(async { + // Check discovery endpoint to match + let http_url = format!("http://localhost:{}", ws_port); + let http_resp = reqwest::get(format!("{}/rpc_discover", http_url)) + .await + .unwrap(); + assert_eq!(http_resp.status(), 200); + let http_resp = http_resp.json::().await.unwrap(); - assert!(two_disconnected_from_one); - assert!(!two_removed_from_dht_table); + const API_SCHEMA_DOC: &str = include_str!("../schemas/api.json"); + assert_eq!(http_resp, serde_json::json!(API_SCHEMA_DOC)); + }); Ok(()) } @@ -617,6 +433,7 @@ fn test_libp2p_configured_with_known_dns_multiaddr() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) diff --git a/homestar-runtime/tests/network/notification.rs b/homestar-runtime/tests/network/connection.rs similarity index 78% rename from homestar-runtime/tests/network/notification.rs rename to homestar-runtime/tests/network/connection.rs index 993bf0d5..36f3e2a8 100644 --- a/homestar-runtime/tests/network/notification.rs +++ b/homestar-runtime/tests/network/connection.rs @@ -1,27 +1,20 @@ use crate::{ make_config, utils::{ - kill_homestar, listen_addr, multiaddr, wait_for_socket_connection, ChildGuard, ProcInfo, + 
check_for_line_with, kill_homestar, listen_addr, multiaddr, retrieve_output, + subscribe_network_events, wait_for_socket_connection, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH, SECP256K1MULTIHASH, }, }; use anyhow::Result; -use jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, - rpc_params, - ws_client::WsClientBuilder, -}; use once_cell::sync::Lazy; use std::{ - net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, time::Duration, }; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); -const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; -const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::parallel] @@ -63,6 +56,7 @@ fn test_connection_notifications_integration() -> Result<()> { let config1 = make_config!(toml); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -75,30 +69,15 @@ fn test_connection_notifications_integration() -> Result<()> { .stdout(Stdio::piped()) .spawn() .unwrap(); - let _proc_guard1 = ChildGuard::new(homestar_proc1); + let proc_guard1 = ChildGuard::new(homestar_proc1); if wait_for_socket_connection(ws_port1, 1000).is_err() { panic!("Homestar server/runtime failed to start in time"); } - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); tokio_test::block_on(async { - tokio_tungstenite::connect_async(ws_url.clone()) - .await - .unwrap(); - - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - let mut sub: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -123,6 +102,7 @@ fn 
test_connection_notifications_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -139,11 +119,11 @@ fn test_connection_notifications_integration() -> Result<()> { // Poll for connection established message loop { - if let Ok(msg) = sub.next().with_timeout(Duration::from_secs(30)).await { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -151,15 +131,15 @@ fn test_connection_notifications_integration() -> Result<()> { } } - let _ = kill_homestar(proc_guard2.take(), None); + let dead_proc2 = kill_homestar(proc_guard2.take(), None); // Poll for connection closed message loop { - if let Ok(msg) = sub.next().with_timeout(Duration::from_secs(30)).await { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionClosed" { + if json["connection_closed"].is_object() { break; } } else { @@ -167,20 +147,79 @@ fn test_connection_notifications_integration() -> Result<()> { } } - // Check node endpoint to match - let http_url = format!("http://localhost:{}", ws_port1); - let http_resp = reqwest::get(format!("{}/node", http_url)).await.unwrap(); - assert_eq!(http_resp.status(), 200); - let http_resp = http_resp.json::().await.unwrap(); - assert_eq!( - http_resp, - serde_json::json!({ - "nodeInfo": { - "static": {"peer_id": ED25519MULTIHASH}, - "dynamic": {"listeners": [format!("{listen_addr1}")], "connections": {}} - } - }) + // Kill proceses. 
+ let dead_proc1 = kill_homestar(proc_guard1.take(), None); + + // Retrieve logs. + let stdout1 = retrieve_output(dead_proc1); + let stdout2 = retrieve_output(dead_proc2); + + // Check node one added node two to Kademlia table + let two_added_to_dht = check_for_line_with( + stdout1.clone(), + vec![ + "added configured node to kademlia routing table", + SECP256K1MULTIHASH, + ], + ); + + // Check node one DHT routing table was updated with node two + let two_in_dht_routing_table = check_for_line_with( + stdout1.clone(), + vec![ + "kademlia routing table updated with peer", + SECP256K1MULTIHASH, + ], + ); + + // Check that node one connected to node two. + let one_connected_to_two = check_for_line_with( + stdout1.clone(), + vec!["peer connection established", SECP256K1MULTIHASH], + ); + + // Check that node two disconnected from node one. + let two_disconnected_from_one = check_for_line_with( + stdout1.clone(), + vec!["peer connection closed", SECP256K1MULTIHASH], ); + + // Check that node two was not removed from the Kademlia table. + let two_removed_from_dht_table = check_for_line_with( + stdout1.clone(), + vec!["removed peer from kademlia table", SECP256K1MULTIHASH], + ); + + assert!(one_connected_to_two); + assert!(two_in_dht_routing_table); + assert!(two_added_to_dht); + assert!(two_disconnected_from_one); + assert!(!two_removed_from_dht_table); + + // Check node two added node one to Kademlia table + let one_addded_to_dht = check_for_line_with( + stdout2.clone(), + vec![ + "added configured node to kademlia routing table", + ED25519MULTIHASH, + ], + ); + + // Check node two DHT routing table was updated with node one + let one_in_dht_routing_table = check_for_line_with( + stdout2.clone(), + vec!["kademlia routing table updated with peer", ED25519MULTIHASH], + ); + + // Check that node two connected to node one. 
+ let two_connected_to_one = check_for_line_with( + stdout2, + vec!["peer connection established", ED25519MULTIHASH], + ); + + assert!(one_addded_to_dht); + assert!(one_in_dht_routing_table); + assert!(two_connected_to_one); }); Ok(()) @@ -249,6 +288,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -268,22 +308,11 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -304,7 +333,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -320,7 +349,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionClosed" { + if json["connection_closed"].is_object() { break; } } else { @@ -329,6 +358,7 @@ fn 
test_libp2p_redial_on_connection_closed_integration() -> Result<()> { } let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -349,7 +379,7 @@ fn test_libp2p_redial_on_connection_closed_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -424,6 +454,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -443,22 +474,11 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -479,7 +499,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -495,7 +515,7 @@ fn 
test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionClosed" { + if json["connection_closed"].is_object() { break; } } else { @@ -509,7 +529,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:outgoingConnectionError" { + if json["outgoing_connection_error"].is_object() { break; } } else { @@ -523,7 +543,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:outgoingConnectionError" { + if json["outgoing_connection_error"].is_object() { break; } } else { @@ -532,6 +552,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { } let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -552,7 +573,7 @@ fn test_libp2p_redial_on_connection_error_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { diff --git a/homestar-runtime/tests/network/dht.rs b/homestar-runtime/tests/network/dht.rs index 1e596054..df13efff 100644 --- a/homestar-runtime/tests/network/dht.rs +++ b/homestar-runtime/tests/network/dht.rs @@ -2,9 +2,9 @@ use crate::{ make_config, utils::{ check_for_line_with, kill_homestar, listen_addr, multiaddr, retrieve_output, - wait_for_socket_connection, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, - ED25519MULTIHASH, ED25519MULTIHASH2, 
ED25519MULTIHASH3, ED25519MULTIHASH5, - SECP256K1MULTIHASH, + subscribe_network_events, wait_for_socket_connection, ChildGuard, ProcInfo, + TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH, ED25519MULTIHASH2, ED25519MULTIHASH3, + ED25519MULTIHASH5, SECP256K1MULTIHASH, }, }; use anyhow::Result; @@ -13,15 +13,9 @@ use homestar_runtime::{ db::{self, schema, Database}, Db, Settings, }; -use jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, - rpc_params, - ws_client::WsClientBuilder, -}; use libipld::Cid; use once_cell::sync::Lazy; use std::{ - net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, str::FromStr, @@ -29,8 +23,6 @@ use std::{ }; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); -const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; -const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::parallel] @@ -78,6 +70,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let config1 = make_config!(toml1); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -97,20 +90,8 @@ fn test_libp2p_dht_records_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -142,6 +123,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", 
"homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -160,20 +142,8 @@ fn test_libp2p_dht_records_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default() - .build(ws_url2.clone()) - .await - .unwrap(); - - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for connection established message loop { @@ -181,7 +151,7 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -207,13 +177,13 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:putReceiptDht" { + if json["put_receipt_dht"].is_object() { put_receipt = true; - } else if json["type"].as_str().unwrap() == "network:putWorkflowInfoDht" { + } else if json["put_workflow_info_dht"].is_object() { put_workflow_info = true; - } else if json["type"].as_str().unwrap() == "network:receiptQuorumSuccess" { + } else if json["receipt_quorum_success_dht"].is_object() { receipt_quorum_success = true; - } else if json["type"].as_str().unwrap() == "network:workflowInfoQuorumSuccess" { + } else if json["workflow_info_quorum_success_dht"].is_object() { workflow_info_quorum_success = true; } } else { @@ -260,8 +230,8 @@ fn test_libp2p_dht_records_integration() -> Result<()> { // let json: serde_json::Value = // 
serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - // if json["type"].as_str().unwrap() == "network:gotReceiptDht" { - // received_receipt_cid = Cid::from_str(json["data"]["cid"].as_str().unwrap()) + // if json["got_receipt_dht"].is_object() { + // received_receipt_cid = Cid::from_str(json["got_receipt_dht"]["cid"].as_str().unwrap()) // .expect("Unable to parse received receipt CID."); // break; // } @@ -288,9 +258,9 @@ fn test_libp2p_dht_records_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:gotWorkflowInfoDht" { + if json["got_workflow_info_dht"].is_object() { received_workflow_info_cid = - Cid::from_str(json["data"]["cid"].as_str().unwrap()) + Cid::from_str(json["got_workflow_info_dht"]["cid"].as_str().unwrap()) .expect("Unable to parse received workflow info CID."); break; } @@ -407,6 +377,7 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { let config1 = make_config!(toml1); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -426,20 +397,8 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -469,6 +428,7 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") 
.env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -493,7 +453,7 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -517,9 +477,13 @@ fn test_libp2p_dht_quorum_failure_intregration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:receiptQuorumFailure" { + if json["receipt_quorum_failure_dht"].is_object() { + receipt_quorum_failure = true + } + + if json["receipt_quorum_failure_dht"].is_object() { receipt_quorum_failure = true - } else if json["type"].as_str().unwrap() == "network:workflowInfoQuorumFailure" { + } else if json["workflow_info_quorum_failure_dht"].is_object() { workflow_info_quorum_failure = true } } else { @@ -608,6 +572,7 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let config1 = make_config!(toml1); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -627,20 +592,8 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -672,6 +625,7 @@ fn 
test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -690,20 +644,8 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { panic!("Homestar server/runtime failed to start in time"); } - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default() - .build(ws_url2.clone()) - .await - .unwrap(); - - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Poll for connection established message loop { @@ -711,7 +653,7 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -752,9 +694,10 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:sentWorkflowInfo" { - sent_workflow_info_cid = Cid::from_str(json["data"]["cid"].as_str().unwrap()) - .expect("Unable to parse sent workflow info CID."); + if json["sent_workflow_info"].is_object() { + sent_workflow_info_cid = + Cid::from_str(json["sent_workflow_info"]["cid"].as_str().unwrap()) + .expect("Unable to parse sent workflow info CID."); break; } } else { @@ -771,9 +714,9 @@ fn test_libp2p_dht_workflow_info_provider_integration() -> Result<()> { let json: serde_json::Value = 
serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:receivedWorkflowInfo" { + if json["received_workflow_info"].is_object() { received_workflow_info_cid = - Cid::from_str(json["data"]["cid"].as_str().unwrap()) + Cid::from_str(json["received_workflow_info"]["cid"].as_str().unwrap()) .expect("Unable to parse received workflow info CID."); break; } @@ -866,9 +809,9 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> // 2. Wait for connection between a and b to be established // 3. Wait for connection between a and c to be established // 4. Run workflow on a - // 5. Wait for network:putWorkflowInfoDht on a + // 5. Wait for put_workflow_info_dht on a // 6. Run workflow on b - // 7. Wait for network:GotWorkflowInfoDht on b + // 7. Wait for got_workflow_info_dht on b // 8. Delete a's DB // 9. Run workflow on c // 10. Wait for network:receivedWorkflowInfo on c (from b, through a) @@ -923,6 +866,8 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> tokio_test::block_on(async move { let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -941,20 +886,8 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> panic!("Homestar server/runtime failed to start in time"); } - let ws_url1 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client1 = WsClientBuilder::default() - .build(ws_url1.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client1 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -986,6 +919,7 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() 
-> Result<()> let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -1004,20 +938,8 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> panic!("Homestar server/runtime failed to start in time"); } - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default() - .build(ws_url2.clone()) - .await - .unwrap(); - - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); let toml3 = format!( r#" @@ -1049,6 +971,7 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> let config3 = make_config!(toml3); let homestar_proc3 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -1067,20 +990,8 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> panic!("Homestar server/runtime failed to start in time"); } - let ws_url3 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port3); - let client3 = WsClientBuilder::default() - .build(ws_url3.clone()) - .await - .unwrap(); - - let mut sub3: Subscription> = client3 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events3 = subscribe_network_events(ws_port3).await; + let sub3 = net_events3.sub(); // Poll node one for connection established with node two message loop { @@ -1088,10 +999,11 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> let json: serde_json::Value = 
serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - println!("node1: {json}"); - - if json["type"].as_str().unwrap() == "network:connectionEstablished" { - assert_eq!(json["data"]["peerId"], SECP256K1MULTIHASH.to_string()); + if json["connection_established"].is_object() { + assert_eq!( + json["connection_established"]["peer_id"], + SECP256K1MULTIHASH.to_string() + ); break; } @@ -1108,8 +1020,11 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> println!("node1: {json}"); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { - assert_eq!(json["data"]["peerId"], ED25519MULTIHASH2.to_string()); + if json["connection_established"].is_object() { + assert_eq!( + json["connection_established"]["peerId"], + ED25519MULTIHASH2.to_string() + ); break; } @@ -1134,9 +1049,9 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> println!("node1: {json}"); - if json["type"].as_str().unwrap() == "network:putWorkflowInfoDht" { + if json["put_workflow_info_dht"].is_object() { assert_eq!( - json["data"]["cid"].as_str().unwrap(), + json["put_workflow_info_dht"]["cid"].as_str().unwrap(), "bafyrmihctgawsskx54qyt3clcaq2quc42pqxzhr73o6qjlc3rc4mhznotq" ); @@ -1163,9 +1078,9 @@ fn test_libp2p_dht_workflow_info_provider_recursive_integration() -> Result<()> println!("node2: {json}"); - if json["type"].as_str().unwrap() == "network:gotWorkflowInfoDht" { + if json["got_workflow_info_dht"].is_object() { assert_eq!( - json["data"]["cid"].as_str().unwrap(), + json["got_workflow_info_dht"]["cid"].as_str().unwrap(), "bafyrmihctgawsskx54qyt3clcaq2quc42pqxzhr73o6qjlc3rc4mhznotq" ); diff --git a/homestar-runtime/tests/network/gossip.rs b/homestar-runtime/tests/network/gossip.rs index a38292ca..cace455d 100644 --- a/homestar-runtime/tests/network/gossip.rs +++ b/homestar-runtime/tests/network/gossip.rs @@ -2,22 +2,16 @@ use crate::{ make_config, utils::{ check_for_line_with, kill_homestar, listen_addr, multiaddr, 
retrieve_output, - wait_for_socket_connection, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, - ED25519MULTIHASH, SECP256K1MULTIHASH, + subscribe_network_events, wait_for_socket_connection, ChildGuard, ProcInfo, + TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH, SECP256K1MULTIHASH, }, }; use anyhow::Result; use homestar_runtime::{db::Database, Db, Settings}; use itertools::Itertools; -use jsonrpsee::{ - core::client::{Subscription, SubscriptionClientT}, - rpc_params, - ws_client::WsClientBuilder, -}; use libipld::Cid; use once_cell::sync::Lazy; use std::{ - net::Ipv4Addr, path::PathBuf, process::{Command, Stdio}, str::FromStr, @@ -25,8 +19,6 @@ use std::{ }; static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)); -const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; -const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; #[test] #[serial_test::parallel] @@ -67,6 +59,7 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { ); let config1 = make_config!(toml); let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -86,20 +79,8 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { } tokio_test::block_on(async { - let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port1); - let client = WsClientBuilder::default() - .build(ws_url.clone()) - .await - .unwrap(); - - let mut sub1: Subscription> = client - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); let toml2 = format!( r#" @@ -124,6 +105,7 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { let config2 = make_config!(toml2); let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( 
"RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -148,7 +130,7 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:connectionEstablished" { + if json["connection_established"].is_object() { break; } } else { @@ -156,20 +138,8 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { } } - let ws_url2 = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port2); - let client2 = WsClientBuilder::default() - .build(ws_url2.clone()) - .await - .unwrap(); - - let mut sub2: Subscription> = client2 - .subscribe( - SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - rpc_params![], - UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, - ) - .await - .unwrap(); + let mut net_events2 = subscribe_network_events(ws_port2).await; + let sub2 = net_events2.sub(); // Run test workflow on node one let _ = Command::new(BIN.as_os_str()) @@ -186,9 +156,9 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:publishedReceiptPubsub" { + if json["published_receipt_pubsub"].is_object() { published_cids.push( - Cid::from_str(json["data"]["cid"].as_str().unwrap()) + Cid::from_str(json["published_receipt_pubsub"]["cid"].as_str().unwrap()) .expect("Unable to parse published receipt CID."), ); } @@ -208,9 +178,9 @@ fn test_libp2p_receipt_gossip_integration() -> Result<()> { let json: serde_json::Value = serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); - if json["type"].as_str().unwrap() == "network:receivedReceiptPubsub" { + if json["received_receipt_pubsub"].is_object() { received_cids.push( - Cid::from_str(json["data"]["cid"].as_str().unwrap()) + Cid::from_str(json["received_receipt_pubsub"]["cid"].as_str().unwrap()) .expect("Unable to parse received receipt CID."), ); } diff 
--git a/homestar-runtime/tests/network/mdns.rs b/homestar-runtime/tests/network/mdns.rs index 0181dd9c..00b312b8 100644 --- a/homestar-runtime/tests/network/mdns.rs +++ b/homestar-runtime/tests/network/mdns.rs @@ -1,9 +1,9 @@ use crate::{ make_config, utils::{ - check_for_line_with, kill_homestar, retrieve_output, wait_for_socket_connection, - wait_for_socket_connection_v6, ChildGuard, ProcInfo, BIN_NAME, ED25519MULTIHASH2, - ED25519MULTIHASH4, ED25519MULTIHASH5, + check_for_line_with, kill_homestar, retrieve_output, subscribe_network_events, + wait_for_socket_connection, wait_for_socket_connection_v6, ChildGuard, ProcInfo, + TimeoutFutureExt, BIN_NAME, ED25519MULTIHASH2, ED25519MULTIHASH5, }, }; use anyhow::Result; @@ -18,7 +18,7 @@ static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)) #[test] #[serial_test::file_serial] -fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { +fn test_libp2p_connection_after_mdns_discovery_serial() -> Result<()> { let proc_info1 = ProcInfo::new().unwrap(); let proc_info2 = ProcInfo::new().unwrap(); @@ -48,9 +48,10 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { ); let config1 = make_config!(toml1); - // Start two nodes each configured to listen at 0.0.0.0 with no known peers. - // The nodes are configured with port 0 to allow the OS to select a port. + // Start a node configured to listen at 0.0.0.0 with no known peers. + // The node is configured with port 0 to allow the OS to select a port. 
let homestar_proc1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -71,224 +72,177 @@ fn test_libp2p_connect_after_mdns_discovery_serial() -> Result<()> { let toml2 = format!( r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_5.pem" }} - [node.network.libp2p] - listen_address = "/ip4/0.0.0.0/tcp/0" - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.metrics] - port = {metrics_port2} - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# - ); - let config2 = make_config!(toml2); - - let homestar_proc2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard2 = ChildGuard::new(homestar_proc2); - - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Collect logs for seven seconds then kill processes. - let dead_proc1 = kill_homestar(proc_guard1.take(), Some(Duration::from_secs(7))); - let dead_proc2 = kill_homestar(proc_guard2.take(), Some(Duration::from_secs(7))); - - // Retrieve logs. - let stdout1 = retrieve_output(dead_proc1); - let stdout2 = retrieve_output(dead_proc2); - - // Check that node one connected to node two. 
- let one_connected_to_two = check_for_line_with( - stdout1.clone(), - vec!["peer connection established", ED25519MULTIHASH5], - ); - - // Check node two was added to the Kademlia table - let two_addded_to_dht = check_for_line_with( - stdout1.clone(), - vec![ - "added identified node to kademlia routing table", - ED25519MULTIHASH5, - ], - ); - - // Check that DHT routing table was updated with node two - let two_in_dht_routing_table = check_for_line_with( - stdout1, - vec![ - "kademlia routing table updated with peer", - ED25519MULTIHASH5, - ], - ); - - assert!(one_connected_to_two); - assert!(two_addded_to_dht); - assert!(two_in_dht_routing_table); - - // Check that node two connected to node one. - let two_connected_to_one = check_for_line_with( - stdout2.clone(), - vec!["peer connection established", ED25519MULTIHASH2], - ); - - // Check node one was added to the Kademlia table - let one_addded_to_dht = check_for_line_with( - stdout2.clone(), - vec![ - "added identified node to kademlia routing table", - ED25519MULTIHASH2, - ], - ); - - // Check that DHT routing table was updated with node one - let one_in_dht_routing_table = check_for_line_with( - stdout2, - vec![ - "kademlia routing table updated with peer", - ED25519MULTIHASH2, - ], - ); - - assert!(two_connected_to_one); - assert!(one_addded_to_dht); - assert!(one_in_dht_routing_table); - - Ok(()) -} - -#[test] -#[serial_test::file_serial] -fn test_libp2p_disconnect_mdns_discovery_serial() -> Result<()> { - // Start two nodes each configured to listen at 0.0.0.0 with no known peers. - // The nodes are configured with port 0 to allow the OS to select a port. 
- - let proc_info1 = ProcInfo::new().unwrap(); - let proc_info2 = ProcInfo::new().unwrap(); - - let rpc_port1 = proc_info1.rpc_port; - let rpc_port2 = proc_info2.rpc_port; - let metrics_port1 = proc_info1.metrics_port; - let metrics_port2 = proc_info2.metrics_port; - let ws_port1 = proc_info1.ws_port; - let ws_port2 = proc_info2.ws_port; - - let toml1 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_3.pem" }} - [node.network.libp2p] - listen_address = "/ip4/0.0.0.0/tcp/0" - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.metrics] - port = {metrics_port1} - [node.network.rpc] - port = {rpc_port1} - [node.network.webserver] - port = {ws_port1} - "# - ); - let config1 = make_config!(toml1); - - let homestar_proc1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config1.filename()) - .arg("--db") - .arg(&proc_info1.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard1 = ChildGuard::new(homestar_proc1); - - if wait_for_socket_connection(ws_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let toml2 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_4.pem" }} - [node.network.libp2p] - listen_address = "/ip4/0.0.0.0/tcp/0" - [node.network.libp2p.rendezvous] - enable_client = false - [node.network.metrics] - port = {metrics_port2} - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# + [node] + [node.network.keypair_config] + existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_5.pem" }} + [node.network.libp2p] + listen_address = "/ip4/0.0.0.0/tcp/0" + [node.network.libp2p.rendezvous] + enable_client = false + 
[node.network.metrics] + port = {metrics_port2} + [node.network.rpc] + port = {rpc_port2} + [node.network.webserver] + port = {ws_port2} + "# ); let config2 = make_config!(toml2); - let homestar_proc2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard2 = ChildGuard::new(homestar_proc2); - - if wait_for_socket_connection(ws_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Kill node two after seven seconds. - let _ = kill_homestar(proc_guard2.take(), Some(Duration::from_secs(7))); - - // Collect logs for eight seconds then kill node one. - let dead_proc1 = kill_homestar(proc_guard1.take(), Some(Duration::from_secs(8))); - - // Retrieve logs. - let stdout = retrieve_output(dead_proc1); - - // Check that node two disconnected from node one. - let two_disconnected_from_one = check_for_line_with( - stdout.clone(), - vec!["peer connection closed", ED25519MULTIHASH4], - ); - - // Check that node two was removed from the Kademlia table - let two_removed_from_dht_table = check_for_line_with( - stdout.clone(), - vec!["removed peer from kademlia table", ED25519MULTIHASH4], - ); - - assert!(two_disconnected_from_one); - assert!(two_removed_from_dht_table); + tokio_test::block_on(async { + // Subscribe to node one + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); + + // Start a second node configured to listen at 0.0.0.0 with no known peers. + // The node is configured with port 0 to allow the OS to select a port. 
+ let homestar_proc2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let proc_guard2 = ChildGuard::new(homestar_proc2); + + if wait_for_socket_connection(ws_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + // Poll for mDNS discovered message and connection established messages on node one. + let mut discovered_mdns = false; + let mut connection_established = false; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["discovered_mdns"].is_object() { + discovered_mdns = true; + } else if json["connection_established"].is_object() + && json["connection_established"]["peer_id"] == ED25519MULTIHASH5 + { + connection_established = true; + } + } else { + panic!( + r#"Expected notifications from node one did not arrive in time: + - mDNS discovered: {} + - Connection established: {} + "#, + discovered_mdns, connection_established + ); + } + + if connection_established && discovered_mdns { + break; + } + } + + // Kill node two. + let dead_proc2 = kill_homestar(proc_guard2.take(), None); + + // Poll for client two disconnected from client one. + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["connection_closed"].is_object() + && json["connection_closed"]["peer_id"] == ED25519MULTIHASH5 + { + break; + } + } else { + panic!("Client two did not receive rendezvous discovery from server in time"); + } + } + + // Kill node one. 
+ let dead_proc1 = kill_homestar(proc_guard1.take(), None); + + // Retrieve logs. + let stdout1 = retrieve_output(dead_proc1); + let stdout2 = retrieve_output(dead_proc2); + + // Check that node one connected to node two. + let one_connected_to_two = check_for_line_with( + stdout1.clone(), + vec!["peer connection established", ED25519MULTIHASH5], + ); + + // Check node two was added to the Kademlia table + let two_addded_to_dht = check_for_line_with( + stdout1.clone(), + vec![ + "added identified node to kademlia routing table", + ED25519MULTIHASH5, + ], + ); + + // Check that DHT routing table was updated with node two + let two_in_dht_routing_table = check_for_line_with( + stdout1.clone(), + vec![ + "kademlia routing table updated with peer", + ED25519MULTIHASH5, + ], + ); + + assert!(one_connected_to_two); + assert!(two_addded_to_dht); + assert!(two_in_dht_routing_table); + + // Check that node two connected to node one. + let two_connected_to_one = check_for_line_with( + stdout2.clone(), + vec!["peer connection established", ED25519MULTIHASH2], + ); + + // Check node one was added to the Kademlia table + let one_addded_to_dht = check_for_line_with( + stdout2.clone(), + vec![ + "added identified node to kademlia routing table", + ED25519MULTIHASH2, + ], + ); + + // Check that DHT routing table was updated with node one + let one_in_dht_routing_table = check_for_line_with( + stdout2, + vec![ + "kademlia routing table updated with peer", + ED25519MULTIHASH2, + ], + ); + + assert!(two_connected_to_one); + assert!(one_addded_to_dht); + assert!(one_in_dht_routing_table); + + // Check that node two disconnected from node one. 
+ let two_disconnected_from_one = check_for_line_with( + stdout1.clone(), + vec!["peer connection closed", ED25519MULTIHASH5], + ); + + // Check that node two was removed from the Kademlia table + let two_removed_from_dht_table = check_for_line_with( + stdout1.clone(), + vec!["removed peer from kademlia table", ED25519MULTIHASH5], + ); + + assert!(two_disconnected_from_one); + assert!(two_removed_from_dht_table); + }); Ok(()) } diff --git a/homestar-runtime/tests/network/rendezvous.rs b/homestar-runtime/tests/network/rendezvous.rs index a126b36f..005ef8ee 100644 --- a/homestar-runtime/tests/network/rendezvous.rs +++ b/homestar-runtime/tests/network/rendezvous.rs @@ -2,9 +2,10 @@ use crate::{ make_config, utils::{ check_for_line_with, count_lines_where, kill_homestar, listen_addr, multiaddr, - retrieve_output, wait_for_socket_connection, wait_for_socket_connection_v6, ChildGuard, - ProcInfo, BIN_NAME, ED25519MULTIHASH, ED25519MULTIHASH2, ED25519MULTIHASH3, - ED25519MULTIHASH4, ED25519MULTIHASH5, SECP256K1MULTIHASH, + retrieve_output, subscribe_network_events, wait_for_socket_connection, + wait_for_socket_connection_v6, ChildGuard, ProcInfo, TimeoutFutureExt, BIN_NAME, + ED25519MULTIHASH, ED25519MULTIHASH2, ED25519MULTIHASH3, ED25519MULTIHASH4, + ED25519MULTIHASH5, SECP256K1MULTIHASH, }, }; use anyhow::Result; @@ -12,7 +13,6 @@ use once_cell::sync::Lazy; use std::{ path::PathBuf, process::{Command, Stdio}, - thread, time::Duration, }; @@ -20,7 +20,7 @@ static BIN: Lazy = Lazy::new(|| assert_cmd::cargo::cargo_bin(BIN_NAME)) #[test] #[serial_test::parallel] -fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { +fn test_libp2p_connection_rendezvous_discovery_integration() -> Result<()> { let proc_info1 = ProcInfo::new().unwrap(); let proc_info2 = ProcInfo::new().unwrap(); let proc_info3 = ProcInfo::new().unwrap(); @@ -63,6 +63,7 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { // Start a rendezvous server let 
rendezvous_server = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -102,300 +103,197 @@ fn test_libp2p_connect_rendezvous_discovery_integration() -> Result<()> { ); let config2 = make_config!(toml2); - // Start a peer that will register with the rendezvous server - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client1 = ChildGuard::new(rendezvous_client1); - - if wait_for_socket_connection(ws_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Wait for registration to complete - // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep - thread::sleep(Duration::from_secs(2)); - - let toml3 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr3}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port3} - [node.network.rpc] - port = {rpc_port3} - [node.network.webserver] - port = {ws_port3} - "# - ); - let config3 = make_config!(toml3); - - // Start a peer that will discover the registrant through the rendezvous server - let rendezvous_client2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config3.filename()) - .arg("--db") - .arg(&proc_info3.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let 
proc_guard_client2 = ChildGuard::new(rendezvous_client2); - - if wait_for_socket_connection(ws_port3, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Collect logs for five seconds then kill proceses. - let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(5))); - let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(5))); - let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(5))); - - // Retrieve logs. - let stdout_server = retrieve_output(dead_server); - let stdout_client2 = retrieve_output(dead_client2); - - // Check rendezvous server registered the client one - let registered_client_one = check_for_line_with( - stdout_server.clone(), - vec!["registered peer through rendezvous", SECP256K1MULTIHASH], - ); - - // Check rendezvous served a discover request to client two - let served_discovery_to_client_two = check_for_line_with( - stdout_server.clone(), - vec![ - "served rendezvous discover request to peer", - ED25519MULTIHASH2, - ], - ); - - assert!(registered_client_one); - assert!(served_discovery_to_client_two); - - // Check that client two connected to client one. 
- let two_connected_to_one = check_for_line_with( - stdout_client2.clone(), - vec!["peer connection established", SECP256K1MULTIHASH], - ); - - // Check client one was added to the Kademlia table - let one_addded_to_dht = check_for_line_with( - stdout_client2.clone(), - vec![ - "added identified node to kademlia routing table", - SECP256K1MULTIHASH, - ], - ); - - // Check that DHT routing table was updated with client one - let one_in_dht_routing_table = check_for_line_with( - stdout_client2.clone(), - vec![ - "kademlia routing table updated with peer", - SECP256K1MULTIHASH, - ], - ); - - assert!(one_addded_to_dht); - assert!(one_in_dht_routing_table); - assert!(two_connected_to_one); - - Ok(()) -} - -#[test] -#[serial_test::parallel] -fn test_libp2p_disconnect_rendezvous_discovery_integration() -> Result<()> { - let proc_info1 = ProcInfo::new().unwrap(); - let proc_info2 = ProcInfo::new().unwrap(); - let proc_info3 = ProcInfo::new().unwrap(); - - let rpc_port1 = proc_info1.rpc_port; - let rpc_port2 = proc_info2.rpc_port; - let rpc_port3 = proc_info3.rpc_port; - let metrics_port1 = proc_info1.metrics_port; - let metrics_port2 = proc_info2.metrics_port; - let metrics_port3 = proc_info3.metrics_port; - let ws_port1 = proc_info1.ws_port; - let ws_port2 = proc_info2.ws_port; - let ws_port3 = proc_info3.ws_port; - let listen_addr1 = listen_addr(proc_info1.listen_port); - let listen_addr2 = listen_addr(proc_info2.listen_port); - let listen_addr3 = listen_addr(proc_info3.listen_port); - let announce_addrb = multiaddr(proc_info2.listen_port, SECP256K1MULTIHASH); - let node_addra = multiaddr(proc_info1.listen_port, ED25519MULTIHASH); - - let toml1 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr1}" - [node.network.libp2p.rendezvous] - enable_server = true - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - 
port = {metrics_port1} - [node.network.rpc] - port = {rpc_port1} - [node.network.webserver] - port = {ws_port1} - "# - ); - let config1 = make_config!(toml1); - - // Start a rendezvous server - let rendezvous_server = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config1.filename()) - .arg("--db") - .arg(&proc_info1.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_server = ChildGuard::new(rendezvous_server); - - if wait_for_socket_connection(ws_port1, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - let toml2 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "secp256k1", path = "./fixtures/__testkey_secp256k1.der" }} - [node.network.libp2p] - listen_address = "{listen_addr2}" - announce_addresses = ["{announce_addrb}"] - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port2} - [node.network.rpc] - port = {rpc_port2} - [node.network.webserver] - port = {ws_port2} - "# - ); - let config2 = make_config!(toml2); - - // Start a peer that will register with the rendezvous server - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client1 = ChildGuard::new(rendezvous_client1); - - if wait_for_socket_connection(ws_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Wait for registration to complete. - // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep. 
- thread::sleep(Duration::from_secs(2)); - - let toml3 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr3}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port3} - [node.network.rpc] - port = {rpc_port3} - [node.network.webserver] - port = {ws_port3} - "# - ); - let config3 = make_config!(toml3); - - // Start a peer that will discover the registrant through the rendezvous server - let rendezvous_client2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config3.filename()) - .arg("--db") - .arg(&proc_info3.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client2 = ChildGuard::new(rendezvous_client2); - - if wait_for_socket_connection(ws_port3, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Kill server and client one after five seconds - let _ = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(5))); - let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(5))); - - // Collect logs for seven seconds then kill process. - let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(7))); - - // Retrieve logs. - let stdout = retrieve_output(dead_client2); - - // Check that client two disconnected from client one. 
- let two_disconnected_from_one = check_for_line_with( - stdout.clone(), - vec!["peer connection closed", SECP256K1MULTIHASH], - ); - - // Check that client two was removed from the Kademlia table - let two_removed_from_dht_table = check_for_line_with( - stdout.clone(), - vec!["removed peer from kademlia table", SECP256K1MULTIHASH], - ); - - assert!(two_disconnected_from_one); - assert!(two_removed_from_dht_table); + tokio_test::block_on(async { + // Subscribe to rendezvous server + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); + + // Start a peer that will register with the rendezvous server + let rendezvous_client1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let proc_guard_client1 = ChildGuard::new(rendezvous_client1); + + if wait_for_socket_connection(ws_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + // Poll for server registered client one + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["peer_registered_rendezvous"].is_object() + && json["peer_registered_rendezvous"]["peer_id"] == SECP256K1MULTIHASH + { + break; + } + } else { + panic!("Rendezvous server did not confirm client one registration in time"); + } + } + + // Start a peer that will discover the registrant through the rendezvous server + let toml3 = format!( + r#" + [node] + [node.network.keypair_config] + existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} + [node.network.libp2p] + listen_address = "{listen_addr3}" + node_addresses = ["{node_addra}"] + 
[node.network.libp2p.mdns] + enable = false + [node.network.metrics] + port = {metrics_port3} + [node.network.rpc] + port = {rpc_port3} + [node.network.webserver] + port = {ws_port3} + "# + ); + let config3 = make_config!(toml3); + + let rendezvous_client2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config3.filename()) + .arg("--db") + .arg(&proc_info3.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let proc_guard_client2 = ChildGuard::new(rendezvous_client2); + + if wait_for_socket_connection(ws_port3, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + // Subscribe to rendezvous client two + let mut net_events3 = subscribe_network_events(ws_port3).await; + let sub3 = net_events3.sub(); + + // Poll for discovery served by rendezvous server + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["discover_served_rendezvous"].is_object() + && json["discover_served_rendezvous"]["enquirer"] == ED25519MULTIHASH2 + { + break; + } + } else { + panic!("Rendezvous server did not serve discovery to client two in time"); + } + } + + // Kill server and registrant. + let dead_server = kill_homestar(proc_guard_server.take(), None); + let _ = kill_homestar(proc_guard_client1.take(), None); + + // Poll for client two disconnected from client one. 
+ loop { + if let Ok(msg) = sub3.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["connection_closed"].is_object() + && json["connection_closed"]["peer_id"] == SECP256K1MULTIHASH + { + break; + } + } else { + panic!("Client two did not receive rendezvous discovery from server in time"); + } + } + + // Kill discoverer. + let dead_client2 = kill_homestar(proc_guard_client2.take(), None); + + // Retrieve logs. + let stdout_server = retrieve_output(dead_server); + let stdout_client2 = retrieve_output(dead_client2); + + // Check rendezvous server registered the client one + let registered_client_one = check_for_line_with( + stdout_server.clone(), + vec!["registered peer through rendezvous", SECP256K1MULTIHASH], + ); + + // Check rendezvous served a discover request to client two + let served_discovery_to_client_two = check_for_line_with( + stdout_server.clone(), + vec![ + "served rendezvous discover request to peer", + ED25519MULTIHASH2, + ], + ); + + assert!(registered_client_one); + assert!(served_discovery_to_client_two); + + // Check that client two connected to client one. + let two_connected_to_one = check_for_line_with( + stdout_client2.clone(), + vec!["peer connection established", SECP256K1MULTIHASH], + ); + + // Check client one was added to the Kademlia table + let one_addded_to_dht = check_for_line_with( + stdout_client2.clone(), + vec![ + "added identified node to kademlia routing table", + SECP256K1MULTIHASH, + ], + ); + + // Check that DHT routing table was updated with client one + let one_in_dht_routing_table = check_for_line_with( + stdout_client2.clone(), + vec![ + "kademlia routing table updated with peer", + SECP256K1MULTIHASH, + ], + ); + + assert!(one_addded_to_dht); + assert!(one_in_dht_routing_table); + assert!(two_connected_to_one); + + // Check that client two disconnected from client one. 
+ let two_disconnected_from_one = check_for_line_with( + stdout_client2.clone(), + vec!["peer connection closed", SECP256K1MULTIHASH], + ); + + // Check that client two was removed from the Kademlia table + let two_removed_from_dht_table = check_for_line_with( + stdout_client2.clone(), + vec!["removed peer from kademlia table", SECP256K1MULTIHASH], + ); + + assert!(two_disconnected_from_one); + assert!(two_removed_from_dht_table); + }); Ok(()) } @@ -440,6 +338,7 @@ fn test_libp2p_rendezvous_renew_registration_integration() -> Result<()> { // Start a rendezvous server let rendezvous_server = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -480,53 +379,81 @@ fn test_libp2p_rendezvous_renew_registration_integration() -> Result<()> { ); let config2 = make_config!(toml2); - // Start a peer that will renew registrations with the rendezvous server once per second - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - - if wait_for_socket_connection(ws_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Collect logs for five seconds then kill proceses. - let dead_server = kill_homestar(rendezvous_server, Some(Duration::from_secs(5))); - let dead_client = kill_homestar(rendezvous_client1, Some(Duration::from_secs(5))); - - // Retrieve logs. 
- let stdout_server = retrieve_output(dead_server); - let stdout_client = retrieve_output(dead_client); - - // Count registrations on the server - let server_registration_count = count_lines_where( - stdout_server, - vec![ - "registered peer through rendezvous", - "12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq", - ], - ); - - // Count registrations on the client - let client_registration_count = count_lines_where( - stdout_client, - vec![ - "registered self with rendezvous node", - "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", - ], - ); - - assert!(server_registration_count > 1); - assert!(client_registration_count > 1); + tokio_test::block_on(async { + // Subscribe to rendezvous server + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); + + // Start a peer that will renew registrations with the rendezvous server once per second + let rendezvous_client1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + + if wait_for_socket_connection(ws_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + // Poll for server registered client twice. + let mut peer_registered_count = 0; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["peer_registered_rendezvous"].is_object() + && json["peer_registered_rendezvous"]["peer_id"] == ED25519MULTIHASH3 + { + peer_registered_count += 1; + } + } else { + panic!("Server did not register client twice in time"); + } + + if peer_registered_count == 2 { + break; + } + } + + // Collect logs for five seconds then kill proceses. 
+ let dead_server = kill_homestar(rendezvous_server, None); + let dead_client = kill_homestar(rendezvous_client1, None); + + // Retrieve logs. + let stdout_server = retrieve_output(dead_server); + let stdout_client = retrieve_output(dead_client); + + // Count registrations on the server + let server_registration_count = count_lines_where( + stdout_server, + vec![ + "registered peer through rendezvous", + "12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq", + ], + ); + + // Count registrations on the client + let client_registration_count = count_lines_where( + stdout_client, + vec![ + "registered self with rendezvous node", + "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", + ], + ); + + assert!(server_registration_count > 1); + assert!(client_registration_count > 1); + }); Ok(()) } @@ -570,6 +497,7 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { // Start a rendezvous server let rendezvous_server = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -591,6 +519,8 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { let toml2 = format!( r#" [node] + [node.network] + poll_cache_interval = 100 [node.network.keypair_config] existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_4.pem" }} [node.network.libp2p] @@ -610,54 +540,82 @@ fn test_libp2p_rendezvous_rediscovery_integration() -> Result<()> { ); let config2 = make_config!(toml2); - // Start a peer that will discover with the rendezvous server once per second - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") - .arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client1 = 
ChildGuard::new(rendezvous_client1); - - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Collect logs for five seconds then kill proceses. - let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(5))); - let dead_client = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(5))); - - // Retrieve logs. - let stdout_server = retrieve_output(dead_server); - let stdout_client = retrieve_output(dead_client); - - // Count discover requests on the server - let server_discovery_count = count_lines_where( - stdout_server, - vec![ - "served rendezvous discover request to peer", - ED25519MULTIHASH4, - ], - ); - - // Count discovery responses the client - let client_discovery_count = count_lines_where( - stdout_client, - vec![ - "received discovery from rendezvous server", - ED25519MULTIHASH, - ], - ); - - assert!(server_discovery_count > 1); - assert!(client_discovery_count > 1); + tokio_test::block_on(async { + // Subscribe to rendezvous server + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); + + // Start a peer that will discover with the rendezvous server once per second + let rendezvous_client1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let proc_guard_client1 = ChildGuard::new(rendezvous_client1); + + if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + // Poll until the server has served discovery twice + let mut discover_served_count = 0; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json:
serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["discover_served_rendezvous"].is_object() + && json["discover_served_rendezvous"]["enquirer"] == ED25519MULTIHASH4 + { + discover_served_count += 1; + } + } else { + panic!("Server did not provide discovery twice in time"); + } + + if discover_served_count == 2 { + break; + } + } + + // Kill processes. + let dead_server = kill_homestar(proc_guard_server.take(), None); + let dead_client = kill_homestar(proc_guard_client1.take(), None); + + // Retrieve logs. + let stdout_server = retrieve_output(dead_server); + let stdout_client = retrieve_output(dead_client); + + // Count discover requests on the server + let server_discovery_count = count_lines_where( + stdout_server, + vec![ + "served rendezvous discover request to peer", + ED25519MULTIHASH4, + ], + ); + + // Count discovery responses the client + let client_discovery_count = count_lines_where( + stdout_client, + vec![ + "received discovery from rendezvous server", + ED25519MULTIHASH, + ], + ); + + assert!(server_discovery_count > 1); + assert!(client_discovery_count > 1); + }); Ok(()) } @@ -707,6 +665,7 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { // Start a rendezvous server let rendezvous_server = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .env( "RUST_LOG", "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", @@ -748,102 +707,143 @@ fn test_libp2p_rendezvous_rediscover_on_expiration_integration() -> Result<()> { ); let config2 = make_config!(toml2); - // Start a peer that will renew registrations with the rendezvous server every five seconds - let rendezvous_client1 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config2.filename()) - .arg("--db") -
.arg(&proc_info2.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client1 = ChildGuard::new(rendezvous_client1); - - if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Wait for registration to complete. - // TODO When we have WebSocket push events, listen on a registration event instead of using an arbitrary sleep. - thread::sleep(Duration::from_secs(2)); - - // Start a peer that will discover with the rendezvous server when - // a discovered registration expires. Note that by default discovery only - // occurs every ten minutes, so discovery requests in this test are driven - // by expirations. - let toml3 = format!( - r#" - [node] - [node.network.keypair_config] - existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} - [node.network.libp2p] - listen_address = "{listen_addr3}" - node_addresses = ["{node_addra}"] - [node.network.libp2p.mdns] - enable = false - [node.network.metrics] - port = {metrics_port3} - [node.network.rpc] - port = {rpc_port3} - [node.network.webserver] - port = {ws_port3} - "# - ); - let config3 = make_config!(toml3); - - let rendezvous_client2 = Command::new(BIN.as_os_str()) - .env( - "RUST_LOG", - "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", - ) - .arg("start") - .arg("-c") - .arg(config3.filename()) - .arg("--db") - .arg(&proc_info3.db_path) - .stdout(Stdio::piped()) - .spawn() - .unwrap(); - let proc_guard_client2 = ChildGuard::new(rendezvous_client2); - - if wait_for_socket_connection(ws_port3, 1000).is_err() { - panic!("Homestar server/runtime failed to start in time"); - } - - // Collect logs for seven seconds then kill proceses. 
- let dead_server = kill_homestar(proc_guard_server.take(), Some(Duration::from_secs(7))); - let _ = kill_homestar(proc_guard_client1.take(), Some(Duration::from_secs(7))); - let dead_client2 = kill_homestar(proc_guard_client2.take(), Some(Duration::from_secs(7))); - - // Retrieve logs. - let stdout_server = retrieve_output(dead_server); - let stdout_client2 = retrieve_output(dead_client2); - - // Count discover requests on the server - let server_discovery_count = count_lines_where( - stdout_server, - vec![ - "served rendezvous discover request to peer", - "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", - ], - ); - - // Count discovery responses the client - let client_discovery_count = count_lines_where( - stdout_client2, - vec![ - "received discovery from rendezvous server", - "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", - ], - ); - - assert!(server_discovery_count > 1); - assert!(client_discovery_count > 1); + tokio_test::block_on(async { + // Subscribe to rendezvous server + let mut net_events1 = subscribe_network_events(ws_port1).await; + let sub1 = net_events1.sub(); + + // Start a peer that will renew registrations with the rendezvous server every five seconds + let rendezvous_client1 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config2.filename()) + .arg("--db") + .arg(&proc_info2.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let proc_guard_client1 = ChildGuard::new(rendezvous_client1); + + if wait_for_socket_connection_v6(rpc_port2, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + // Poll for server registered client one the first time + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if 
json["peer_registered_rendezvous"].is_object() + && json["peer_registered_rendezvous"]["peer_id"] == ED25519MULTIHASH5 + { + break; + } + } else { + panic!("Server did not receive registration from client one in time"); + } + } + + // Start a peer that will discover with the rendezvous server when + // a discovered registration expires. Note that by default discovery only + // occurs every ten minutes, so discovery requests in this test are driven + // by client one expirations. + let toml3 = format!( + r#" + [node] + [node.network.keypair_config] + existing = {{ key_type = "ed25519", path = "./fixtures/__testkey_ed25519_2.pem" }} + [node.network.libp2p] + listen_address = "{listen_addr3}" + node_addresses = ["{node_addra}"] + [node.network.libp2p.mdns] + enable = false + [node.network.metrics] + port = {metrics_port3} + [node.network.rpc] + port = {rpc_port3} + [node.network.webserver] + port = {ws_port3} + "# + ); + let config3 = make_config!(toml3); + + let rendezvous_client2 = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") + .env( + "RUST_LOG", + "homestar=debug,homestar_runtime=debug,libp2p=debug,libp2p_gossipsub::behaviour=debug", + ) + .arg("start") + .arg("-c") + .arg(config3.filename()) + .arg("--db") + .arg(&proc_info3.db_path) + .stdout(Stdio::piped()) + .spawn() + .unwrap(); + let proc_guard_client2 = ChildGuard::new(rendezvous_client2); + + if wait_for_socket_connection(ws_port3, 1000).is_err() { + panic!("Homestar server/runtime failed to start in time"); + } + + // Poll for discovery served to client two twice + let mut discovered_count = 0; + loop { + if let Ok(msg) = sub1.next().with_timeout(Duration::from_secs(30)).await { + let json: serde_json::Value = + serde_json::from_slice(&msg.unwrap().unwrap()).unwrap(); + + if json["discover_served_rendezvous"].is_object() + && json["discover_served_rendezvous"]["enquirer"] == ED25519MULTIHASH2 + { + discovered_count += 1; + } + } else { + panic!("Server did not serve discovery to client two 
twice in time"); + } + + if discovered_count == 2 { + break; + } + } + + // Collect logs for seven seconds then kill proceses. + let dead_server = kill_homestar(proc_guard_server.take(), None); + let _ = kill_homestar(proc_guard_client1.take(), None); + let dead_client2 = kill_homestar(proc_guard_client2.take(), None); + + // Retrieve logs. + let stdout_server = retrieve_output(dead_server); + let stdout_client2 = retrieve_output(dead_client2); + + // Count discover requests on the server + let server_discovery_count = count_lines_where( + stdout_server, + vec![ + "served rendezvous discover request to peer", + "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5", + ], + ); + + // Count discovery responses the client + let client_discovery_count = count_lines_where( + stdout_client2, + vec![ + "received discovery from rendezvous server", + "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN", + ], + ); + + assert!(server_discovery_count > 1); + assert!(client_discovery_count > 1); + }); Ok(()) } diff --git a/homestar-runtime/tests/utils.rs b/homestar-runtime/tests/utils.rs index a5618de9..2030c113 100644 --- a/homestar-runtime/tests/utils.rs +++ b/homestar-runtime/tests/utils.rs @@ -2,6 +2,12 @@ use anyhow::{bail, Context, Result}; #[cfg(not(windows))] use assert_cmd::prelude::*; use chrono::{DateTime, FixedOffset}; +#[cfg(feature = "websocket-notify")] +use jsonrpsee::{ + core::client::{Client, Subscription, SubscriptionClientT}, + rpc_params, + ws_client::WsClientBuilder, +}; #[cfg(not(windows))] use nix::{ sys::signal::{self, Signal}, @@ -35,12 +41,16 @@ pub(crate) const BIN_NAME: &str = "homestar"; /// Test-default ed25519 multihash. pub(crate) const ED25519MULTIHASH: &str = "12D3KooWDpJ7As7BWAwRMfu1VU2WCqNjvq387JEYKDBj4kx6nXTN"; /// Test-default ed25519 multihash 2. +#[cfg(feature = "websocket-notify")] pub(crate) const ED25519MULTIHASH2: &str = "12D3KooWK99VoVxNE7XzyBwXEzW7xhK7Gpv85r9F3V3fyKSUKPH5"; /// Test-default ed25519 multihash 3. 
+#[cfg(feature = "websocket-notify")] pub(crate) const ED25519MULTIHASH3: &str = "12D3KooWJWoaqZhDaoEFshF7Rh1bpY9ohihFhzcW6d69Lr2NASuq"; /// Test-default ed25519 multihash 4. +#[cfg(feature = "websocket-notify")] pub(crate) const ED25519MULTIHASH4: &str = "12D3KooWRndVhVZPCiQwHBBBdg769GyrPUW13zxwqQyf9r3ANaba"; /// Test-default ed25519 multihash 5. +#[cfg(feature = "websocket-notify")] pub(crate) const ED25519MULTIHASH5: &str = "12D3KooWPT98FXMfDQYavZm66EeVjTqP9Nnehn1gyaydqV8L8BQw"; /// Test-default secp256k1 multihash. pub(crate) const SECP256K1MULTIHASH: &str = "16Uiu2HAm3g9AomQNeEctL2hPwLapap7AtPSNt8ZrBny4rLx1W5Dc"; @@ -51,6 +61,7 @@ pub(crate) fn listen_addr(port: u16) -> String { } /// Return multiaddr address. +#[cfg(feature = "websocket-notify")] pub(crate) fn multiaddr(port: u16, hash: &str) -> String { format!("/ip4/127.0.0.1/tcp/{port}/p2p/{hash}") } @@ -216,6 +227,7 @@ pub(crate) fn check_for_line_with(output: String, predicates: Vec<&str>) -> bool .any(|curr| curr) } +#[cfg(feature = "websocket-notify")] pub(crate) fn count_lines_where(output: String, predicates: Vec<&str>) -> i32 { output.split('\n').fold(0, |count, line| { if line_contains(line, &predicates) { @@ -372,6 +384,50 @@ pub(crate) fn wait_for_socket_connection_v6(port: u16, exp_retry_base: u64) -> R result.map_or_else(|_| Err(()), |_| Ok(())) } +/// Client and subscription. +#[cfg(feature = "websocket-notify")] +pub(crate) struct WsClientSub { + #[allow(dead_code)] + client: Client, + sub: Subscription>, +} + +#[cfg(feature = "websocket-notify")] +impl WsClientSub { + pub(crate) fn sub(&mut self) -> &mut Subscription> { + &mut self.sub + } +} + +/// Helper function to subscribe to network events +/// Note that the client must not be dropped or the sub will return only None.
+#[cfg(feature = "websocket-notify")] +pub(crate) async fn subscribe_network_events(ws_port: u16) -> WsClientSub { + const SUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "subscribe_network_events"; + const UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT: &str = "unsubscribe_network_events"; + + let ws_url = format!("ws://{}:{}", Ipv4Addr::LOCALHOST, ws_port); + tokio_tungstenite::connect_async(ws_url.clone()) + .await + .unwrap(); + + let client = WsClientBuilder::default() + .build(ws_url.clone()) + .await + .unwrap(); + + let sub: Subscription> = client + .subscribe( + SUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + rpc_params![], + UNSUBSCRIBE_NETWORK_EVENTS_ENDPOINT, + ) + .await + .unwrap(); + + WsClientSub { client, sub } +} + /// Helper extension trait which allows to limit execution time for the futures. /// It is helpful in tests to ensure that no future will ever get stuck forever. pub(crate) trait TimeoutFutureExt: Future + Sized { diff --git a/homestar-runtime/tests/webserver.rs b/homestar-runtime/tests/webserver.rs index d6d2ddc2..7b72b506 100644 --- a/homestar-runtime/tests/webserver.rs +++ b/homestar-runtime/tests/webserver.rs @@ -46,6 +46,7 @@ fn test_workflow_run_integration() -> Result<()> { let config = make_config!(toml); let homestar_proc = Command::new(BIN.as_os_str()) + .env("RUST_BACKTRACE", "0") .arg("start") .arg("-c") .arg(config.filename()) diff --git a/homestar-schemas/Cargo.toml b/homestar-schemas/Cargo.toml new file mode 100644 index 00000000..47cfccdd --- /dev/null +++ b/homestar-schemas/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "homestar-schemas" +publish = false +version = { workspace = true } +edition = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +homestar-invocation = { version = "0.1", path = "../homestar-invocation", default-features = false } +homestar-runtime = { version = "0.1", path = "../homestar-runtime", default-features = false, features = [ + "websocket-notify", +] } +homestar-workflow = { version = "0.1", 
path = "../homestar-workflow", default-features = false } +homestar-workspace-hack = { workspace = true } +schemars = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } + +[[bin]] +name = "schemas" +path = "src/main.rs" +bench = false +doc = false +test = false + +[features] +default = [] + +[package.metadata.cargo-machete] +ignored = ["homestar-workspace-hack"] + +[package.metadata.cargo-udeps.ignore] +normal = ["homestar-workspace-hack"] diff --git a/homestar-schemas/src/main.rs b/homestar-schemas/src/main.rs new file mode 100644 index 00000000..fcc30ac5 --- /dev/null +++ b/homestar-schemas/src/main.rs @@ -0,0 +1,343 @@ +//! Binary to generate OpenRPC API docs and JSON Schemas. + +use homestar_invocation::Receipt; +use homestar_runtime::{ + Health, NetworkNotification, NodeInfo, PrometheusData, ReceiptNotification, +}; +use homestar_workflow::Workflow; +use schemars::{ + schema::{RootSchema, SchemaObject}, + schema_for, +}; +use std::{fs, io::Write}; +mod openrpc; +use openrpc::document::{ + ContactObject, ContentDescriptorObject, ContentDescriptorOrReference, + ExternalDocumentationObject, InfoObject, JSONSchema, LicenseObject, MethodObject, + MethodObjectParamStructure, Openrpc, OpenrpcDocument, +}; + +fn main() { + fn schema_path(name: &str) -> String { + format!( + "{}/../homestar-runtime/schemas/{}", + env!("CARGO_MANIFEST_DIR"), + name + ) + } + + let health_schema = schema_for!(Health); + let _ = fs::File::create(schema_path("health.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&health_schema).unwrap()); + + let metrics_schema = schema_for!(PrometheusData); + let _ = fs::File::create(schema_path("metrics.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&metrics_schema).unwrap()); + + let node_info_schema = schema_for!(NodeInfo); + let _ = fs::File::create(schema_path("node_info.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&node_info_schema).unwrap()); + + let network_schema = 
schema_for!(NetworkNotification); + let _ = fs::File::create(schema_path("network.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&network_schema).unwrap()); + + let workflow_schema = schema_for!(Workflow<'static, ()>); + let _ = fs::File::create(schema_path("workflow.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&workflow_schema).unwrap()); + + let receipt_schema = schema_for!(Receipt<()>); + let _ = fs::File::create(schema_path("receipt.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&receipt_schema).unwrap()); + + let receipt_notification_schema = schema_for!(ReceiptNotification); + let _ = fs::File::create(schema_path("receipt_notification.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&receipt_notification_schema).unwrap()); + + let api_doc = generate_api_doc( + health_schema, + metrics_schema, + node_info_schema, + network_schema, + workflow_schema, + receipt_notification_schema, + ); + let _ = fs::File::create(schema_path("api.json")) + .unwrap() + .write_all(&serde_json::to_vec_pretty(&api_doc).unwrap()); +} + +// Spec: https://github.com/open-rpc/spec/blob/1.2.6/spec.md +fn generate_api_doc( + health_schema: RootSchema, + metrics_schema: RootSchema, + node_info_schema: RootSchema, + network_schema: RootSchema, + workflow_schema: RootSchema, + receipt_notification_schema: RootSchema, +) -> OpenrpcDocument { + let discover: MethodObject = MethodObject { + name: "rpc.discover".to_string(), + description: Some("OpenRPC schema as a description of this service".to_string()), + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::Either), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "OpenRPC Schema".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(RootSchema { + schema: SchemaObject { + reference: 
Some("https://github.com/ipvm-wg/homestar/blob/main/homestar-runtime/schemas/docs/api.json".to_string()), + ..Default::default() + }, + ..Default::default() + }), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + + let health: MethodObject = MethodObject { + name: "health".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::Either), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "health".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(health_schema), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + + let metrics: MethodObject = MethodObject { + name: "metrics".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::Either), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "metrics".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(metrics_schema), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + + let node_info: MethodObject = MethodObject { + name: "node".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::Either), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "node_info".to_string(), + summary: None, + description: None, + required: Some(true), + schema: 
JSONSchema::JsonSchemaObject(node_info_schema), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + + let network: MethodObject = MethodObject { + name: "subscribe_network_events".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::Either), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "subscription_id".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(schema_for!(String)), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: Some(ContentDescriptorObject { + name: "network subscription messages".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(network_schema), + deprecated: Some(false), + }), + }; + + let network_unsubscribe: MethodObject = MethodObject { + name: "unsubscribe_network_events".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::Either), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "unsubscribe result".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(schema_for!(bool)), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + + let workflow: MethodObject = MethodObject { + name: "subscribe_run_workflow".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::ByName), + params: 
vec![ContentDescriptorOrReference::ContentDescriptorObject( + ContentDescriptorObject { + name: "tasks".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(workflow_schema), + deprecated: Some(false), + }, + )], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "subscription_id".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(schema_for!(String)), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: Some(ContentDescriptorObject { + name: "workflow subscription messages".to_string(), + summary: Some("receipt notifications from a running workflow".to_string()), + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(receipt_notification_schema), + deprecated: Some(false), + }), + }; + + let workflow_unsubscribe: MethodObject = MethodObject { + name: "unsubscribe_run_workflow".to_string(), + description: None, + summary: None, + servers: None, + tags: None, + param_structure: Some(MethodObjectParamStructure::Either), + params: vec![], + result: ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name: "unsubscribe result".to_string(), + summary: None, + description: None, + required: Some(true), + schema: JSONSchema::JsonSchemaObject(schema_for!(bool)), + deprecated: Some(false), + }), + external_docs: None, + errors: None, + links: None, + examples: None, + deprecated: Some(false), + x_messages: None, + }; + + OpenrpcDocument { + openrpc: Openrpc::V26, + info: InfoObject { + title: "homestar".to_string(), + description: Some(env!("CARGO_PKG_DESCRIPTION").into()), + terms_of_service: None, + // Version is tied to workspace, but use homestar-runtime version + // in the future. 
+ version: env!("CARGO_PKG_VERSION").into(), + contact: Some(ContactObject { + name: None, + url: Some(env!("CARGO_PKG_REPOSITORY").into()), + email: None, + }), + license: Some(LicenseObject { + name: Some(env!("CARGO_PKG_LICENSE").into()), + url: None, + }), + }, + external_docs: Some(ExternalDocumentationObject { + description: None, + url: "https://docs.everywhere.computer/homestar/what-is-homestar/".to_string(), + }), + servers: None, + methods: vec![ + discover, + health, + metrics, + node_info, + network, + network_unsubscribe, + workflow, + workflow_unsubscribe, + ], + components: None, + } +} diff --git a/homestar-schemas/src/openrpc/document.rs b/homestar-schemas/src/openrpc/document.rs new file mode 100644 index 00000000..d4585fc8 --- /dev/null +++ b/homestar-schemas/src/openrpc/document.rs @@ -0,0 +1,645 @@ +#![allow(dead_code)] + +//! OpenRPC API document generator +//! +//! OpenRPC spec: https://github.com/open-rpc/spec +//! Module adapted from: https://github.com/austbot/rust-open-rpc-macros/tree/master/open-rpc-schema + +use schemars::{gen::SchemaSettings, schema::RootSchema, JsonSchema}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +extern crate serde_json; + +#[derive(Serialize, Deserialize, Clone)] +pub enum Openrpc { + #[serde(rename = "1.2.6")] + V26, + #[serde(rename = "1.2.5")] + V25, + #[serde(rename = "1.2.4")] + V24, + #[serde(rename = "1.2.3")] + V23, + #[serde(rename = "1.2.2")] + V22, + #[serde(rename = "1.2.1")] + V21, + #[serde(rename = "1.2.0")] + V20, + #[serde(rename = "1.1.12")] + V112, + #[serde(rename = "1.1.11")] + V111, + #[serde(rename = "1.1.10")] + V110, + #[serde(rename = "1.1.9")] + V19, + #[serde(rename = "1.1.8")] + V18, + #[serde(rename = "1.1.7")] + V17, + #[serde(rename = "1.1.6")] + V16, + #[serde(rename = "1.1.5")] + V15, + #[serde(rename = "1.1.4")] + V14, + #[serde(rename = "1.1.3")] + V13, + #[serde(rename = "1.1.2")] + V12, + #[serde(rename = "1.1.1")] + V11, + #[serde(rename = 
"1.1.0")] + V10, + #[serde(rename = "1.0.0")] + V00, + #[serde(rename = "1.0.0-rc1")] + V00Rc1, + #[serde(rename = "1.0.0-rc0")] + V00Rc0, +} + +pub type InfoObjectProperties = String; +pub type InfoObjectDescription = String; +pub type InfoObjectTermsOfService = String; +pub type InfoObjectVersion = String; +pub type ContactObjectName = String; +pub type ContactObjectEmail = String; +pub type ContactObjectUrl = String; +pub type SpecificationExtension = serde_json::Value; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ContactObject { + pub name: Option, + pub email: Option, + pub url: Option, +} + +pub type LicenseObjectName = String; +pub type LicenseObjectUrl = String; + +#[derive(Serialize, Deserialize, Clone)] +pub struct LicenseObject { + pub name: Option, + pub url: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct InfoObject { + pub title: InfoObjectProperties, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(rename = "termsOfService")] + #[serde(skip_serializing_if = "Option::is_none")] + pub terms_of_service: Option, + pub version: InfoObjectVersion, + #[serde(skip_serializing_if = "Option::is_none")] + pub contact: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub license: Option, +} + +pub type ExternalDocumentationObjectDescription = String; +pub type ExternalDocumentationObjectUrl = String; + +/// ExternalDocumentationObject +/// +/// information about external documentation +/// +#[derive(Serialize, Deserialize, Clone)] +pub struct ExternalDocumentationObject { + pub description: Option, + pub url: ExternalDocumentationObjectUrl, +} + +pub type ServerObjectUrl = String; +pub type ServerObjectName = String; +pub type ServerObjectDescription = String; +pub type ServerObjectSummary = String; +pub type ServerObjectVariableDefault = String; +pub type ServerObjectVariableDescription = String; +pub type ServerObjectVariableEnumItem = String; +pub type ServerObjectVariableEnum 
= Vec; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ServerObjectVariable { + pub default: ServerObjectVariableDefault, + pub description: Option, + #[serde(rename = "enum")] + pub variable_enum: Option, +} + +pub type ServerObjectVariables = HashMap>; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ServerObject { + pub url: ServerObjectUrl, + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub variables: Option, +} + +pub type Servers = Vec; +/// MethodObjectName +/// +/// The cannonical name for the method. The name MUST be unique within the methods array. +/// +pub type MethodObjectName = String; +/// MethodObjectDescription +/// +/// A verbose explanation of the method behavior. GitHub Flavored Markdown syntax MAY be used for rich text representation. +/// +pub type MethodObjectDescription = String; +/// MethodObjectSummary +/// +/// A short summary of what the method does. +/// +pub type MethodObjectSummary = String; +pub type TagObjectName = String; +pub type TagObjectDescription = String; + +#[derive(Serialize, Deserialize, Clone)] +pub struct TagObject { + pub name: TagObjectName, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(rename = "externalDocs")] + #[serde(skip_serializing_if = "Option::is_none")] + pub external_docs: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct ReferenceObject { + #[serde(rename = "$ref")] + pub reference: String, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum TagOrReference { + TagObject(TagObject), + ReferenceObject(ReferenceObject), +} + +pub type MethodObjectTags = Vec; + +/// MethodObjectParamStructure +/// +/// Format the server expects the params. Defaults to 'either'. 
+/// +/// # Default +/// +/// either +/// +#[derive(Serialize, Deserialize, Clone)] +pub enum MethodObjectParamStructure { + #[serde(rename = "by-position")] + ByPosition, + #[serde(rename = "by-name")] + ByName, + #[serde(rename = "either")] + Either, +} + +pub type ContentDescriptorObjectName = String; +pub type ContentDescriptorObjectDescription = String; +pub type ContentDescriptorObjectSummary = String; +pub type Id = String; +pub type Schema = String; +pub type Comment = String; +pub type Title = String; +pub type Description = String; +type AlwaysTrue = serde_json::Value; +pub type ReadOnly = bool; +pub type Examples = Vec; +pub type MultipleOf = f64; +pub type Maximum = f64; +pub type ExclusiveMaximum = f64; +pub type Minimum = f64; +pub type ExclusiveMinimum = f64; +pub type NonNegativeInteger = i64; +pub type NonNegativeIntegerDefaultZero = i64; +pub type Pattern = String; +pub type SchemaArray = Vec; + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum Items { + JSONSchema(JSONSchema), + SchemaArray(SchemaArray), +} + +pub type UniqueItems = bool; +pub type StringDoaGddGA = String; +/// StringArray +/// +/// # Default +/// +/// [] +/// +pub type StringArray = Vec; +/// Definitions +/// +/// # Default +/// +/// {} +/// +pub type Definitions = HashMap>; +/// Properties +/// +/// # Default +/// +/// {} +/// +pub type Properties = HashMap>; +/// PatternProperties +/// +/// # Default +/// +/// {} +/// +pub type PatternProperties = HashMap>; + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum DependenciesSet { + JSONSchema(JSONSchema), + StringArray(StringArray), +} + +pub type Dependencies = HashMap>; +pub type Enum = Vec; +pub type SimpleTypes = serde_json::Value; +pub type ArrayOfSimpleTypes = Vec; + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum Type { + SimpleTypes(SimpleTypes), + ArrayOfSimpleTypes(ArrayOfSimpleTypes), +} + +pub type Format = String; +pub type ContentMediaType = String; 
+pub type ContentEncoding = String; + +/// JSONSchemaBoolean +/// +/// Always valid if true. Never valid if false. Is constant. +/// +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum JSONSchema { + JsonSchemaObject(RootSchema), + JSONSchemaBoolean(bool), +} + +pub type ContentDescriptorObjectRequired = bool; +pub type ContentDescriptorObjectDeprecated = bool; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ContentDescriptorObject { + pub name: ContentDescriptorObjectName, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + pub schema: JSONSchema, + #[serde(skip_serializing_if = "Option::is_none")] + pub required: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub deprecated: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ContentDescriptorOrReference { + ContentDescriptorObject(ContentDescriptorObject), + ReferenceObject(ReferenceObject), +} + +pub type MethodObjectParams = Vec; + +/// ErrorObjectCode +/// +/// A Number that indicates the error type that occurred. This MUST be an integer. The error codes from and including -32768 to -32000 are reserved for pre-defined errors. These pre-defined errors SHOULD be assumed to be returned from any JSON-RPC api. +/// +pub type ErrorObjectCode = i64; +/// ErrorObjectMessage +/// +/// A String providing a short description of the error. The message SHOULD be limited to a concise single sentence. +/// +pub type ErrorObjectMessage = String; +/// ErrorObjectData +/// +/// A Primitive or Structured value that contains additional information about the error. This may be omitted. The value of this member is defined by the Server (e.g. detailed error information, nested errors etc.). +/// +pub type ErrorObjectData = serde_json::Value; + +/// ErrorObject +/// +/// Defines an application level error. 
+/// +#[derive(Serialize, Deserialize, Clone)] +pub struct ErrorObject { + pub code: ErrorObjectCode, + pub message: ErrorObjectMessage, + #[serde(skip_serializing_if = "Option::is_none")] + pub data: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ErrorOrReference { + ErrorObject(ErrorObject), + ReferenceObject(ReferenceObject), +} + +/// MethodObjectErrors +/// +/// Defines an application level error. +/// +pub type MethodObjectErrors = Vec; +pub type LinkObjectName = String; +pub type LinkObjectSummary = String; +pub type LinkObjectMethod = String; +pub type LinkObjectDescription = String; +pub type LinkObjectParams = serde_json::Value; + +#[derive(Serialize, Deserialize, Clone)] +pub struct LinkObjectServer { + pub url: ServerObjectUrl, + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub variables: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct LinkObject { + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub method: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub params: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub server: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum LinkOrReference { + LinkObject(LinkObject), + ReferenceObject(ReferenceObject), +} + +pub type MethodObjectLinks = Vec; +pub type ExamplePairingObjectName = String; +pub type ExamplePairingObjectDescription = String; +pub type ExampleObjectSummary = String; +pub type ExampleObjectValue = serde_json::Value; 
+pub type ExampleObjectDescription = String; +pub type ExampleObjectName = String; + +#[derive(Serialize, Deserialize, Clone)] +pub struct ExampleObject { + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + pub value: ExampleObjectValue, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + pub name: ExampleObjectName, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ExampleOrReference { + ExampleObject(ExampleObject), + ReferenceObject(ReferenceObject), +} + +pub type ExamplePairingObjectParams = Vec; + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ExamplePairingObjectResult { + ExampleObject(ExampleObject), + ReferenceObject(ReferenceObject), +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct ExamplePairingObject { + pub name: ExamplePairingObjectName, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + pub params: ExamplePairingObjectParams, + pub result: ExamplePairingObjectResult, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(untagged)] +pub enum ExamplePairingOrReference { + ExampleObject(ExampleObject), + ReferenceObject(ReferenceObject), +} + +pub type MethodObjectExamples = Vec; +pub type MethodObjectDeprecated = bool; + +#[derive(Serialize, Deserialize, Clone)] +pub struct MethodObject { + pub name: MethodObjectName, + #[serde(skip_serializing_if = "Option::is_none")] + pub description: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub summary: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub servers: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option, + #[serde(rename = "paramStructure")] + #[serde(skip_serializing_if = "Option::is_none")] + pub param_structure: Option, + pub params: MethodObjectParams, + pub result: ContentDescriptorOrReference, + #[serde(skip_serializing_if = "Option::is_none")] + pub errors: Option, + 
#[serde(skip_serializing_if = "Option::is_none")] + pub links: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub examples: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub deprecated: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "externalDocs")] + pub external_docs: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[serde(rename = "x-messages")] + pub x_messages: Option, +} + +pub type Methods = Vec; +pub type SchemaComponents = HashMap>; +pub type LinkComponents = HashMap>; +pub type ErrorComponents = HashMap>; +pub type ExampleComponents = HashMap>; +pub type ExamplePairingComponents = HashMap>; +pub type ContentDescriptorComponents = HashMap>; +pub type TagComponents = HashMap>; + +#[derive(Serialize, Deserialize, Clone)] +pub struct Components { + #[serde(skip_serializing_if = "Option::is_none")] + pub schemas: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub links: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub errors: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub examples: Option, + #[serde(rename = "examplePairings")] + pub example_pairings: Option, + #[serde(rename = "contentDescriptors")] + #[serde(skip_serializing_if = "Option::is_none")] + pub content_descriptors: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub tags: Option, +} + +#[derive(Serialize, Deserialize, Clone)] +pub struct OpenrpcDocument { + pub openrpc: Openrpc, + pub info: InfoObject, + #[serde(rename = "externalDocs")] + #[serde(skip_serializing_if = "Option::is_none")] + pub external_docs: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub servers: Option, + pub methods: Methods, + #[serde(skip_serializing_if = "Option::is_none")] + pub components: Option, +} + +impl Default for OpenrpcDocument { + fn default() -> Self { + OpenrpcDocument { + openrpc: Openrpc::V26, + info: InfoObject { + title: "".to_string(), + 
description: None, + terms_of_service: None, + version: "".to_string(), + contact: None, + license: None, + }, + external_docs: None, + servers: None, + methods: vec![], + components: None, + } + } +} + +impl OpenrpcDocument { + pub fn set_info(mut self, info: InfoObject) -> Self { + self.info = info; + self + } + pub fn add_object_method(&mut self, method: MethodObject) { + self.methods.push(method) + } +} + +impl ContentDescriptorOrReference { + pub fn new_content_descriptor<T: JsonSchema>( + name: ContactObjectName, + description: Option<ContentDescriptorObjectDescription>, + ) -> Self { + let mut setting = SchemaSettings::draft07(); + setting.inline_subschemas = true; + let schema = schemars::gen::SchemaGenerator::new(setting).into_root_schema_for::<T>(); + let json_schema = JSONSchema::JsonSchemaObject(schema); + ContentDescriptorOrReference::ContentDescriptorObject(ContentDescriptorObject { + name, + description, + summary: None, + schema: json_schema, + required: None, + deprecated: None, + }) + } +} + +impl MethodObject { + pub fn new(name: MethodObjectName, description: Option<MethodObjectDescription>) -> Self { + Self { + name, + description, + summary: None, + servers: None, + tags: None, + param_structure: None, + params: vec![], + result: ContentDescriptorOrReference::ReferenceObject(ReferenceObject { + reference: "".to_string(), + }), + errors: None, + links: None, + examples: None, + deprecated: None, + external_docs: None, + x_messages: None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + #[derive(JsonSchema)] + pub struct MyType([u8; 8]); + + #[derive(JsonSchema)] + pub struct MyParam { + pub my_int: i32, + pub my_bool: bool, + pub my_type: Box<MyType>, + } + + #[derive(JsonSchema)] + pub struct MyRet { + pub success: Box<bool>, + } + + #[test] + fn test_openrpc_document() { + let mut document = OpenrpcDocument::default(); + let mut method = MethodObject::new("method1".to_string(), None); + let param = ContentDescriptorOrReference::new_content_descriptor::<MyParam>( + "first_param".to_string(), + Some("no desc".to_string()), + ); +
method.params.push(param); + method.result = + ContentDescriptorOrReference::new_content_descriptor::<MyRet>("ret".to_string(), None); + document.add_object_method(method); + let j = serde_json::to_string_pretty(&document).unwrap(); + println!("{}", j); + } +} diff --git a/homestar-schemas/src/openrpc/mod.rs b/homestar-schemas/src/openrpc/mod.rs new file mode 100644 index 00000000..ef6324cb --- /dev/null +++ b/homestar-schemas/src/openrpc/mod.rs @@ -0,0 +1 @@ +pub(crate) mod document; diff --git a/homestar-workflow/Cargo.toml b/homestar-workflow/Cargo.toml index 53dccd64..32228129 100644 --- a/homestar-workflow/Cargo.toml +++ b/homestar-workflow/Cargo.toml @@ -23,6 +23,7 @@ homestar-invocation = { version = "0.1", path = "../homestar-invocation" } homestar-workspace-hack = { workspace = true } indexmap = { version = "2.1", default-features = false } libipld = { workspace = true } +schemars = { workspace = true } serde = { workspace = true } thiserror = { workspace = true } diff --git a/homestar-workflow/src/workflow.rs b/homestar-workflow/src/workflow.rs index 2d44175c..b50d47d9 100644 --- a/homestar-workflow/src/workflow.rs +++ b/homestar-workflow/src/workflow.rs @@ -9,6 +9,7 @@ use homestar_invocation::{ Task, Unit, }; use libipld::{serde::from_ipld, Ipld}; +use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; @@ -17,7 +18,8 @@ const TASKS_KEY: &str = "tasks"; /// Workflow composed of [tasks]. /// /// [tasks]: Task -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Clone, JsonSchema, PartialEq, Serialize, Deserialize)] +#[schemars(title = "Workflow", description = "Workflow composed of tasks")] pub struct Workflow<'a, T> { tasks: Vec<Task<'a, T>>, }