From 473c462ab1d7911b8288b99238f98df6d3751702 Mon Sep 17 00:00:00 2001
From: Feliciss <10203-feliciss@users.noreply.0xacab.org>
Date: Wed, 5 Jul 2023 14:23:55 -0600
Subject: [PATCH 1/3] [config] add ws config

---
 crates/rooch-config/src/lib.rs               |  1 +
 crates/rooch-config/src/rpc/server_config.rs |  4 +-
 crates/rooch-config/src/ws/mod.rs            |  4 ++
 crates/rooch-config/src/ws/relay_config.rs   | 44 ++++++++++++++++++++
 crates/rooch-rpc-client/src/client_config.rs |  7 ++--
 crates/rooch/src/commands/init.rs            | 13 +++---
 6 files changed, 62 insertions(+), 11 deletions(-)
 create mode 100644 crates/rooch-config/src/ws/mod.rs
 create mode 100644 crates/rooch-config/src/ws/relay_config.rs

diff --git a/crates/rooch-config/src/lib.rs b/crates/rooch-config/src/lib.rs
index 310b6b7fa4..93ecd6bd42 100644
--- a/crates/rooch-config/src/lib.rs
+++ b/crates/rooch-config/src/lib.rs
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0

 pub mod rpc;
+pub mod ws;

 use anyhow::{Context, Result};
 use serde::{de::DeserializeOwned, Serialize};
diff --git a/crates/rooch-config/src/rpc/server_config.rs b/crates/rooch-config/src/rpc/server_config.rs
index 78b3bcfcff..bae7b34991 100644
--- a/crates/rooch-config/src/rpc/server_config.rs
+++ b/crates/rooch-config/src/rpc/server_config.rs
@@ -16,7 +16,7 @@ pub struct ServerConfig {
 }

 impl ServerConfig {
-    pub fn url(&self, https: bool) -> String {
+    pub fn rpc_url(&self, https: bool) -> String {
         let schema = if https { "https" } else { "http" };

         format!("{}://{}:{}", schema, self.host, self.port)
@@ -37,7 +37,7 @@ impl Display for ServerConfig {
 impl Default for ServerConfig {
     fn default() -> Self {
         Self {
-            host: "0.0.0.0".to_string(),
+            host: "0.0.0.0".to_owned(),
             port: 50051,
             proposer_address: None,
             sequencer_address: None,
diff --git a/crates/rooch-config/src/ws/mod.rs b/crates/rooch-config/src/ws/mod.rs
new file mode 100644
index 0000000000..a0a1b9904a
--- /dev/null
+++ b/crates/rooch-config/src/ws/mod.rs
@@ -0,0 +1,4 @@
+// Copyright (c) RoochNetwork
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod relay_config;
diff --git a/crates/rooch-config/src/ws/relay_config.rs b/crates/rooch-config/src/ws/relay_config.rs
new file mode 100644
index 0000000000..30f6ab78ca
--- /dev/null
+++ b/crates/rooch-config/src/ws/relay_config.rs
@@ -0,0 +1,44 @@
+// Copyright (c) RoochNetwork
+// SPDX-License-Identifier: Apache-2.0
+
+use serde::Deserialize;
+use serde::Serialize;
+use std::fmt::{Display, Formatter, Result, Write};
+
+#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash)]
+pub struct RelayConfig {
+    pub host: String,
+    pub port: u16,
+    pub remote_ip_header: Option<String>,
+    pub ping_interval_seconds: u32,
+}
+
+impl RelayConfig {
+    pub fn ws_url(&self, https: bool) -> String {
+        let schema = if https { "wss" } else { "ws" };
+
+        format!("{}://{}:{}", schema, self.host, self.port)
+    }
+}
+
+impl Display for RelayConfig {
+    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
+        let mut writer = String::new();
+
+        writeln!(writer, "host : {}", self.host)?;
+        writeln!(writer, "port : {}", self.port)?;
+
+        write!(f, "{}", writer)
+    }
+}
+
+impl Default for RelayConfig {
+    fn default() -> Self {
+        Self {
+            host: "0.0.0.0".to_owned(),
+            port: 8080,
+            remote_ip_header: None,
+            ping_interval_seconds: 300,
+        }
+    }
+}
diff --git a/crates/rooch-rpc-client/src/client_config.rs b/crates/rooch-rpc-client/src/client_config.rs
index 5bd5ad92bb..a72cc12904 100644
--- a/crates/rooch-rpc-client/src/client_config.rs
+++ b/crates/rooch-rpc-client/src/client_config.rs
@@ -4,6 +4,7 @@

 use
crate::{Client, ClientBuilder}; use anyhow::anyhow; use rooch_config::rpc::server_config::ServerConfig; +use rooch_config::ws::relay_config::RelayConfig; use rooch_config::Config; use rooch_key::keystore::{AccountKeystore, Keystore}; use rooch_types::address::RoochAddress; @@ -91,9 +92,9 @@ impl Env { impl Default for Env { fn default() -> Self { Env { - alias: "default".to_string(), - rpc: ServerConfig::default().url(false), - ws: None, + alias: "default".to_owned(), + rpc: ServerConfig::default().rpc_url(false), + ws: Some(RelayConfig::default().ws_url(false)), } } } diff --git a/crates/rooch/src/commands/init.rs b/crates/rooch/src/commands/init.rs index 47dd80a442..ad37790946 100644 --- a/crates/rooch/src/commands/init.rs +++ b/crates/rooch/src/commands/init.rs @@ -33,11 +33,11 @@ impl CommandAction for Init { }; // Prompt user for connect to devnet fullnode if config does not exist. if !client_config_path.exists() { - let env = match std::env::var_os("ROOCH_CONFIG_WITH_RPC_URL") { + let env = match std::env::var_os("ROOCH_CONFIG_WITH_RPC_WS_URL") { Some(v) => Some(Env { - alias: "custom".to_string(), - rpc: v.into_string().unwrap(), - ws: None, + alias: "custom".to_owned(), + rpc: v.clone().into_string().unwrap(), + ws: Some(v.into_string().unwrap()), }), None => { if self.accept_defaults { @@ -63,16 +63,17 @@ impl CommandAction for Init { Env::default() } else { print!("Environment alias for [{url}] : "); + let cloned_url = url.clone(); let alias = read_line()?; let alias = if alias.trim().is_empty() { - "custom".to_string() + "custom".to_owned() } else { alias }; Env { alias, rpc: url, - ws: None, + ws: Some(cloned_url), } }) } else { From 20e24bd9f5b90e8d69e053a7c778101c4ea38e5f Mon Sep 17 00:00:00 2001 From: Feliciss <10203-feliciss@users.noreply.0xacab.org> Date: Thu, 6 Jul 2023 02:21:46 -0600 Subject: [PATCH 2/3] [command] add command `rooch relay`. 
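
This embeds a Nostr websocket relay as a library crate and exposes it
through a new `rooch relay` subcommand. A minimal sketch of driving the
embedded relay from Rust, assuming the `rooch_ws_relay` items added
below and that `Settings` keeps an upstream-style `Default` impl:

    use rooch_ws_relay::server::config::Settings;
    use rooch_ws_relay::Service;
    use std::sync::mpsc::channel;

    fn main() -> anyhow::Result<()> {
        let settings = Settings::default();          // relay host/port, limits, pay-to-relay
        let (_shutdown_tx, shutdown_rx) = channel::<()>();
        let mut relay = Service::new();
        // `Service::start` spawns the relay thread and joins it, so this
        // call blocks until the relay shuts down (e.g. on SIGINT).
        relay.start(settings, shutdown_rx)?;
        Ok(())
    }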
--- Cargo.toml | 34 +- crates/rooch-config/src/lib.rs | 1 + crates/rooch-types/src/lib.rs | 1 + crates/rooch-ws-relay/Cargo.toml | 83 + crates/rooch-ws-relay/build.rs | 7 + crates/rooch-ws-relay/proto/nauthz.proto | 60 + crates/rooch-ws-relay/src/lib.rs | 1498 +++++++++++++++++ crates/rooch-ws-relay/src/nauthz.rs | 111 ++ crates/rooch-ws-relay/src/payment/handler.rs | 274 +++ crates/rooch-ws-relay/src/payment/lnbits.rs | 172 ++ crates/rooch-ws-relay/src/payment/mod.rs | 5 + crates/rooch-ws-relay/src/repo/mod.rs | 6 + crates/rooch-ws-relay/src/repo/nostr.rs | 93 + crates/rooch-ws-relay/src/repo/sqlite.rs | 1389 +++++++++++++++ .../src/repo/sqlite_migration.rs | 841 +++++++++ crates/rooch-ws-relay/src/server/close.rs | 32 + crates/rooch-ws-relay/src/server/config.rs | 346 ++++ crates/rooch-ws-relay/src/server/conn.rs | 229 +++ crates/rooch-ws-relay/src/server/db.rs | 431 +++++ .../rooch-ws-relay/src/server/delegation.rs | 406 +++++ crates/rooch-ws-relay/src/server/error.rs | 192 +++ crates/rooch-ws-relay/src/server/event.rs | 794 +++++++++ crates/rooch-ws-relay/src/server/hexrange.rs | 159 ++ crates/rooch-ws-relay/src/server/info.rs | 129 ++ crates/rooch-ws-relay/src/server/mod.rs | 17 + crates/rooch-ws-relay/src/server/nip05.rs | 607 +++++++ crates/rooch-ws-relay/src/server/notice.rs | 99 ++ .../rooch-ws-relay/src/server/subscription.rs | 650 +++++++ crates/rooch-ws-relay/src/server/utils.rs | 72 + crates/rooch/Cargo.toml | 4 + crates/rooch/src/commands/mod.rs | 1 + .../rooch/src/commands/relay/commands/mod.rs | 4 + .../src/commands/relay/commands/start.rs | 111 ++ crates/rooch/src/commands/relay/mod.rs | 31 + crates/rooch/src/lib.rs | 3 + 35 files changed, 8889 insertions(+), 3 deletions(-) create mode 100644 crates/rooch-ws-relay/Cargo.toml create mode 100644 crates/rooch-ws-relay/build.rs create mode 100644 crates/rooch-ws-relay/proto/nauthz.proto create mode 100644 crates/rooch-ws-relay/src/lib.rs create mode 100644 crates/rooch-ws-relay/src/nauthz.rs create mode 100644 crates/rooch-ws-relay/src/payment/handler.rs create mode 100644 crates/rooch-ws-relay/src/payment/lnbits.rs create mode 100644 crates/rooch-ws-relay/src/payment/mod.rs create mode 100644 crates/rooch-ws-relay/src/repo/mod.rs create mode 100644 crates/rooch-ws-relay/src/repo/nostr.rs create mode 100644 crates/rooch-ws-relay/src/repo/sqlite.rs create mode 100644 crates/rooch-ws-relay/src/repo/sqlite_migration.rs create mode 100644 crates/rooch-ws-relay/src/server/close.rs create mode 100644 crates/rooch-ws-relay/src/server/config.rs create mode 100644 crates/rooch-ws-relay/src/server/conn.rs create mode 100644 crates/rooch-ws-relay/src/server/db.rs create mode 100644 crates/rooch-ws-relay/src/server/delegation.rs create mode 100644 crates/rooch-ws-relay/src/server/error.rs create mode 100644 crates/rooch-ws-relay/src/server/event.rs create mode 100644 crates/rooch-ws-relay/src/server/hexrange.rs create mode 100644 crates/rooch-ws-relay/src/server/info.rs create mode 100644 crates/rooch-ws-relay/src/server/mod.rs create mode 100644 crates/rooch-ws-relay/src/server/nip05.rs create mode 100644 crates/rooch-ws-relay/src/server/notice.rs create mode 100644 crates/rooch-ws-relay/src/server/subscription.rs create mode 100644 crates/rooch-ws-relay/src/server/utils.rs create mode 100644 crates/rooch/src/commands/relay/commands/mod.rs create mode 100644 crates/rooch/src/commands/relay/commands/start.rs create mode 100644 crates/rooch/src/commands/relay/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 4d33e0f50c..0160f4a105 100644 
--- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,8 @@ members = [ "crates/rooch-open-rpc", "crates/rooch-open-rpc-spec", "crates/rooch-open-rpc-macros", - "crates/rooch-store" + "crates/rooch-store", + "crates/rooch-ws-relay" ] default-members = [ @@ -82,6 +83,7 @@ rooch-open-rpc = { path = "crates/rooch-open-rpc" } rooch-open-rpc-spec = { path = "crates/rooch-open-rpc-spec" } rooch-open-rpc-macros = { path = "crates/rooch-open-rpc-macros" } rooch-store = { path = "crates/rooch-store" } +rooch-ws-relay = { path = "crates/rooch-ws-relay" } # External crate dependencies. # Please do not add any test features here: they should be declared by the individual crate. @@ -150,7 +152,7 @@ thiserror = "1.0.34" tiny-keccak = { version = "2", features = ["keccak", "sha3"] } tiny-bip39 = "1.0.0" tokio = { version = "1", features = ["full"] } -tonic = { version = "0.8", features = ["gzip"] } +tonic = { version = "0.8.3", features = ["gzip"] } tracing = "0.1" tracing-subscriber = "0.3" codespan-reporting = "0.11.1" @@ -166,7 +168,33 @@ unescape = "0.1.0" tempfile = "3.2.0" regex = "1.8.4" walkdir = "2.3.3" - +futures-util = "0.3" +tracing-appender = "0.2.2" +console-subscriber = "0.1.8" +tokio-tungstenite = "0.17" +tungstenite = "0.17" +uuid = { version = "1.1.2", features = ["v4"] } +config = { version = "0.12", features = ["toml"] } +bitcoin_hashes = { version = "0.10", features = ["serde"] } +secp256k1 = {version = "0.21", features = ["rand", "rand-std", "serde", "bitcoin_hashes"] } +rusqlite = { version = "0.26", features = ["limits","bundled","modern_sqlite", "trace"]} +r2d2 = "0.8" +r2d2_sqlite = "0.19" +governor = "0.4" +nonzero_ext = "0.3" +hyper = { version="0.14", features=["client", "server","http1","http2","tcp"] } +hyper-tls = "0.5" +http = { version = "0.2" } +parse_duration = "2" +const_format = "0.2.28" +async-std = "1.12.0" +sqlx = { version ="0.6.2", features=["runtime-tokio-rustls", "postgres", "chrono"]} +prometheus = "0.13.3" +indicatif = "0.17.3" +url = "2.3.1" +qrcode = { version = "0.12.0", default-features = false, features = ["svg"] } +nostr = { version = "0.18.0", default-features = false, features = ["base", "nip04", "nip19"] } +tonic-build = { version="0.8.3", features = ["prost"] } # Note: the BEGIN and END comments below are required for external tooling. Do not remove. 
# BEGIN MOVE DEPENDENCIES diff --git a/crates/rooch-config/src/lib.rs b/crates/rooch-config/src/lib.rs index 93ecd6bd42..3675cd4c46 100644 --- a/crates/rooch-config/src/lib.rs +++ b/crates/rooch-config/src/lib.rs @@ -12,6 +12,7 @@ pub const ROOCH_DIR: &str = ".rooch"; pub const ROOCH_CONFIG_DIR: &str = "rooch_config"; pub const ROOCH_CLIENT_CONFIG: &str = "rooch.yaml"; pub const ROOCH_SERVER_CONFIG: &str = "server.yaml"; +pub const ROOCH_RELAY_CONFIG: &str = "relay.yaml"; pub const ROOCH_KEYSTORE_FILENAME: &str = "rooch.keystore"; pub fn rooch_config_dir() -> anyhow::Result { diff --git a/crates/rooch-types/src/lib.rs b/crates/rooch-types/src/lib.rs index 7a4a0f28d4..fc482a3f98 100644 --- a/crates/rooch-types/src/lib.rs +++ b/crates/rooch-types/src/lib.rs @@ -11,3 +11,4 @@ pub mod error; pub mod transaction; pub use ethers::types::{H160, H256, H512}; +pub use bitcoin::secp256k1::{XOnlyPublicKey, SecretKey, KeyPair}; \ No newline at end of file diff --git a/crates/rooch-ws-relay/Cargo.toml b/crates/rooch-ws-relay/Cargo.toml new file mode 100644 index 0000000000..dbfa218ec1 --- /dev/null +++ b/crates/rooch-ws-relay/Cargo.toml @@ -0,0 +1,83 @@ +[package] +name = "rooch-ws-relay" +version = "0.1.0" + +# Workspace inherited keys +authors = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +publish = { workspace = true } +repository = { workspace = true } +rust-version = { workspace = true } + +[dependencies] +clap = { workspace = true } +tracing = { workspace = true } +tracing-appender = { workspace = true } +tracing-subscriber = { workspace = true } +tokio = { workspace = true } +prost = { workspace = true } +tonic = { workspace = true } +console-subscriber = { workspace = true } +futures = { workspace = true } +futures-util = { workspace = true } +tokio-tungstenite = { workspace = true } +tungstenite = { workspace = true } +thiserror = { workspace = true } +uuid = { workspace = true } +config = { workspace = true } +bitcoin_hashes = { workspace = true } +secp256k1 = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +hex = { workspace = true } +rusqlite = { workspace = true } +r2d2 = { workspace = true } +r2d2_sqlite = { workspace = true } +lazy_static = { workspace = true } +governor = { workspace = true } +nonzero_ext = { workspace = true } +hyper = { workspace = true } +hyper-tls = { workspace = true } +http = { workspace = true } +parse_duration = { workspace = true } +rand = { workspace = true } +const_format = { workspace = true } +regex = { workspace = true } +async-trait = { workspace = true } +async-std = { workspace = true } +sqlx = { workspace = true } +chrono = { workspace = true } +prometheus = { workspace = true } +indicatif = { workspace = true } +bech32 = { workspace = true } +url = { workspace = true } +qrcode = { workspace = true } +nostr = { workspace = true } +anyhow = { workspace = true } +sha3 = { workspace = true } +jsonrpsee = { workspace = true } +coerce = { workspace = true } + +move-core-types = { workspace = true } +move-resource-viewer = { workspace = true } +move-binary-format = { workspace = true } + +moveos-store = { workspace = true } +moveos-types = { workspace = true } +move-bytecode-utils = { workspace = true } + +rooch-config = { workspace = true } +rooch-types = { workspace = true } +rooch-executor = { workspace = true } +rooch-sequencer = { workspace = true } +rooch-proposer = { workspace = true } +rooch-key = { workspace = true } +rooch-open-rpc = { 
workspace = true } +rooch-open-rpc-macros = { workspace = true } +rooch-store = { workspace = true } +rooch-rpc-api = { workspace = true } + +[build-dependencies] +tonic-build = { workspace = true } \ No newline at end of file diff --git a/crates/rooch-ws-relay/build.rs b/crates/rooch-ws-relay/build.rs new file mode 100644 index 0000000000..443a819529 --- /dev/null +++ b/crates/rooch-ws-relay/build.rs @@ -0,0 +1,7 @@ +fn main() -> Result<(), Box> { + tonic_build::configure() + .build_server(false) + .protoc_arg("--experimental_allow_proto3_optional") + .compile(&["proto/nauthz.proto"], &["proto"])?; + Ok(()) +} \ No newline at end of file diff --git a/crates/rooch-ws-relay/proto/nauthz.proto b/crates/rooch-ws-relay/proto/nauthz.proto new file mode 100644 index 0000000000..d9511ed11c --- /dev/null +++ b/crates/rooch-ws-relay/proto/nauthz.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; + +// Nostr Authorization Services +package nauthz; + +// Authorization for actions against a relay +service Authorization { + // Determine if an event should be admitted to the relay + rpc EventAdmit(EventRequest) returns (EventReply) {} +} + +message Event { + bytes id = 1; // 32-byte SHA256 hash of serialized event + bytes pubkey = 2; // 32-byte public key of event creator + fixed64 created_at = 3; // UNIX timestamp provided by event creator + uint64 kind = 4; // event kind + string content = 5; // arbitrary event contents + repeated TagEntry tags = 6; // event tag array + bytes sig = 7; // 32-byte signature of the event id + // Individual values for a single tag + message TagEntry { + repeated string values = 1; + } +} + +// Event data and metadata for authorization decisions +message EventRequest { + Event event = + 1; // the event to be admitted for further relay processing + optional string ip_addr = + 2; // IP address of the client that submitted the event + optional string origin = + 3; // HTTP origin header from the client, if one exists + optional string user_agent = + 4; // HTTP user-agent header from the client, if one exists + optional bytes auth_pubkey = + 5; // the public key associated with a NIP-42 AUTH'd session, if + // authentication occurred + optional Nip05Name nip05 = + 6; // NIP-05 address associated with the event pubkey, if it is + // known and has been validated by the relay + // A NIP_05 verification record + message Nip05Name { + string local = 1; + string domain = 2; + } +} + +// A permit or deny decision +enum Decision { + DECISION_UNSPECIFIED = 0; + DECISION_PERMIT = 1; // Admit this event for further processing + DECISION_DENY = 2; // Deny persisting or propagating this event +} + +// Response to a event authorization request +message EventReply { + Decision decision = 1; // decision to enforce + optional string message = 2; // informative message for the client +} \ No newline at end of file diff --git a/crates/rooch-ws-relay/src/lib.rs b/crates/rooch-ws-relay/src/lib.rs new file mode 100644 index 0000000000..464b186cf9 --- /dev/null +++ b/crates/rooch-ws-relay/src/lib.rs @@ -0,0 +1,1498 @@ +//! 
Server process
+pub mod server;
+pub mod payment;
+pub mod repo;
+pub mod nauthz;
+
+use crate::server::close::Close;
+use crate::server::close::CloseCmd;
+use crate::server::config::{Settings, VerifiedUsersMode};
+use crate::server::conn;
+use crate::server::db;
+use crate::server::db::SubmittedEvent;
+use crate::server::error::{Error, Result};
+use crate::server::event::Event;
+use crate::server::event::EventCmd;
+use crate::server::event::EventWrapper;
+use crate::server::info::RelayInfo;
+use crate::server::nip05;
+use crate::server::notice::Notice;
+use crate::payment::handler::InvoiceInfo;
+use crate::payment::handler::PaymentMessage;
+use crate::server::error::Error::CommandUnknownError;
+use crate::server::event::EventWrapper::{WrappedAuth, WrappedEvent};
+use crate::server::subscription::Subscription;
+use futures::SinkExt;
+use futures::StreamExt;
+use governor::{Jitter, Quota, RateLimiter};
+use http::header::HeaderMap;
+use hyper::body::to_bytes;
+use hyper::header::ACCEPT;
+use hyper::service::{make_service_fn, service_fn};
+use hyper::upgrade::Upgraded;
+use hyper::{
+    header, server::conn::AddrStream, upgrade, Body, Request, Response, Server, StatusCode,
+};
+use prometheus::IntCounterVec;
+use prometheus::IntGauge;
+use prometheus::{Encoder, Histogram, HistogramOpts, IntCounter, Opts, Registry, TextEncoder};
+use qrcode::render::svg;
+use qrcode::QrCode;
+use repo::nostr::NostrRepo;
+use serde::{Deserialize, Serialize};
+use serde_json::json;
+use std::collections::HashMap;
+use std::convert::Infallible;
+use std::fs::File;
+use std::io::BufReader;
+use std::io::Read;
+use std::net::SocketAddr;
+use std::path::Path;
+use std::sync::atomic::Ordering;
+use std::sync::mpsc::Receiver as MpscReceiver;
+use std::sync::Arc;
+use std::thread;
+use std::time::Duration;
+use std::time::Instant;
+use tokio::runtime::Builder;
+use tokio::sync::broadcast::{self, Receiver, Sender};
+use tokio::sync::mpsc;
+use tokio::sync::oneshot;
+use tokio_tungstenite::WebSocketStream;
+use tracing::{debug, error, info, trace, warn};
+use tungstenite::error::CapacityError::MessageTooLong;
+use tungstenite::error::Error as WsError;
+use tungstenite::handshake;
+use tungstenite::protocol::Message;
+use tungstenite::protocol::WebSocketConfig;
+use nostr::key::FromPkStr;
+use nostr::key::Keys;
+use std::fmt::Debug;
+use coerce::actor::scheduler::timer::Timer;
+use jsonrpsee::http_client::{HttpClient, HttpClientBuilder};
+
+pub fn http_client(url: impl AsRef<str>) -> anyhow::Result<HttpClient> {
+    let client = HttpClientBuilder::default().build(url)?;
+    Ok(client)
+}
+
+pub struct ServerHandle {
+    handle: jsonrpsee::server::ServerHandle,
+    timer: Timer,
+}
+
+impl ServerHandle {
+    fn stop(self) -> anyhow::Result<()> {
+        self.handle.stop()?;
+        self.timer.stop();
+        Ok(())
+    }
+}
+
+impl Debug for ServerHandle {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("ServerHandle")
+            .field("handle", &self.handle)
+            .finish()
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct Service {
+    handle: Option<ServerHandle>,
+}
+
+impl Service {
+    pub fn new() -> Self {
+        Self { handle: None }
+    }
+
+    pub fn start(&mut self, settings: Settings, shutdown_rx: MpscReceiver<()>) -> anyhow::Result<()> {
+        // run this in a new thread
+        let handle = thread::spawn(move || {
+            let _svr = start_server(&settings, shutdown_rx);
+        });
+        // block on nostr thread to finish.
+        handle.join().unwrap();
+        Ok(())
+    }
+
+    pub fn stop(self) -> anyhow::Result<()> {
+        if let Some(handle) = self.handle {
+            handle.stop()?
+        }
+        Ok(())
+    }
+}
+
+/// Handle arbitrary HTTP requests, including for `WebSocket` upgrades.
+#[allow(clippy::too_many_arguments)]
+async fn handle_web_request(
+    mut request: Request<Body>,
+    repo: Arc<dyn NostrRepo>,
+    settings: Settings,
+    remote_addr: SocketAddr,
+    broadcast: Sender<Event>,
+    event_tx: tokio::sync::mpsc::Sender<SubmittedEvent>,
+    payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
+    shutdown: Receiver<()>,
+    favicon: Option<Vec<u8>>,
+    registry: Registry,
+    metrics: NostrMetrics,
+) -> Result<Response<Body>, Infallible> {
+    match (
+        request.uri().path(),
+        request.headers().contains_key(header::UPGRADE),
+    ) {
+        // Request for / as websocket
+        ("/", true) => {
+            trace!("websocket with upgrade request");
+            //assume request is a handshake, so create the handshake response
+            let response = match handshake::server::create_response_with_body(&request, || {
+                Body::empty()
+            }) {
+                Ok(response) => {
+                    //in case the handshake response creation succeeds,
+                    //spawn a task to handle the websocket connection
+                    tokio::spawn(async move {
+                        //using the hyper feature of upgrading a connection
+                        match upgrade::on(&mut request).await {
+                            //if successfully upgraded
+                            Ok(upgraded) => {
+                                // set WebSocket configuration options
+                                let config = WebSocketConfig {
+                                    max_send_queue: Some(1024),
+                                    max_message_size: settings.limits.max_ws_message_bytes,
+                                    max_frame_size: settings.limits.max_ws_frame_bytes,
+                                    ..Default::default()
+                                };
+                                //create a websocket stream from the upgraded object
+                                let ws_stream = WebSocketStream::from_raw_socket(
+                                    //pass the upgraded object
+                                    //as the base layer stream of the Websocket
+                                    upgraded,
+                                    tokio_tungstenite::tungstenite::protocol::Role::Server,
+                                    Some(config),
+                                )
+                                .await;
+                                let origin = get_header_string("origin", request.headers());
+                                let user_agent = get_header_string("user-agent", request.headers());
+                                // determine the remote IP from headers if they exist
+                                let header_ip = settings
+                                    .network
+                                    .remote_ip_header
+                                    .as_ref()
+                                    .and_then(|x| get_header_string(x, request.headers()));
+                                // use the socket addr as a backup
+                                let remote_ip =
+                                    header_ip.unwrap_or_else(|| remote_addr.ip().to_string());
+                                let client_info = ClientInfo {
+                                    remote_ip,
+                                    user_agent,
+                                    origin,
+                                };
+                                // spawn a nostr server with our websocket
+                                tokio::spawn(nostr_server(
+                                    repo,
+                                    client_info,
+                                    settings,
+                                    ws_stream,
+                                    broadcast,
+                                    event_tx,
+                                    shutdown,
+                                    metrics,
+                                ));
+                            }
+                            // todo: trace, don't print...
+                            Err(e) => println!(
+                                "error when trying to upgrade connection \
+                                 from address {remote_addr} to websocket connection.
\ + Error is: {e}", + ), + } + }); + //return the response to the handshake request + response + } + Err(error) => { + warn!("websocket response failed"); + let mut res = + Response::new(Body::from(format!("Failed to create websocket: {error}"))); + *res.status_mut() = StatusCode::BAD_REQUEST; + return Ok(res); + } + }; + Ok::<_, Infallible>(response) + } + // Request for Relay info + ("/", false) => { + // handle request at root with no upgrade header + // Check if this is a nostr server info request + let accept_header = &request.headers().get(ACCEPT); + // check if application/nostr+json is included + if let Some(media_types) = accept_header { + if let Ok(mt_str) = media_types.to_str() { + if mt_str.contains("application/nostr+json") { + // build a relay info response + debug!("Responding to server info request"); + let rinfo = RelayInfo::from(settings); + let b = Body::from(serde_json::to_string_pretty(&rinfo).unwrap()); + return Ok(Response::builder() + .status(200) + .header("Content-Type", "application/nostr+json") + .header("Access-Control-Allow-Origin", "*") + .body(b) + .unwrap()); + } + } + } + + // Redirect users to join page when pay to relay enabled + if settings.pay_to_relay.enabled { + return Ok(Response::builder() + .status(StatusCode::TEMPORARY_REDIRECT) + .header("location", "/join") + .body(Body::empty()) + .unwrap()); + } + + Ok(Response::builder() + .status(200) + .header("Content-Type", "text/plain") + .body(Body::from("Please use a Nostr client to connect.")) + .unwrap()) + } + ("/metrics", false) => { + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + let metric_families = registry.gather(); + encoder.encode(&metric_families, &mut buffer).unwrap(); + + Ok(Response::builder() + .status(StatusCode::OK) + .header("Content-Type", "text/plain") + .body(Body::from(buffer)) + .unwrap()) + } + ("/favicon.ico", false) => { + if let Some(favicon_bytes) = favicon { + info!("returning favicon"); + Ok(Response::builder() + .status(StatusCode::OK) + .header("Content-Type", "image/x-icon") + // 1 month cache + .header("Cache-Control", "public, max-age=2419200") + .body(Body::from(favicon_bytes)) + .unwrap()) + } else { + Ok(Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::from("")) + .unwrap()) + } + } + // LN bits callback endpoint for paid invoices + ("/lnbits", false) => { + let callback: payment::lnbits::LNBitsCallback = + serde_json::from_slice(&to_bytes(request.into_body()).await.unwrap()).unwrap(); + debug!("LNBits callback: {callback:?}"); + + if let Err(e) = payment_tx.send(PaymentMessage::InvoicePaid(callback.payment_hash)) { + warn!("Could not send invoice update: {}", e); + return Ok(Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(Body::from("Error processing callback")) + .unwrap()); + } + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from("ok")) + .unwrap()) + } + // Endpoint for relays terms + ("/terms", false) => Ok(Response::builder() + .status(200) + .header("Content-Type", "text/plain") + .body(Body::from(settings.pay_to_relay.terms_message)) + .unwrap()), + // Endpoint to allow users to sign up + ("/join", false) => { + // Stops sign ups if disabled + if !settings.pay_to_relay.sign_ups { + return Ok(Response::builder() + .status(401) + .header("Content-Type", "text/plain") + .body(Body::from("Sorry, joining is not allowed at the moment")) + .unwrap()); + } + + let html = r#" + + + + + + +
+

Enter your pubkey

+
+

+ +

+ +
+ +
+ + + + "#; + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(html)) + .unwrap()) + } + // Endpoint to display invoice + ("/invoice", false) => { + // Stops sign ups if disabled + if !settings.pay_to_relay.sign_ups { + return Ok(Response::builder() + .status(401) + .header("Content-Type", "text/plain") + .body(Body::from("Sorry, joining is not allowed at the moment")) + .unwrap()); + } + + // Get query pubkey from query string + let pubkey = get_pubkey(request); + + // Redirect back to join page if no pub key is found in query string + if pubkey.is_none() { + return Ok(Response::builder() + .status(404) + .header("location", "/join") + .body(Body::empty()) + .unwrap()); + } + + // Checks key is valid + let pubkey = pubkey.unwrap(); + let key = Keys::from_pk_str(&pubkey); + if key.is_err() { + return Ok(Response::builder() + .status(401) + .header("Content-Type", "text/plain") + .body(Body::from("Looks like your key is invalid")) + .unwrap()); + } + + // Checks if user is already admitted + let payment_message; + if let Ok((admission_status, _)) = repo.get_account_balance(&key.unwrap()).await { + if admission_status { + return Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from("Already admitted")) + .unwrap()); + } else { + payment_message = PaymentMessage::CheckAccount(pubkey.clone()); + } + } else { + payment_message = PaymentMessage::NewAccount(pubkey.clone()); + } + + // Send message on payment channel requesting invoice + if payment_tx.send(payment_message).is_err() { + warn!("Could not send payment tx"); + return Ok(Response::builder() + .status(501) + .header("Content-Type", "text/plain") + .body(Body::from("Sorry, something went wrong")) + .unwrap()); + } + + // wait for message with invoice back that matched the pub key + let mut invoice_info: Option = None; + while let Ok(msg) = payment_tx.subscribe().recv().await { + match msg { + PaymentMessage::Invoice(m_pubkey, m_invoice_info) => { + if m_pubkey == pubkey.clone() { + invoice_info = Some(m_invoice_info); + break; + } + } + PaymentMessage::AccountAdmitted(m_pubkey) => { + if m_pubkey == pubkey.clone() { + return Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from("Already admitted")) + .unwrap()); + } + } + _ => (), + } + } + + // Return early if cant get invoice + if invoice_info.is_none() { + return Ok(Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(Body::from("Sorry, could not get invoice")) + .unwrap()); + } + + // Since invoice is checked to be not none, unwrap + let invoice_info = invoice_info.unwrap(); + + let qr_code: String; + if let Ok(code) = QrCode::new(invoice_info.bolt11.as_bytes()) { + qr_code = code + .render() + .min_dimensions(200, 200) + .dark_color(svg::Color("#800000")) + .light_color(svg::Color("#ffff80")) + .build(); + } else { + qr_code = "Could not render image".to_string(); + } + + let html_result = format!( + r#" + + + + + + + +
+

+ To use this relay, an admission fee of {} sats is required. By paying the fee, you agree to the terms. +

+
+
+
+ {} +
+
+
+
+

{}

+ +
+
+

This page will not refresh

+

Verify admission here once you have paid

+
+
+ + + + + +"#, + settings.pay_to_relay.admission_cost, + qr_code, + invoice_info.bolt11, + pubkey, + invoice_info.bolt11 + ); + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(html_result)) + .unwrap()) + } + ("/account", false) => { + // Stops sign ups if disabled + if !settings.pay_to_relay.enabled { + return Ok(Response::builder() + .status(401) + .header("Content-Type", "text/plain") + .body(Body::from("This relay is not paid")) + .unwrap()); + } + + // Gets the pubkey from query string + let pubkey = get_pubkey(request); + + // Redirect back to join page if no pub key is found in query string + if pubkey.is_none() { + return Ok(Response::builder() + .status(404) + .header("location", "/join") + .body(Body::empty()) + .unwrap()); + } + + // Checks key is valid + let pubkey = pubkey.unwrap(); + let key = Keys::from_pk_str(&pubkey); + if key.is_err() { + return Ok(Response::builder() + .status(401) + .header("Content-Type", "text/plain") + .body(Body::from("Looks like your key is invalid")) + .unwrap()); + } + + // Checks if user is already admitted + let text = + if let Ok((admission_status, _)) = repo.get_account_balance(&key.unwrap()).await { + if admission_status { + r#"is"# + } else { + r#"is not"# + } + } else { + "Could not get admission status" + }; + + let html_result = format!( + r#" + + + + + + + +
+
{} {} admitted
+
+ + + + + "#, + pubkey, text + ); + + Ok(Response::builder() + .status(StatusCode::OK) + .body(Body::from(html_result)) + .unwrap()) + } + // later balance + (_, _) => { + // handle any other url + Ok(Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::from("Nothing here.")) + .unwrap()) + } + } +} + +// Get pubkey from request query string +fn get_pubkey(request: Request) -> Option { + let query = request.uri().query().unwrap_or("").to_string(); + + // Gets the pubkey value from query string + query.split('&').fold(None, |acc, pair| { + let mut parts = pair.splitn(2, '='); + let key = parts.next(); + let value = parts.next(); + if key == Some("pubkey") { + return value.map(|s| s.to_owned()); + } + acc + }) +} + +fn get_header_string(header: &str, headers: &HeaderMap) -> Option { + headers + .get(header) + .and_then(|x| x.to_str().ok().map(std::string::ToString::to_string)) +} + +// return on a control-c or internally requested shutdown signal +async fn ctrl_c_or_signal(mut shutdown_signal: Receiver<()>) { + let mut term_signal = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate()) + .expect("could not define signal"); + loop { + tokio::select! { + _ = shutdown_signal.recv() => { + info!("Shutting down webserver as requested"); + // server shutting down, exit loop + break; + }, + _ = tokio::signal::ctrl_c() => { + info!("Shutting down webserver due to SIGINT"); + break; + }, + _ = term_signal.recv() => { + info!("Shutting down webserver due to SIGTERM"); + break; + }, + } + } +} + +fn create_metrics() -> (Registry, NostrMetrics) { + // setup prometheus registry + let registry = Registry::new(); + + let query_sub = Histogram::with_opts(HistogramOpts::new( + "nostr_query_seconds", + "Subscription response times", + )) + .unwrap(); + let query_db = Histogram::with_opts(HistogramOpts::new( + "nostr_filter_seconds", + "Filter SQL query times", + )) + .unwrap(); + let write_events = Histogram::with_opts(HistogramOpts::new( + "nostr_events_write_seconds", + "Event writing response times", + )) + .unwrap(); + let sent_events = IntCounterVec::new( + Opts::new("nostr_events_sent_total", "Events sent to clients"), + vec!["source"].as_slice(), + ) + .unwrap(); + let connections = + IntCounter::with_opts(Opts::new("nostr_connections_total", "New connections")).unwrap(); + let db_connections = IntGauge::with_opts(Opts::new( + "nostr_db_connections", + "Active database connections", + )) + .unwrap(); + let query_aborts = IntCounterVec::new( + Opts::new("nostr_query_abort_total", "Aborted queries"), + vec!["reason"].as_slice(), + ) + .unwrap(); + let cmd_req = IntCounter::with_opts(Opts::new("nostr_cmd_req_total", "REQ commands")).unwrap(); + let cmd_event = + IntCounter::with_opts(Opts::new("nostr_cmd_event_total", "EVENT commands")).unwrap(); + let cmd_close = + IntCounter::with_opts(Opts::new("nostr_cmd_close_total", "CLOSE commands")).unwrap(); + let cmd_auth = + IntCounter::with_opts(Opts::new("nostr_cmd_auth_total", "AUTH commands")).unwrap(); + let disconnects = IntCounterVec::new( + Opts::new("nostr_disconnects_total", "Client disconnects"), + vec!["reason"].as_slice(), + ) + .unwrap(); + registry.register(Box::new(query_sub.clone())).unwrap(); + registry.register(Box::new(query_db.clone())).unwrap(); + registry.register(Box::new(write_events.clone())).unwrap(); + registry.register(Box::new(sent_events.clone())).unwrap(); + registry.register(Box::new(connections.clone())).unwrap(); + registry.register(Box::new(db_connections.clone())).unwrap(); + 
registry.register(Box::new(query_aborts.clone())).unwrap(); + registry.register(Box::new(cmd_req.clone())).unwrap(); + registry.register(Box::new(cmd_event.clone())).unwrap(); + registry.register(Box::new(cmd_close.clone())).unwrap(); + registry.register(Box::new(cmd_auth.clone())).unwrap(); + registry.register(Box::new(disconnects.clone())).unwrap(); + let metrics = NostrMetrics { + query_sub, + query_db, + write_events, + sent_events, + connections, + db_connections, + disconnects, + query_aborts, + cmd_req, + cmd_event, + cmd_close, + cmd_auth, + }; + (registry, metrics) +} + +fn file_bytes(path: &str) -> Result> { + let f = File::open(path)?; + let mut reader = BufReader::new(f); + let mut buffer = Vec::new(); + // Read file into vector. + reader.read_to_end(&mut buffer)?; + Ok(buffer) +} + +/// Start running a Nostr relay server. +pub fn start_server(settings: &Settings, shutdown_rx: MpscReceiver<()>) -> Result<(), Error> { + trace!("Config: {:?}", settings); + // do some config validation. + if !Path::new(&settings.database.data_directory).is_dir() { + error!("Database directory does not exist"); + return Err(Error::DatabaseDirError); + } + let addr = format!( + "{}:{}", + settings.network.address.trim(), + settings.network.port + ); + let socket_addr = addr.parse().expect("listening address not valid"); + // address whitelisting settings + if let Some(addr_whitelist) = &settings.authorization.pubkey_whitelist { + info!( + "Event publishing restricted to {} pubkey(s)", + addr_whitelist.len() + ); + } + // check if NIP-05 enforced user verification is on + if settings.verified_users.is_active() { + info!( + "NIP-05 user verification mode:{:?}", + settings.verified_users.mode + ); + if let Some(d) = settings.verified_users.verify_update_duration() { + info!("NIP-05 check user verification every: {:?}", d); + } + if let Some(d) = settings.verified_users.verify_expiration_duration() { + info!("NIP-05 user verification expires after: {:?}", d); + } + if let Some(wl) = &settings.verified_users.domain_whitelist { + info!("NIP-05 domain whitelist: {:?}", wl); + } + if let Some(bl) = &settings.verified_users.domain_blacklist { + info!("NIP-05 domain blacklist: {:?}", bl); + } + } + // configure tokio runtime + let rt = Builder::new_multi_thread() + .enable_all() + .thread_name_fn(|| { + // give each thread a unique numeric name + static ATOMIC_ID: std::sync::atomic::AtomicUsize = + std::sync::atomic::AtomicUsize::new(0); + let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst); + format!("tokio-ws-{id}") + }) + // limit concurrent SQLite blocking threads + .max_blocking_threads(settings.limits.max_blocking_threads) + .on_thread_start(|| { + trace!("started new thread: {:?}", std::thread::current().name()); + }) + .on_thread_stop(|| { + trace!("stopped thread: {:?}", std::thread::current().name()); + }) + .build() + .unwrap(); + // start tokio + rt.block_on(async { + let broadcast_buffer_limit = settings.limits.broadcast_buffer; + let persist_buffer_limit = settings.limits.event_persist_buffer; + let verified_users_active = settings.verified_users.is_active(); + let settings = settings.clone(); + info!("listening on: {}", socket_addr); + // all client-submitted valid events are broadcast to every + // other client on this channel. This should be large enough + // to accommodate slower readers (messages are dropped if + // clients can not keep up). 
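+        // A receiver that falls more than `broadcast_buffer_limit` messages
+        // behind gets a `Lagged` error on `recv()` and skips ahead; senders
+        // never block on slow clients.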
+        let (bcast_tx, _) = broadcast::channel::<Event>(broadcast_buffer_limit);
+        // validated events that need to be persisted are sent to the
+        // database via this channel.
+        let (event_tx, event_rx) = mpsc::channel::<SubmittedEvent>(persist_buffer_limit);
+        // establish a channel for letting all threads know about a
+        // requested server shutdown.
+        let (invoke_shutdown, shutdown_listen) = broadcast::channel::<()>(1);
+        // create a channel for sending any new metadata event. These
+        // will get processed relatively slowly (a potentially
+        // multi-second blocking HTTP call) on a single thread, so we
+        // buffer requests on the channel. No harm in dropping events
+        // here, since we are protecting against DoS. This can make
+        // it difficult to set up initial metadata in bulk, since
+        // overwhelming this will drop events and won't register
+        // metadata events.
+        let (metadata_tx, metadata_rx) = broadcast::channel::<Event>(4096);
+
+        let (payment_tx, payment_rx) = broadcast::channel::<PaymentMessage>(4096);
+
+        let (registry, metrics) = create_metrics();
+
+        // build a repository for events
+        let repo = db::build_repo(&settings, metrics.clone()).await;
+        // start the database writer task. Give it a channel for
+        // writing events, and for publishing events that have been
+        // written (to all connected clients).
+        tokio::task::spawn(db::db_writer(
+            repo.clone(),
+            settings.clone(),
+            event_rx,
+            bcast_tx.clone(),
+            metadata_tx.clone(),
+            payment_tx.clone(),
+            shutdown_listen,
+        ));
+        info!("db writer created");
+
+        // create a nip-05 verifier thread; if enabled.
+        if settings.verified_users.mode != VerifiedUsersMode::Disabled {
+            let verifier_opt = nip05::Verifier::new(
+                repo.clone(),
+                metadata_rx,
+                bcast_tx.clone(),
+                settings.clone(),
+            );
+            if let Ok(mut v) = verifier_opt {
+                if verified_users_active {
+                    tokio::task::spawn(async move {
+                        info!("starting up NIP-05 verifier...");
+                        v.run().await;
+                    });
+                }
+            }
+        }
+
+        // Create payments thread if pay to relay enabled
+        if settings.pay_to_relay.enabled {
+            let payment_opt = payment::handler::Payment::new(
+                repo.clone(),
+                payment_tx.clone(),
+                payment_rx,
+                bcast_tx.clone(),
+                settings.clone(),
+            );
+            if let Ok(mut p) = payment_opt {
+                tokio::task::spawn(async move {
+                    info!("starting payment process ...");
+                    p.run().await;
+                });
+            }
+        }
+
+        // listen for (external to tokio) shutdown request
+        let controlled_shutdown = invoke_shutdown.clone();
+        tokio::spawn(async move {
+            info!("control message listener started");
+            match shutdown_rx.recv() {
+                Ok(()) => {
+                    info!("control message requesting shutdown");
+                    controlled_shutdown.send(()).ok();
+                }
+                Err(std::sync::mpsc::RecvError) => {
+                    trace!("shutdown requestor is disconnected (this is normal)");
+                }
+            };
+        });
+        // listen for ctrl-c interrupts
+        let ctrl_c_shutdown = invoke_shutdown.clone();
+        // listener for webserver shutdown
+        let webserver_shutdown_listen = invoke_shutdown.subscribe();
+
+        tokio::spawn(async move {
+            tokio::signal::ctrl_c().await.unwrap();
+            info!("shutting down due to SIGINT (main)");
+            ctrl_c_shutdown.send(()).ok();
+        });
+        // spawn a task to check the pool size.
+        //let pool_monitor = pool.clone();
+        //tokio::spawn(async move {db::monitor_pool("reader", pool_monitor).await;});
+
+        // Read in the favicon if it exists
+        let favicon = settings.info.favicon.as_ref().and_then(|x| {
+            info!("reading favicon...");
+            file_bytes(x).ok()
+        });
+
+        // A `Service` is needed for every connection, so this
+        // creates one from our `handle_web_request` function.
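+        // hyper invokes the `make_service_fn` closure once per inbound TCP
+        // connection; every handle below is cloned here so the per-request
+        // `service_fn` closure can own its captures for the connection's life.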
+        let make_svc = make_service_fn(|conn: &AddrStream| {
+            let repo = repo.clone();
+            let remote_addr = conn.remote_addr();
+            let bcast = bcast_tx.clone();
+            let event = event_tx.clone();
+            let payment_tx = payment_tx.clone();
+            let stop = invoke_shutdown.clone();
+            let settings = settings.clone();
+            let favicon = favicon.clone();
+            let registry = registry.clone();
+            let metrics = metrics.clone();
+            async move {
+                // service_fn converts our function into a `Service`
+                Ok::<_, Infallible>(service_fn(move |request: Request<Body>| {
+                    handle_web_request(
+                        request,
+                        repo.clone(),
+                        settings.clone(),
+                        remote_addr,
+                        bcast.clone(),
+                        event.clone(),
+                        payment_tx.clone(),
+                        stop.subscribe(),
+                        favicon.clone(),
+                        registry.clone(),
+                        metrics.clone(),
+                    )
+                }))
+            }
+        });
+        let server = Server::bind(&socket_addr)
+            .serve(make_svc)
+            .with_graceful_shutdown(ctrl_c_or_signal(webserver_shutdown_listen));
+        // run hyper in this thread. This is why the thread does not return.
+        if let Err(e) = server.await {
+            eprintln!("server error: {e}");
+        }
+    });
+    Ok(())
+}
+
+/// Nostr protocol messages from a client
+#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Debug)]
+#[serde(untagged)]
+pub enum NostrMessage {
+    /// `EVENT` and `AUTH` messages
+    EventMsg(EventCmd),
+    /// A `REQ` message
+    SubMsg(Subscription),
+    /// A `CLOSE` message
+    CloseMsg(CloseCmd),
+}
+
+/// Convert Message to `NostrMessage`
+fn convert_to_msg(msg: &str, max_bytes: Option<usize>) -> Result<NostrMessage> {
+    let parsed_res: Result<NostrMessage> =
+        serde_json::from_str(msg).map_err(std::convert::Into::into);
+    match parsed_res {
+        Ok(m) => {
+            if let NostrMessage::SubMsg(_) = m {
+                // note; this only prints the first 16k of a REQ and then truncates.
+                trace!("REQ: {:?}", msg);
+            };
+            if let NostrMessage::EventMsg(_) = m {
+                if let Some(max_size) = max_bytes {
+                    // check length, ensure that some max size is set.
+                    if msg.len() > max_size && max_size > 0 {
+                        return Err(Error::EventMaxLengthError(msg.len()));
+                    }
+                }
+            }
+            Ok(m)
+        }
+        Err(e) => {
+            trace!("proto parse error: {:?}", e);
+            trace!("parse error on message: {:?}", msg.trim());
+            Err(Error::ProtoParseError)
+        }
+    }
+}
+
+/// Turn a string into a NOTICE message ready to send over a `WebSocket`
+fn make_notice_message(notice: &Notice) -> Message {
+    let json = match notice {
+        Notice::Message(ref msg) => json!(["NOTICE", msg]),
+        Notice::EventResult(ref res) => json!(["OK", res.id, res.status.to_bool(), res.msg]),
+        Notice::AuthChallenge(ref challenge) => json!(["AUTH", challenge]),
+    };
+
+    Message::text(json.to_string())
+}
+
+fn allowed_to_send(event_str: &String, conn: &conn::ClientConn, settings: &Settings) -> bool {
+    // TODO: pass in kind so that we can avoid deserialization for most events
+    if settings.authorization.nip42_dms {
+        match serde_json::from_str::<Event>(event_str) {
+            Ok(event) => {
+                if event.kind == 4 {
+                    match (conn.auth_pubkey(), event.tag_values_by_name("p").first()) {
+                        (Some(auth_pubkey), Some(recipient_pubkey)) => {
+                            recipient_pubkey == auth_pubkey || &event.pubkey == auth_pubkey
+                        },
+                        (_, _) => {
+                            false
+                        },
+                    }
+                } else {
+                    true
+                }
+            },
+            Err(_) => false
+        }
+    } else {
+        true
+    }
+}
+
+struct ClientInfo {
+    remote_ip: String,
+    user_agent: Option<String>,
+    origin: Option<String>,
+}
+
+/// Handle new client connections. This runs through an event loop
+/// for all client communication.
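+/// The loop multiplexes, via `tokio::select!`, the shutdown signal, the
+/// ping timer, the NOTICE and query-result channels, the global event
+/// broadcast, and inbound websocket frames from this client.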
+#[allow(clippy::too_many_arguments)]
+async fn nostr_server(
+    repo: Arc<dyn NostrRepo>,
+    client_info: ClientInfo,
+    settings: Settings,
+    mut ws_stream: WebSocketStream<Upgraded>,
+    broadcast: Sender<Event>,
+    event_tx: mpsc::Sender<SubmittedEvent>,
+    mut shutdown: Receiver<()>,
+    metrics: NostrMetrics,
+) {
+    // the time this websocket nostr server started
+    let orig_start = Instant::now();
+    // get a broadcast channel for clients to communicate on
+    let mut bcast_rx = broadcast.subscribe();
+    // Track internal client state
+    let mut conn = conn::ClientConn::new(client_info.remote_ip);
+    // subscription creation rate limiting
+    let mut sub_lim_opt = None;
+    // 100ms jitter when the rate limiter returns
+    let jitter = Jitter::up_to(Duration::from_millis(100));
+    let sub_per_min_setting = settings.limits.subscriptions_per_min;
+    if let Some(sub_per_min) = sub_per_min_setting {
+        if sub_per_min > 0 {
+            trace!("Rate limits for sub creation ({}/min)", sub_per_min);
+            let quota_time = core::num::NonZeroU32::new(sub_per_min).unwrap();
+            let quota = Quota::per_minute(quota_time);
+            sub_lim_opt = Some(RateLimiter::direct(quota));
+        }
+    }
+    // Use the remote IP as the client identifier
+    let cid = conn.get_client_prefix();
+    // Create a channel for receiving query results from the database.
+    // we will send out the tx handle to any query we generate.
+    // this has capacity for some of the larger requests we see, which
+    // should allow the DB thread to release the handle earlier.
+    let (query_tx, mut query_rx) = mpsc::channel::<db::QueryResult>(20_000);
+    // Create channel for receiving NOTICEs
+    let (notice_tx, mut notice_rx) = mpsc::channel::<Notice>(128);
+
+    // last time this client sent data (message, ping, etc.)
+    let mut last_message_time = Instant::now();
+
+    // ping interval (every 5 minutes)
+    let default_ping_dur = Duration::from_secs(settings.network.ping_interval_seconds.into());
+
+    // disconnect after 20 minutes without a ping response or event.
+    let max_quiet_time = Duration::from_secs(60 * 20);
+
+    let start = tokio::time::Instant::now() + default_ping_dur;
+    let mut ping_interval = tokio::time::interval_at(start, default_ping_dur);
+
+    // maintain a hashmap of a oneshot channel for active subscriptions.
+    // when these subscriptions are cancelled, make a message
+    // available to the executing query so it knows to stop.
+    let mut running_queries: HashMap<String, oneshot::Sender<()>> = HashMap::new();
+    // for stats, keep track of how many events the client published,
+    // and how many it received from queries.
+    let mut client_published_event_count: usize = 0;
+    let mut client_received_event_count: usize = 0;
+
+    let unspec = "".to_string();
+    info!("new client connection (cid: {}, ip: {:?})", cid, conn.ip());
+    let origin = client_info.origin.as_ref().unwrap_or(&unspec);
+    let user_agent = client_info.user_agent.as_ref().unwrap_or(&unspec);
+    info!(
+        "cid: {}, origin: {:?}, user-agent: {:?}",
+        cid, origin, user_agent
+    );
+
+    // Measure connections
+    metrics.connections.inc();
+
+    if settings.authorization.nip42_auth {
+        conn.generate_auth_challenge();
+        if let Some(challenge) = conn.auth_challenge() {
+            ws_stream
+                .send(make_notice_message(&Notice::AuthChallenge(
+                    challenge.to_string(),
+                )))
+                .await
+                .ok();
+        }
+    }
+
+    loop {
+        tokio::select!
{ + _ = shutdown.recv() => { + metrics.disconnects.with_label_values(&["shutdown"]).inc(); + info!("Close connection down due to shutdown, client: {}, ip: {:?}, connected: {:?}", cid, conn.ip(), orig_start.elapsed()); + // server shutting down, exit loop + break; + }, + _ = ping_interval.tick() => { + // check how long since we talked to client + // if it has been too long, disconnect + if last_message_time.elapsed() > max_quiet_time { + debug!("ending connection due to lack of client ping response"); + metrics.disconnects.with_label_values(&["timeout"]).inc(); + break; + } + // Send a ping + ws_stream.send(Message::Ping(Vec::new())).await.ok(); + }, + Some(notice_msg) = notice_rx.recv() => { + ws_stream.send(make_notice_message(¬ice_msg)).await.ok(); + }, + Some(query_result) = query_rx.recv() => { + // database informed us of a query result we asked for + let subesc = query_result.sub_id.replace('"', ""); + if query_result.event == "EOSE" { + let send_str = format!("[\"EOSE\",\"{subesc}\"]"); + ws_stream.send(Message::Text(send_str)).await.ok(); + } else if allowed_to_send(&query_result.event, &conn, &settings) { + metrics.sent_events.with_label_values(&["db"]).inc(); + client_received_event_count += 1; + // send a result + let send_str = format!("[\"EVENT\",\"{}\",{}]", subesc, &query_result.event); + ws_stream.send(Message::Text(send_str)).await.ok(); + } + }, + // TODO: consider logging the LaggedRecv error + Ok(global_event) = bcast_rx.recv() => { + // an event has been broadcast to all clients + // first check if there is a subscription for this event. + for (s, sub) in conn.subscriptions() { + if !sub.interested_in_event(&global_event) { + continue; + } + // TODO: serialize at broadcast time, instead of + // once for each consumer. + if let Ok(event_str) = serde_json::to_string(&global_event) { + if allowed_to_send(&event_str, &conn, &settings) { + // create an event response and send it + trace!("sub match for client: {}, sub: {:?}, event: {:?}", + cid, s, + global_event.get_event_id_prefix()); + let subesc = s.replace('"', ""); + metrics.sent_events.with_label_values(&["realtime"]).inc(); + ws_stream.send(Message::Text(format!("[\"EVENT\",\"{subesc}\",{event_str}]"))).await.ok(); + } + } else { + warn!("could not serialize event: {:?}", global_event.get_event_id_prefix()); + } + } + }, + ws_next = ws_stream.next() => { + // update most recent message time for client + last_message_time = Instant::now(); + // Consume text messages from the client, parse into Nostr messages. + let nostr_msg = match ws_next { + Some(Ok(Message::Text(m))) => { + convert_to_msg(&m,settings.limits.max_event_bytes) + }, + Some(Ok(Message::Binary(_))) => { + ws_stream.send( + make_notice_message(&Notice::message("binary messages are not accepted".into()))).await.ok(); + continue; + }, + Some(Ok(Message::Ping(_) | Message::Pong(_))) => { + // get a ping/pong, ignore. tungstenite will + // send responses automatically. 
+ continue; + }, + Some(Err(WsError::Capacity(MessageTooLong{size, max_size}))) => { + ws_stream.send( + make_notice_message(&Notice::message(format!("message too large ({size} > {max_size})")))).await.ok(); + continue; + }, + None | + Some(Ok(Message::Close(_)) | + Err(WsError::AlreadyClosed | WsError::ConnectionClosed | + WsError::Protocol(tungstenite::error::ProtocolError::ResetWithoutClosingHandshake))) + => { + debug!("websocket close from client (cid: {}, ip: {:?})",cid, conn.ip()); + metrics.disconnects.with_label_values(&["normal"]).inc(); + break; + }, + Some(Err(WsError::Io(e))) => { + // IO errors are considered fatal + warn!("IO error (cid: {}, ip: {:?}): {:?}", cid, conn.ip(), e); + metrics.disconnects.with_label_values(&["error"]).inc(); + + break; + } + x => { + // default condition on error is to close the client connection + info!("unknown error (cid: {}, ip: {:?}): {:?} (closing conn)", cid, conn.ip(), x); + metrics.disconnects.with_label_values(&["error"]).inc(); + + break; + } + }; + + // convert ws_next into proto_next + match nostr_msg { + Ok(NostrMessage::EventMsg(ec)) => { + // An EventCmd needs to be validated to be converted into an Event + // handle each type of message + let evid = ec.event_id().to_owned(); + let parsed : Result = Result::::from(ec); + metrics.cmd_event.inc(); + match parsed { + Ok(WrappedEvent(e)) => { + metrics.cmd_event.inc(); + let id_prefix:String = e.id.chars().take(8).collect(); + debug!("successfully parsed/validated event: {:?} (cid: {}, kind: {})", id_prefix, cid, e.kind); + // check if event is expired + if e.is_expired() { + let notice = Notice::invalid(e.id, "The event has already expired"); + ws_stream.send(make_notice_message(¬ice)).await.ok(); + // check if the event is too far in the future. + } else if e.is_valid_timestamp(settings.options.reject_future_seconds) { + // Write this to the database. 
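+                                    // Any NIP-42 authenticated pubkey on this connection is
+                                    // hex-decoded and attached for downstream admission checks.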
+ let auth_pubkey = conn.auth_pubkey().and_then(|pubkey| hex::decode(pubkey).ok()); + let submit_event = SubmittedEvent { + event: e.clone(), + notice_tx: notice_tx.clone(), + source_ip: conn.ip().to_string(), + origin: client_info.origin.clone(), + user_agent: client_info.user_agent.clone(), + auth_pubkey }; + event_tx.send(submit_event).await.ok(); + client_published_event_count += 1; + } else { + info!("client: {} sent a far future-dated event", cid); + if let Some(fut_sec) = settings.options.reject_future_seconds { + let msg = format!("The event created_at field is out of the acceptable range (+{fut_sec}sec) for this relay."); + let notice = Notice::invalid(e.id, &msg); + ws_stream.send(make_notice_message(¬ice)).await.ok(); + } + } + }, + Ok(WrappedAuth(event)) => { + metrics.cmd_auth.inc(); + if settings.authorization.nip42_auth { + let id_prefix:String = event.id.chars().take(8).collect(); + debug!("successfully parsed auth: {:?} (cid: {})", id_prefix, cid); + match &settings.info.relay_url { + None => { + error!("AUTH command received, but relay_url is not set in the config file (cid: {})", cid); + }, + Some(relay) => { + match conn.authenticate(&event, relay) { + Ok(_) => { + let pubkey = match conn.auth_pubkey() { + Some(k) => k.chars().take(8).collect(), + None => "".to_string(), + }; + info!("client is authenticated: (cid: {}, pubkey: {:?})", cid, pubkey); + }, + Err(e) => { + info!("authentication error: {} (cid: {})", e, cid); + ws_stream.send(make_notice_message(&Notice::restricted(event.id, format!("authentication error: {e}").as_str()))).await.ok(); + }, + } + } + } + } else { + let e = CommandUnknownError; + info!("client sent an invalid event (cid: {})", cid); + ws_stream.send(make_notice_message(&Notice::invalid(evid, &format!("{e}")))).await.ok(); + } + }, + Err(e) => { + metrics.cmd_event.inc(); + info!("client sent an invalid event (cid: {})", cid); + ws_stream.send(make_notice_message(&Notice::invalid(evid, &format!("{e}")))).await.ok(); + } + } + }, + Ok(NostrMessage::SubMsg(s)) => { + debug!("subscription requested (cid: {}, sub: {:?})", cid, s.id); + // subscription handling consists of: + // * check for rate limits + // * registering the subscription so future events can be matched + // * making a channel to cancel to request later + // * sending a request for a SQL query + // Do nothing if the sub already exists. + if conn.has_subscription(&s) { + info!("client sent duplicate subscription, ignoring (cid: {}, sub: {:?})", cid, s.id); + } else { + metrics.cmd_req.inc(); + if let Some(ref lim) = sub_lim_opt { + lim.until_ready_with_jitter(jitter).await; + } + let (abandon_query_tx, abandon_query_rx) = oneshot::channel::<()>(); + match conn.subscribe(s.clone()) { + Ok(()) => { + // when we insert, if there was a previous query running with the same name, cancel it. + if let Some(previous_query) = running_queries.insert(s.id.clone(), abandon_query_tx) { + previous_query.send(()).ok(); + } + if s.needs_historical_events() { + // start a database query. this spawns a blocking database query on a worker thread. + repo.query_subscription(s, cid.clone(), query_tx.clone(), abandon_query_rx).await.ok(); + } + }, + Err(e) => { + info!("Subscription error: {} (cid: {}, sub: {:?})", e, cid, s.id); + ws_stream.send(make_notice_message(&Notice::message(format!("Subscription error: {e}")))).await.ok(); + } + } + } + }, + Ok(NostrMessage::CloseMsg(cc)) => { + // closing a request simply removes the subscription. 
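+                    // This also aborts any database query still running for
+                    // that subscription id, via the oneshot stored in
+                    // `running_queries`.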
+                let parsed: Result<Close> = Result::<Close>::from(cc);
+                if let Ok(c) = parsed {
+                    metrics.cmd_close.inc();
+                    // check if a query is currently
+                    // running, and remove it if so.
+                    let stop_tx = running_queries.remove(&c.id);
+                    if let Some(tx) = stop_tx {
+                        tx.send(()).ok();
+                    }
+                    // stop checking new events against
+                    // the subscription
+                    conn.unsubscribe(&c);
+                } else {
+                    info!("invalid command ignored");
+                    ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
+                }
+            },
+            Err(Error::ConnError) => {
+                debug!("got connection close/error, disconnecting cid: {}, ip: {:?}", cid, conn.ip());
+                break;
+            }
+            Err(Error::EventMaxLengthError(s)) => {
+                info!("client sent command larger ({} bytes) than max size (cid: {})", s, cid);
+                ws_stream.send(make_notice_message(&Notice::message("event exceeded max size".into()))).await.ok();
+            },
+            Err(Error::ProtoParseError) => {
+                info!("client sent command that could not be parsed (cid: {})", cid);
+                ws_stream.send(make_notice_message(&Notice::message("could not parse command".into()))).await.ok();
+            },
+            Err(e) => {
+                info!("got non-fatal error from client (cid: {}, error: {:?})", cid, e);
+            },
+        }
+        },
+        }
+    }
+
+    // connection cleanup - ensure any still running queries are terminated.
+    for (_, stop_tx) in running_queries {
+        stop_tx.send(()).ok();
+    }
+    info!(
+        "stopping client connection (cid: {}, ip: {:?}, sent: {} events, recv: {} events, connected: {:?})",
+        cid,
+        conn.ip(),
+        client_published_event_count,
+        client_received_event_count,
+        orig_start.elapsed()
+    );
+}
+
+#[derive(Clone)]
+pub struct NostrMetrics {
+    pub query_sub: Histogram,        // response time of successful subscriptions
+    pub query_db: Histogram,         // individual database query execution time
+    pub db_connections: IntGauge,    // database connections in use
+    pub write_events: Histogram,     // response time of event writes
+    pub sent_events: IntCounterVec,  // count of events sent to clients
+    pub connections: IntCounter,     // count of websocket connections
+    pub disconnects: IntCounterVec,  // client disconnects
+    pub query_aborts: IntCounterVec, // count of queries aborted by server
+    pub cmd_req: IntCounter,         // count of REQ commands received
+    pub cmd_event: IntCounter,       // count of EVENT commands received
+    pub cmd_close: IntCounter,       // count of CLOSE commands received
+    pub cmd_auth: IntCounter,        // count of AUTH commands received
+}
\ No newline at end of file
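A minimal sketch, assuming the `prometheus` crate, of how metrics of these types can be created and registered (the metric names below are illustrative, not taken from this patch):

    use prometheus::{Histogram, HistogramOpts, IntCounterVec, Opts, Registry};

    fn main() {
        let registry = Registry::new();
        // a labeled counter family like `disconnects`, keyed by reason
        let disconnects = IntCounterVec::new(
            Opts::new("nostr_disconnects_total", "client disconnects by reason"),
            &["reason"],
        )
        .unwrap();
        registry.register(Box::new(disconnects.clone())).unwrap();
        disconnects.with_label_values(&["normal"]).inc();

        // a histogram like `query_sub`, observing durations in seconds
        let query_sub = Histogram::with_opts(HistogramOpts::new(
            "nostr_query_sub_seconds",
            "subscription response time",
        ))
        .unwrap();
        registry.register(Box::new(query_sub.clone())).unwrap();
        query_sub.observe(0.02);
    }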
diff --git a/crates/rooch-ws-relay/src/nauthz.rs b/crates/rooch-ws-relay/src/nauthz.rs
new file mode 100644
index 0000000000..af1cb6bbea
--- /dev/null
+++ b/crates/rooch-ws-relay/src/nauthz.rs
@@ -0,0 +1,111 @@
+use crate::server::error::{Error, Result};
+use crate::server::{event::Event, nip05::Nip05Name};
+use nauthz_grpc::authorization_client::AuthorizationClient;
+use nauthz_grpc::event::TagEntry;
+use nauthz_grpc::{Decision, Event as GrpcEvent, EventReply, EventRequest};
+use tracing::{info, warn};
+
+pub mod nauthz_grpc {
+    tonic::include_proto!("nauthz");
+}
+
+// A decision for the DB to act upon
+pub trait AuthzDecision: Send + Sync {
+    fn permitted(&self) -> bool;
+    fn message(&self) -> Option<String>;
+}
+
+impl AuthzDecision for EventReply {
+    fn permitted(&self) -> bool {
+        self.decision == Decision::Permit as i32
+    }
+    fn message(&self) -> Option<String> {
+        self.message.clone()
+    }
+}
+
+// A connection to an event admission GRPC server
+pub struct EventAuthzService {
+    server_addr: String,
+    conn: Option<AuthorizationClient<tonic::transport::Channel>>,
+}
+
+// conversion of Nip05Names into GRPC type
+impl std::convert::From<Nip05Name> for nauthz_grpc::event_request::Nip05Name {
+    fn from(value: Nip05Name) -> Self {
+        nauthz_grpc::event_request::Nip05Name {
+            local: value.local.clone(),
+            domain: value.domain,
+        }
+    }
+}
+
+// conversion of event tags into grpc struct
+fn tags_to_protobuf(tags: &Vec<Vec<String>>) -> Vec<TagEntry> {
+    tags.iter()
+        .map(|x| TagEntry { values: x.clone() })
+        .collect()
+}
+
+impl EventAuthzService {
+    pub async fn connect(server_addr: &str) -> EventAuthzService {
+        let mut eas = EventAuthzService {
+            server_addr: server_addr.to_string(),
+            conn: None,
+        };
+        eas.ready_connection().await;
+        eas
+    }
+
+    pub async fn ready_connection(&mut self) {
+        if self.conn.is_none() {
+            let client = AuthorizationClient::connect(self.server_addr.to_string()).await;
+            if let Err(ref msg) = client {
+                warn!("could not connect to nostr authz GRPC server: {:?}", msg);
+            } else {
+                info!("connected to nostr authorization GRPC server");
+            }
+            self.conn = client.ok();
+        }
+    }
+
+    pub async fn admit_event(
+        &mut self,
+        event: &Event,
+        ip: &str,
+        origin: Option<String>,
+        user_agent: Option<String>,
+        nip05: Option<Nip05Name>,
+        auth_pubkey: Option<Vec<u8>>,
+    ) -> Result<Box<dyn AuthzDecision>> {
+        self.ready_connection().await;
+        let id_blob = hex::decode(&event.id)?;
+        let pubkey_blob = hex::decode(&event.pubkey)?;
+        let sig_blob = hex::decode(&event.sig)?;
+        if let Some(ref mut c) = self.conn {
+            let gevent = GrpcEvent {
+                id: id_blob,
+                pubkey: pubkey_blob,
+                sig: sig_blob,
+                created_at: event.created_at,
+                kind: event.kind,
+                content: event.content.clone(),
+                tags: tags_to_protobuf(&event.tags),
+            };
+            let svr_res = c
+                .event_admit(EventRequest {
+                    event: Some(gevent),
+                    ip_addr: Some(ip.to_string()),
+                    origin,
+                    user_agent,
+                    auth_pubkey,
+                    nip05: nip05.map(nauthz_grpc::event_request::Nip05Name::from),
+                })
+                .await?;
+            let reply = svr_res.into_inner();
+            Ok(Box::new(reply))
+        } else {
+            Err(Error::AuthzError)
+        }
+    }
+}
diff --git a/crates/rooch-ws-relay/src/payment/handler.rs b/crates/rooch-ws-relay/src/payment/handler.rs
new file mode 100644
index 0000000000..2474b4b640
--- /dev/null
+++ b/crates/rooch-ws-relay/src/payment/handler.rs
@@ -0,0 +1,274 @@
+use crate::server::error::{Error, Result};
+use crate::server::event::Event;
+use crate::payment::lnbits::LNBitsPaymentProcessor;
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+use tracing::{info, warn};
+use crate::repo::nostr::NostrRepo;
+
+use async_trait::async_trait;
+use nostr::key::{FromPkStr, FromSkStr};
+use nostr::{key::Keys, Event as NostrEvent, EventBuilder};
+
+/// Payment handler
+pub struct Payment {
+    /// Repository for saving/retrieving events and invoices
+    repo: Arc<dyn NostrRepo>,
+    /// Newly validated events get written and then broadcast on this channel to subscribers
+    event_tx: tokio::sync::broadcast::Sender<Event>,
+    /// Payment message sender
+    payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
+    /// Payment message receiver
+    payment_rx: tokio::sync::broadcast::Receiver<PaymentMessage>,
+    /// Settings
+    settings: crate::server::config::Settings,
+    // Nostr Keys
+    nostr_keys: Option<Keys>,
+    /// Payment Processor
+    processor: Arc<dyn PaymentProcessor>,
+}
+
+#[async_trait]
+pub trait PaymentProcessor: Send + Sync {
+    /// Get invoice from processor
+    async fn get_invoice(&self, keys: &Keys, amount: u64) -> Result<InvoiceInfo>;
+    /// Check payment status of an invoice
+    async fn check_invoice(&self, payment_hash: &str) -> Result<InvoiceStatus>;
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
+pub enum Processor {
+    LNBits,
+}
+
+/// Possible states of an invoice
+#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, sqlx::Type)]
+#[sqlx(type_name = "status")]
+pub enum InvoiceStatus {
+    Unpaid,
+    Paid,
+    Expired,
+}
+
+impl ToString for InvoiceStatus {
+    fn to_string(&self) -> String {
+        match self {
+            InvoiceStatus::Paid => "Paid".to_string(),
+            InvoiceStatus::Unpaid => "Unpaid".to_string(),
+            InvoiceStatus::Expired => "Expired".to_string(),
+        }
+    }
+}
+
+/// Invoice information
+#[derive(Debug, Clone)]
+pub struct InvoiceInfo {
+    pub pubkey: String,
+    pub payment_hash: String,
+    pub bolt11: String,
+    pub amount: u64,
+    pub status: InvoiceStatus,
+    pub memo: String,
+    pub confirmed_at: Option<u64>,
+}
+
+/// Message variants for the payment channel
+#[derive(Debug, Clone)]
+pub enum PaymentMessage {
+    /// New account
+    NewAccount(String),
+    /// Check account
+    CheckAccount(String),
+    /// Account admitted
+    AccountAdmitted(String),
+    /// Invoice generated
+    Invoice(String, InvoiceInfo),
+    /// Invoice callback
+    /// Payment hash is passed
+    // This may have to be changed to better support other processors
+    InvoicePaid(String),
+}
+
+impl Payment {
+    pub fn new(
+        repo: Arc<dyn NostrRepo>,
+        payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
+        payment_rx: tokio::sync::broadcast::Receiver<PaymentMessage>,
+        event_tx: tokio::sync::broadcast::Sender<Event>,
+        settings: crate::server::config::Settings,
+    ) -> Result<Self> {
+        info!("Create payment handler");
+
+        // Create nostr key from sk string
+        let nostr_keys = if let Some(secret_key) = &settings.pay_to_relay.secret_key {
+            Some(Keys::from_sk_str(secret_key)?)
+        } else {
+            None
+        };
+
+        // Create processor kind defined in settings
+        let processor = match &settings.pay_to_relay.processor {
+            Processor::LNBits => Arc::new(LNBitsPaymentProcessor::new(&settings)),
+        };
+
+        Ok(Payment {
+            repo,
+            payment_tx,
+            payment_rx,
+            event_tx,
+            settings,
+            nostr_keys,
+            processor,
+        })
+    }
+
+    /// Perform Payment tasks
+    pub async fn run(&mut self) {
+        loop {
+            let res = self.run_internal().await;
+            if let Err(e) = res {
+                info!("error in payment: {:?}", e);
+            }
+        }
+    }
+
+    /// Internal select loop for performing payment operations
+    async fn run_internal(&mut self) -> Result<()> {
+        tokio::select! {
+            m = self.payment_rx.recv() => {
+                match m {
+                    Ok(PaymentMessage::NewAccount(pubkey)) => {
+                        info!("payment event for {:?}", pubkey);
+                        // REVIEW: This will need to change for cost per event
+                        let amount = self.settings.pay_to_relay.admission_cost;
+                        let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
+                        // TODO: should handle this error
+                        self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
+                    },
+                    // Gets the most recent unpaid invoice from database
+                    // Checks LNBits to verify if paid/unpaid
+                    Ok(PaymentMessage::CheckAccount(pubkey)) => {
+                        let keys = Keys::from_pk_str(&pubkey)?;
+
+                        if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&keys).await {
+                            match self.check_invoice_status(&invoice_info.payment_hash).await? {
+                                InvoiceStatus::Paid => {
+                                    self.repo.admit_account(&keys, self.settings.pay_to_relay.admission_cost).await?;
+                                    self.payment_tx.send(PaymentMessage::AccountAdmitted(pubkey)).ok();
+                                }
+                                _ => {
+                                    self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
+                                }
+                            }
+                        } else {
+                            let amount = self.settings.pay_to_relay.admission_cost;
+                            let invoice_info = self.get_invoice_info(&pubkey, amount).await?;
+                            self.payment_tx.send(PaymentMessage::Invoice(pubkey, invoice_info)).ok();
+                        }
+                    }
+                    Ok(PaymentMessage::InvoicePaid(payment_hash)) => {
+                        if self.check_invoice_status(&payment_hash).await?.eq(&InvoiceStatus::Paid) {
+                            let pubkey = self.repo
+                                .update_invoice(&payment_hash, InvoiceStatus::Paid)
+                                .await?;
+
+                            let key = Keys::from_pk_str(&pubkey)?;
+                            self.repo.admit_account(&key, self.settings.pay_to_relay.admission_cost).await?;
+                        }
+                    }
+                    Ok(_) => {
+                        // Nothing needs to be done here for this variant;
+                        // it is used by `server`
+                    }
+                    Err(err) => warn!("Payment RX: {err}")
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Sends Nostr DM to the pubkey that requested the invoice.
+    /// Two events: the terms, followed by the bolt11 invoice
+    pub async fn send_admission_message(
+        &self,
+        pubkey: &str,
+        invoice_info: &InvoiceInfo,
+    ) -> Result<()> {
+        let nostr_keys = match &self.nostr_keys {
+            Some(key) => key,
+            None => return Err(Error::CustomError("Nostr key not defined".to_string())),
+        };
+
+        // Create Nostr key from pk
+        let key = Keys::from_pk_str(pubkey)?;
+
+        let pubkey = key.public_key();
+
+        // Event DM with terms of service
+        let message_event: NostrEvent = EventBuilder::new_encrypted_direct_msg(
+            nostr_keys,
+            pubkey,
+            &self.settings.pay_to_relay.terms_message,
+        )?
+        .to_event(nostr_keys)?;
+
+        // Event DM with invoice
+        let invoice_event: NostrEvent =
+            EventBuilder::new_encrypted_direct_msg(nostr_keys, pubkey, &invoice_info.bolt11)?
+                .to_event(nostr_keys)?;
+
+        // Persist DM events to DB
+        self.repo.write_event(&message_event.clone().into()).await?;
+        self.repo.write_event(&invoice_event.clone().into()).await?;
+
+        // Broadcast DM events
+        self.event_tx.send(message_event.clone().into()).ok();
+        self.event_tx.send(invoice_event.clone().into()).ok();
+
+        Ok(())
+    }
+
+    /// Get Invoice Info
+    /// If the user already has an active invoice, that one is returned;
+    /// otherwise a new invoice is generated by the payment processor
+    pub async fn get_invoice_info(&self, pubkey: &str, amount: u64) -> Result<InvoiceInfo> {
+        // If the user is already in the DB this will be false,
+        // which avoids recreating admission invoices. The relay will keep
+        // DMing the same invoice if the client continues trying to write.
+        let key = Keys::from_pk_str(pubkey)?;
+        if !self.repo.create_account(&key).await? {
+            if let Ok(Some(invoice_info)) = self.repo.get_unpaid_invoice(&key).await {
+                return Ok(invoice_info);
+            }
+        }
+
+        let key = Keys::from_pk_str(pubkey)?;
+
+        let invoice_info = self.processor.get_invoice(&key, amount).await?;
+
+        // Persist invoice to DB
+        self.repo
+            .create_invoice_record(&key, invoice_info.clone())
+            .await?;
+
+        if self.settings.pay_to_relay.direct_message {
+            // Admission event invoice and terms to pubkey that is joining
+            self.send_admission_message(pubkey, &invoice_info).await?;
+        }
+
+        Ok(invoice_info)
+    }
+
+    /// Check paid status of invoice with LNBits
+    pub async fn check_invoice_status(&self, payment_hash: &str) -> Result<InvoiceStatus> {
+        // Check base if passed expiry time
+        let status = self.processor.check_invoice(payment_hash).await?;
+        self.repo
+            .update_invoice(payment_hash, status.clone())
+            .await?;
+
+        Ok(status)
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/payment/lnbits.rs b/crates/rooch-ws-relay/src/payment/lnbits.rs
new file mode 100644
index 0000000000..7bfa356cc1
--- /dev/null
+++ b/crates/rooch-ws-relay/src/payment/lnbits.rs
@@ -0,0 +1,172 @@
+//! LNBits payment processor
+use http::Uri;
+use hyper::client::connect::HttpConnector;
+use hyper::Client;
+use hyper_tls::HttpsConnector;
+use nostr::Keys;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+use async_trait::async_trait;
+use rand::Rng;
+
+use std::str::FromStr;
+use url::Url;
+
+use crate::server::{config::Settings, error::Error};
+
+use super::handler::{InvoiceInfo, InvoiceStatus, PaymentProcessor};
+
+const APIPATH: &str = "/api/v1/payments/";
+
+/// Info LNBits expects in create invoice request
+#[derive(Serialize, Deserialize, Debug)]
+pub struct LNBitsCreateInvoice {
+    out: bool,
+    amount: u64,
+    memo: String,
+    webhook: String,
+    unit: String,
+    internal: bool,
+    expiry: u64,
+}
+
+/// Invoice response for LNBits
+#[derive(Debug, Serialize, Deserialize)]
+pub struct LNBitsCreateInvoiceResponse {
+    payment_hash: String,
+    payment_request: String,
+}
+
+/// LNBits callback response
+/// Used when an invoice is paid:
+/// LNBits posts the status change to the relay
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct LNBitsCallback {
+    pub checking_id: String,
+    pub pending: bool,
+    pub amount: u64,
+    pub memo: String,
+    pub time: u64,
+    pub bolt11: String,
+    pub preimage: String,
+    pub payment_hash: String,
+    pub wallet_id: String,
+    pub webhook: String,
+    pub webhook_status: Option<String>,
+}
+
+/// LNBits response for the check invoice endpoint
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct LNBitsCheckInvoiceResponse {
+    paid: bool,
+}
+
+#[derive(Clone)]
+pub struct LNBitsPaymentProcessor {
+    /// HTTP client
+    client: hyper::Client<HttpsConnector<HttpConnector>, hyper::Body>,
+    settings: Settings,
+}
+
+impl LNBitsPaymentProcessor {
+    pub fn new(settings: &Settings) -> Self {
+        // setup hyper client
+        let https = HttpsConnector::new();
+        let client = Client::builder().build::<_, hyper::Body>(https);
+
+        Self {
+            client,
+            settings: settings.clone(),
+        }
+    }
+}
+
+#[async_trait]
+impl PaymentProcessor for LNBitsPaymentProcessor {
+    /// Calls LNBits API to get a new invoice
+    async fn get_invoice(&self, key: &Keys, amount: u64) -> Result<InvoiceInfo, Error> {
+        let random_number: u16 = rand::thread_rng().gen();
+        let memo = format!("{}: {}", random_number, key.public_key());
+
+        let callback_url = Url::parse(
+            &self
+                .settings
+                .info
+                .relay_url
+                .clone()
+                .unwrap()
+                .replace("ws", "http"),
+        )?
+        .join("lnbits")?;
+
+        let body = LNBitsCreateInvoice {
+            out: false,
+            amount,
+            memo: memo.clone(),
+            webhook: callback_url.to_string(),
+            unit: "sat".to_string(),
+            internal: false,
+            expiry: 3600,
+        };
+        let url = Url::parse(&self.settings.pay_to_relay.node_url)?.join(APIPATH)?;
+        let uri = Uri::from_str(url.as_str().strip_suffix('/').unwrap_or(url.as_str())).unwrap();
+
+        let req = hyper::Request::builder()
+            .method(hyper::Method::POST)
+            .uri(uri)
+            .header("X-Api-Key", &self.settings.pay_to_relay.api_secret)
+            .body(hyper::Body::from(serde_json::to_string(&body)?))
+            .expect("request builder");
+
+        let res = self.client.request(req).await?;
+
+        // Json to Struct of LNBits response
+        let body = hyper::body::to_bytes(res.into_body()).await?;
+        let invoice_response: LNBitsCreateInvoiceResponse = serde_json::from_slice(&body)?;
+
+        Ok(InvoiceInfo {
+            pubkey: key.public_key().to_string(),
+            payment_hash: invoice_response.payment_hash,
+            bolt11: invoice_response.payment_request,
+            amount,
+            memo,
+            status: InvoiceStatus::Unpaid,
+            confirmed_at: None,
+        })
+    }
+
+    /// Calls LNBits API to check the payment status of an invoice
+    async fn check_invoice(&self, payment_hash: &str) -> Result<InvoiceStatus, Error> {
+        let url = Url::parse(&self.settings.pay_to_relay.node_url)?
+            .join(APIPATH)?
+            .join(payment_hash)?;
+        let uri = Uri::from_str(url.as_str()).unwrap();
+
+        let req = hyper::Request::builder()
+            .method(hyper::Method::GET)
+            .uri(uri)
+            .header("X-Api-Key", &self.settings.pay_to_relay.api_secret)
+            .body(hyper::Body::empty())
+            .expect("request builder");
+
+        let res = self.client.request(req).await?;
+        // Json to Struct of LNBits response
+        let body = hyper::body::to_bytes(res.into_body()).await?;
+        let invoice_response: Value = serde_json::from_slice(&body)?;
+
+        let status = if let Ok(invoice_response) =
+            serde_json::from_value::<LNBitsCheckInvoiceResponse>(invoice_response)
+        {
+            if invoice_response.paid {
+                InvoiceStatus::Paid
+            } else {
+                InvoiceStatus::Unpaid
+            }
+        } else {
+            InvoiceStatus::Expired
+        };
+
+        Ok(status)
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/payment/mod.rs b/crates/rooch-ws-relay/src/payment/mod.rs
new file mode 100644
index 0000000000..2d3fda2ed9
--- /dev/null
+++ b/crates/rooch-ws-relay/src/payment/mod.rs
@@ -0,0 +1,5 @@
+// Copyright (c) RoochNetwork
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod handler;
+pub mod lnbits;
diff --git a/crates/rooch-ws-relay/src/repo/mod.rs b/crates/rooch-ws-relay/src/repo/mod.rs
new file mode 100644
index 0000000000..d085502ff1
--- /dev/null
+++ b/crates/rooch-ws-relay/src/repo/mod.rs
@@ -0,0 +1,6 @@
+// Copyright (c) RoochNetwork
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod nostr;
+pub mod sqlite;
+pub mod sqlite_migration;
diff --git a/crates/rooch-ws-relay/src/repo/nostr.rs b/crates/rooch-ws-relay/src/repo/nostr.rs
new file mode 100644
index 0000000000..35047a8f51
--- /dev/null
+++ b/crates/rooch-ws-relay/src/repo/nostr.rs
@@ -0,0 +1,93 @@
+use crate::db::QueryResult;
+use crate::server::error::Result;
+use crate::server::event::Event;
+use crate::nip05::VerificationRecord;
+use crate::payment::handler::{InvoiceInfo, InvoiceStatus};
+use crate::server::subscription::Subscription;
+use crate::server::utils::unix_time;
+use async_trait::async_trait;
+use nostr::Keys;
+use rand::Rng;
+
+#[async_trait]
+pub trait NostrRepo: Send + Sync {
+    /// Start the repository (any initialization or maintenance tasks can be kicked off here)
+    async fn start(&self) -> Result<()>;
+
+    /// Run migrations and return current version
+    async fn migrate_up(&self) -> Result<usize>;
+
+    /// Persist event to database
+    async fn write_event(&self, e: &Event) -> Result<u64>;
+
+    /// Perform a database query using a subscription.
+    ///
+    /// The [`Subscription`] is converted into a SQL query. Each result
+    /// is published on the `query_tx` channel as it is returned. If a
+    /// message becomes available on the `abandon_query_rx` channel, the
+    /// query is immediately aborted.
+    async fn query_subscription(
+        &self,
+        sub: Subscription,
+        client_id: String,
+        query_tx: tokio::sync::mpsc::Sender<QueryResult>,
+        mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
+    ) -> Result<()>;
+
+    /// Perform normal maintenance
+    async fn optimize_db(&self) -> Result<()>;
+
+    /// Create a new verification record connected to a specific event
+    async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()>;
+
+    /// Update verification timestamp
+    async fn update_verification_timestamp(&self, id: u64) -> Result<()>;
+
+    /// Update verification record as failed
+    async fn fail_verification(&self, id: u64) -> Result<()>;
+
+    /// Delete verification record
+    async fn delete_verification(&self, id: u64) -> Result<()>;
+
+    /// Get the latest verification record for a given pubkey.
+    async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord>;
+
+    /// Get oldest verification before timestamp
+    async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord>;
+
+    /// Create a new account
+    async fn create_account(&self, pubkey: &Keys) -> Result<bool>;
+
+    /// Admit an account
+    async fn admit_account(&self, pubkey: &Keys, admission_cost: u64) -> Result<()>;
+
+    /// Gets user balance if they are an admitted pubkey
+    async fn get_account_balance(&self, pubkey: &Keys) -> Result<(bool, u64)>;
+
+    /// Update account balance
+    async fn update_account_balance(
+        &self,
+        pub_key: &Keys,
+        positive: bool,
+        new_balance: u64,
+    ) -> Result<()>;
+
+    /// Create invoice record
+    async fn create_invoice_record(&self, pubkey: &Keys, invoice_info: InvoiceInfo) -> Result<()>;
+
+    /// Update invoice for given payment hash
+    async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String>;
+
+    /// Get the most recent invoice for a given pubkey;
+    /// the invoice must be unpaid and not expired
+    async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>>;
+}
+
+// Current time, with a slight forward jitter in seconds
+pub(crate) fn now_jitter(sec: u64) -> u64 {
+    // random time between now and `sec` seconds in the future.
+    let mut rng = rand::thread_rng();
+    let jitter_amount = rng.gen_range(0..sec);
+    let now = unix_time();
+    now.saturating_add(jitter_amount)
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/repo/sqlite.rs b/crates/rooch-ws-relay/src/repo/sqlite.rs
new file mode 100644
index 0000000000..310d45f42f
--- /dev/null
+++ b/crates/rooch-ws-relay/src/repo/sqlite.rs
@@ -0,0 +1,1389 @@
+//! Event persistence and querying
+//use crate::config::SETTINGS;
+use crate::server::config::Settings;
+use crate::db::QueryResult;
+use crate::server::error::{Error::SqlError, Result};
+use crate::server::event::{single_char_tagname, Event};
+use crate::server::hexrange::hex_range;
+use crate::server::hexrange::HexSearch;
+use crate::server::nip05::{Nip05Name, VerificationRecord};
+use crate::payment::handler::{InvoiceInfo, InvoiceStatus};
+use super::sqlite_migration::{upgrade_db, STARTUP_SQL};
+use crate::NostrMetrics;
+use crate::server::subscription::{ReqFilter, Subscription};
+use crate::server::utils::{is_hex, unix_time};
+use async_trait::async_trait;
+use hex;
+use r2d2;
+use r2d2_sqlite::SqliteConnectionManager;
+use rusqlite::params;
+use rusqlite::types::ToSql;
+use rusqlite::OpenFlags;
+use std::fmt::Write as _;
+use std::path::Path;
+use std::sync::Arc;
+use std::thread;
+use std::time::Duration;
+use std::time::Instant;
+use tokio::sync::{Mutex, MutexGuard, Semaphore};
+use tokio::task;
+use tracing::{debug, info, trace, warn};
+
+use super::nostr::{now_jitter, NostrRepo};
+use nostr::key::Keys;
+
+pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
+pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
+pub const DB_FILE: &str = "nostr.db";
+
+#[derive(Clone)]
+pub struct SqliteRepo {
+    /// Metrics
+    metrics: NostrMetrics,
+    /// Pool for reading events and NIP-05 status
+    read_pool: SqlitePool,
+    /// Pool for writing events and NIP-05 verification
+    write_pool: SqlitePool,
+    /// Pool for performing checkpoints/optimization
+    maint_pool: SqlitePool,
+    /// Flag to indicate a checkpoint is underway
+    checkpoint_in_progress: Arc<Mutex<u64>>,
+    /// Flag to limit writer concurrency
+    write_in_progress: Arc<Mutex<u64>>,
+    /// Semaphore for readers to acquire blocking threads
+    reader_threads_ready: Arc<Semaphore>,
+}
+
+impl SqliteRepo {
+    // build all the pools needed
+    #[must_use]
+    pub fn new(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo {
+        let write_pool = build_pool(
+            "writer",
+            settings,
+            OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
+            0,
+            2,
+            false,
+        );
+        let maint_pool = build_pool(
+            "maintenance",
+            settings,
+            OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
+            0,
+            2,
+            true,
+        );
+        let read_pool = build_pool(
+            "reader",
+            settings,
+            OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE,
+            settings.database.min_conn,
+            settings.database.max_conn,
+            true,
+        );
+
+        // this is used to block new reads during critical checkpoints
+        let checkpoint_in_progress = Arc::new(Mutex::new(0));
+        // SQLite can only effectively write single threaded, so don't
+        // block multiple worker threads unnecessarily.
+        let write_in_progress = Arc::new(Mutex::new(0));
+        // configure the number of worker threads that can be spawned
+        // to match the number of database reader connections.
+        let max_conn = settings.database.max_conn as usize;
+        let reader_threads_ready = Arc::new(Semaphore::new(max_conn));
+        SqliteRepo {
+            metrics,
+            read_pool,
+            write_pool,
+            maint_pool,
+            checkpoint_in_progress,
+            write_in_progress,
+            reader_threads_ready,
+        }
+    }
+
+    /// Persist an event to the database, returning rows added.
+    pub fn persist_event(conn: &mut PooledConnection, e: &Event) -> Result<u64> {
+        // enable auto vacuum
+        conn.execute_batch("pragma auto_vacuum = FULL")?;
+
+        // start transaction
+        let tx = conn.transaction()?;
+        // get relevant fields from event and convert to blobs.
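+        // (ids and pubkeys are hex-decoded so they are stored as compact
+        // 32-byte blobs rather than 64-character strings)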
+        let id_blob = hex::decode(&e.id).ok();
+        let pubkey_blob: Option<Vec<u8>> = hex::decode(&e.pubkey).ok();
+        let delegator_blob: Option<Vec<u8>> =
+            e.delegated_by.as_ref().and_then(|d| hex::decode(d).ok());
+        let event_str = serde_json::to_string(&e).ok();
+        // check for replaceable events that would hide this one; we won't even attempt to insert these.
+        if e.is_replaceable() {
+            let repl_count = tx.query_row(
+                "SELECT e.id FROM event e INDEXED BY author_index WHERE e.author=? AND e.kind=? AND e.created_at >= ? LIMIT 1;",
+                params![pubkey_blob, e.kind, e.created_at], |row| row.get::<usize, u64>(0));
+            if repl_count.ok().is_some() {
+                return Ok(0);
+            }
+        }
+        // check for parameterized replaceable events that would be hidden; don't insert these either.
+        if let Some(d_tag) = e.distinct_param() {
+            let repl_count = tx.query_row(
+                "SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.author=? AND e.kind=? AND t.name='d' AND t.value=? AND e.created_at >= ? LIMIT 1;",
+                params![pubkey_blob, e.kind, d_tag, e.created_at], |row| row.get::<usize, u64>(0));
+            // if any rows were returned, then some newer event with
+            // the same author/kind/tag value exist, and we can ignore
+            // this event.
+            if repl_count.ok().is_some() {
+                return Ok(0);
+            }
+        }
+        // ignore if the event hash is a duplicate.
+        let mut ins_count = tx.execute(
+            "INSERT OR IGNORE INTO event (event_hash, created_at, expires_at, kind, author, delegated_by, content, first_seen, hidden) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, strftime('%s','now'), FALSE);",
+            params![id_blob, e.created_at, e.expiration(), e.kind, pubkey_blob, delegator_blob, event_str]
+        )? as u64;
+        if ins_count == 0 {
+            // if the event was a duplicate, no need to insert event or
+            // pubkey references.
+            tx.rollback().ok();
+            return Ok(ins_count);
+        }
+        // remember primary key of the event most recently inserted.
+        let ev_id = tx.last_insert_rowid();
+        // add all tags to the tag table
+        for tag in &e.tags {
+            // ensure we have 2 values.
+            if tag.len() >= 2 {
+                let tagname = &tag[0];
+                let tagval = &tag[1];
+                // only single-char tags are searchable
+                let tagchar_opt = single_char_tagname(tagname);
+                match &tagchar_opt {
+                    Some(_) => {
+                        tx.execute(
+                            "INSERT OR IGNORE INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5)",
+                            params![ev_id, &tagname, &tagval, e.kind, e.created_at],
+                        )?;
+                    }
+                    None => {}
+                }
+            }
+        }
+        // if this event is replaceable update, remove other replaceable
+        // event with the same kind from the same author that was issued
+        // earlier than this.
+        if e.is_replaceable() {
+            let author = hex::decode(&e.pubkey).ok();
+            // this is a backwards check - hide any events that were older.
+            let update_count = tx.execute(
+                "DELETE FROM event WHERE kind=? and author=? and id NOT IN (SELECT id FROM event INDEXED BY author_kind_index WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1)",
+                params![e.kind, author, e.kind, author],
+            )?;
+            if update_count > 0 {
+                info!(
+                    "removed {} older replaceable kind {} events for author: {:?}",
+                    update_count,
+                    e.kind,
+                    e.get_author_prefix()
+                );
+            }
+        }
+        // if this event is parameterized replaceable, remove other events.
+        if let Some(d_tag) = e.distinct_param() {
+            let update_count = tx.execute(
+                "DELETE FROM event WHERE kind=? AND author=? AND id IN (SELECT e.id FROM event e LEFT JOIN tag t ON e.id=t.event_id WHERE e.kind=? AND e.author=? AND t.name='d' AND t.value=? ORDER BY t.created_at DESC LIMIT -1 OFFSET 1);",
+                params![e.kind, pubkey_blob, e.kind, pubkey_blob, d_tag])?;
+            if update_count > 0 {
+                info!(
+                    "removed {} older parameterized replaceable kind {} events for author: {:?}",
+                    update_count,
+                    e.kind,
+                    e.get_author_prefix()
+                );
+            }
+        }
+        // if this event is a deletion, hide the referenced events from the same author.
+        if e.kind == 5 {
+            let event_candidates = e.tag_values_by_name("e");
+            // first parameter will be author
+            let mut params: Vec<Box<dyn ToSql>> = vec![Box::new(hex::decode(&e.pubkey)?)];
+            event_candidates
+                .iter()
+                .filter(|x| is_hex(x) && x.len() == 64)
+                .filter_map(|x| hex::decode(x).ok())
+                .for_each(|x| params.push(Box::new(x)));
+            let query = format!(
+                "UPDATE event SET hidden=TRUE WHERE kind!=5 AND author=? AND event_hash IN ({})",
+                repeat_vars(params.len() - 1)
+            );
+            let mut stmt = tx.prepare(&query)?;
+            let update_count = stmt.execute(rusqlite::params_from_iter(params))?;
+            info!(
+                "hid {} deleted events for author {:?}",
+                update_count,
+                e.get_author_prefix()
+            );
+        } else {
+            // check if a deletion has already been recorded for this event.
+            // Only relevant for non-deletion events
+            let del_count = tx.query_row(
+                "SELECT e.id FROM event e WHERE e.author=? AND e.id IN (SELECT t.event_id FROM tag t WHERE t.name='e' AND t.kind=5 AND t.value=?) LIMIT 1;",
+                params![pubkey_blob, e.id], |row| row.get::<usize, u64>(0));
+            // check if the query returned a result, meaning we should
+            // hide the current event
+            if del_count.ok().is_some() {
+                // a deletion already existed, mark original event as hidden.
+                info!(
+                    "hid event: {:?} due to existing deletion by author: {:?}",
+                    e.get_event_id_prefix(),
+                    e.get_author_prefix()
+                );
+                let _update_count =
+                    tx.execute("UPDATE event SET hidden=TRUE WHERE id=?", params![ev_id])?;
+                // event was deleted, so let caller know nothing new
+                // arrived, preventing this from being sent to active
+                // subscriptions
+                ins_count = 0;
+            }
+        }
+        tx.commit()?;
+        Ok(ins_count)
+    }
+}
+
+#[async_trait]
+impl NostrRepo for SqliteRepo {
+    async fn start(&self) -> Result<()> {
+        db_checkpoint_task(
+            self.maint_pool.clone(),
+            Duration::from_secs(60),
+            self.write_in_progress.clone(),
+            self.checkpoint_in_progress.clone(),
+        )
+        .await?;
+        cleanup_expired(
+            self.maint_pool.clone(),
+            Duration::from_secs(600),
+            self.write_in_progress.clone(),
+        )
+        .await
+    }
+
+    async fn migrate_up(&self) -> Result<usize> {
+        let _write_guard = self.write_in_progress.lock().await;
+        let mut conn = self.write_pool.get()?;
+        task::spawn_blocking(move || upgrade_db(&mut conn)).await?
+    }
+    /// Persist event to database
+    async fn write_event(&self, e: &Event) -> Result<u64> {
+        let start = Instant::now();
+        let max_write_attempts = 10;
+        let mut attempts = 0;
+        let _write_guard = self.write_in_progress.lock().await;
+        // spawn a blocking thread
+        //let mut conn = self.write_pool.get()?;
+        let pool = self.write_pool.clone();
+        let e = e.clone();
+        let event_count = task::spawn_blocking(move || {
+            let mut conn = pool.get()?;
+            // this could fail because the database was busy; try
+            // multiple times before giving up.
+            loop {
+                attempts += 1;
+                let wr = SqliteRepo::persist_event(&mut conn, &e);
+                match wr {
+                    Err(SqlError(rusqlite::Error::SqliteFailure(e, _))) => {
+                        // this basically means that NIP-05 or another
+                        // writer was using the database between us
+                        // reading and promoting the connection to a
+                        // write lock.
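+                        // A busy/locked database here is transient; the
+                        // surrounding loop retries up to max_write_attempts
+                        // before giving up.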
+                        info!(
+                            "event write failed, DB locked (attempt: {}); sqlite err: {}",
+                            attempts, e.extended_code
+                        );
+                    }
+                    _ => {
+                        return wr;
+                    }
+                }
+                if attempts >= max_write_attempts {
+                    return wr;
+                }
+            }
+        })
+        .await?;
+        self.metrics
+            .write_events
+            .observe(start.elapsed().as_secs_f64());
+        event_count
+    }
+
+    /// Perform a database query using a subscription.
+    ///
+    /// The [`Subscription`] is converted into a SQL query. Each result
+    /// is published on the `query_tx` channel as it is returned. If a
+    /// message becomes available on the `abandon_query_rx` channel, the
+    /// query is immediately aborted.
+    async fn query_subscription(
+        &self,
+        sub: Subscription,
+        client_id: String,
+        query_tx: tokio::sync::mpsc::Sender<QueryResult>,
+        mut abandon_query_rx: tokio::sync::oneshot::Receiver<()>,
+    ) -> Result<()> {
+        let pre_spawn_start = Instant::now();
+        // if we let every request spawn a thread, we'll exhaust the
+        // thread pool waiting for queries to finish under high load.
+        // Instead, don't bother spawning threads when they will just
+        // block on a database connection.
+        let sem = self
+            .reader_threads_ready
+            .clone()
+            .acquire_owned()
+            .await
+            .unwrap();
+        let repo = self.clone();
+        let metrics = repo.metrics.clone();
+        task::spawn_blocking(move || {
+            {
+                // if we are waiting on a checkpoint, stop until it is complete
+                let _x = repo.checkpoint_in_progress.blocking_lock();
+            }
+            let db_queue_time = pre_spawn_start.elapsed();
+            // if the queue time was very long (>5 seconds), spare the DB and abort.
+            if db_queue_time > Duration::from_secs(5) {
+                info!(
+                    "shedding DB query load queued for {:?} (cid: {}, sub: {:?})",
+                    db_queue_time, client_id, sub.id
+                );
+                metrics.query_aborts.with_label_values(&["loadshed"]).inc();
+                return Ok(());
+            }
+            // otherwise, report queuing time if it is slow
+            else if db_queue_time > Duration::from_secs(1) {
+                debug!(
+                    "(slow) DB query queued for {:?} (cid: {}, sub: {:?})",
+                    db_queue_time, client_id, sub.id
+                );
+            }
+            // check before getting a DB connection if the client still wants the results
+            if abandon_query_rx.try_recv().is_ok() {
+                debug!(
+                    "query cancelled by client (before execution) (cid: {}, sub: {:?})",
+                    client_id, sub.id
+                );
+                return Ok(());
+            }
+
+            let start = Instant::now();
+            let mut row_count: usize = 0;
+            // cutoff for displaying slow queries
+            let slow_cutoff = Duration::from_millis(250);
+            let mut filter_count = 0;
+            // remove duplicates from the filter list.
+            if let Ok(mut conn) = repo.read_pool.get() {
+                {
+                    let pool_state = repo.read_pool.state();
+                    metrics
+                        .db_connections
+                        .set((pool_state.connections - pool_state.idle_connections).into());
+                }
+                for filter in sub.filters.iter() {
+                    let filter_start = Instant::now();
+                    filter_count += 1;
+                    let (q, p, idx) = query_from_filter(filter);
+                    let sql_gen_elapsed = filter_start.elapsed();
+                    if sql_gen_elapsed > Duration::from_millis(10) {
+                        debug!("SQL (slow) generated in {:?}", filter_start.elapsed());
+                    }
+                    // any client that doesn't cause us to generate new rows in 2
+                    // seconds gets dropped.
+                    let abort_cutoff = Duration::from_secs(2);
+                    let mut slow_first_event;
+                    let mut last_successful_send = Instant::now();
+                    // execute the query.
+                    // make the actual SQL query (with parameters inserted) available
+                    conn.trace(Some(|x| trace!("SQL trace: {:?}", x)));
+                    let mut stmt = conn.prepare_cached(&q)?;
+                    let mut event_rows = stmt.query(rusqlite::params_from_iter(p))?;
+
+                    let mut first_result = true;
+                    while let Some(row) = event_rows.next()? {
+                        let first_event_elapsed = filter_start.elapsed();
+                        slow_first_event = first_event_elapsed >= slow_cutoff;
+                        if first_result {
+                            debug!(
+                                "first result in {:?} (cid: {}, sub: {:?}, filter: {}) [used index: {:?}]",
+                                first_event_elapsed, client_id, sub.id, filter_count, idx
+                            );
+                            // logging for slow queries; show filter and SQL.
+                            // to reduce logging; only show 1/16th of clients (leading 0)
+                            if slow_first_event && client_id.starts_with('0') {
+                                debug!(
+                                    "filter first result in {:?} (slow): {} (cid: {}, sub: {:?})",
+                                    first_event_elapsed,
+                                    serde_json::to_string(&filter)?,
+                                    client_id,
+                                    sub.id
+                                );
+                            }
+                            first_result = false;
+                        }
+                        // check if a checkpoint is trying to run, and abort
+                        if row_count % 100 == 0 {
+                            {
+                                if repo.checkpoint_in_progress.try_lock().is_err() {
+                                    // lock was held, abort this query
+                                    debug!(
+                                        "query aborted due to checkpoint (cid: {}, sub: {:?})",
+                                        client_id, sub.id
+                                    );
+                                    metrics
+                                        .query_aborts
+                                        .with_label_values(&["checkpoint"])
+                                        .inc();
+                                    return Ok(());
+                                }
+                            }
+                        }
+
+                        // check if this is still active; every 100 rows
+                        if row_count % 100 == 0 && abandon_query_rx.try_recv().is_ok() {
+                            debug!(
+                                "query cancelled by client (cid: {}, sub: {:?})",
+                                client_id, sub.id
+                            );
+                            return Ok(());
+                        }
+                        row_count += 1;
+                        let event_json = row.get(0)?;
+                        loop {
+                            if query_tx.capacity() != 0 {
+                                // we have capacity to add another item
+                                break;
+                            }
+                            // the queue is full
+                            trace!("db reader thread is stalled");
+                            if last_successful_send + abort_cutoff < Instant::now() {
+                                // the queue has been full for too long, abort
+                                info!("aborting database query due to slow client (cid: {}, sub: {:?})",
+                                      client_id, sub.id);
+                                metrics
+                                    .query_aborts
+                                    .with_label_values(&["slowclient"])
+                                    .inc();
+                                let ok: Result<()> = Ok(());
+                                return ok;
+                            }
+                            // check if a checkpoint is trying to run, and abort
+                            if repo.checkpoint_in_progress.try_lock().is_err() {
+                                // lock was held, abort this query
+                                debug!(
+                                    "query aborted due to checkpoint (cid: {}, sub: {:?})",
+                                    client_id, sub.id
+                                );
+                                metrics
+                                    .query_aborts
+                                    .with_label_values(&["checkpoint"])
+                                    .inc();
+                                return Ok(());
+                            }
+                            // give the queue a chance to clear before trying again
+                            debug!(
+                                "query thread sleeping due to full query_tx (cid: {}, sub: {:?})",
+                                client_id, sub.id
+                            );
+                            thread::sleep(Duration::from_millis(500));
+                        }
+                        // TODO: we could use try_send, but we'd have to juggle
+                        // getting the query result back as part of the error
+                        // result.
+                        query_tx
+                            .blocking_send(QueryResult {
+                                sub_id: sub.get_id(),
+                                event: event_json,
+                            })
+                            .ok();
+                        last_successful_send = Instant::now();
+                    }
+                    metrics
+                        .query_db
+                        .observe(filter_start.elapsed().as_secs_f64());
+                    // if the filter took too much db_time, print out the JSON.
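+                    // (sampled: only for client ids starting with '0', roughly
+                    // 1/16th of clients, to keep log volume down)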
+                    if filter_start.elapsed() > slow_cutoff && client_id.starts_with('0') {
+                        debug!(
+                            "query filter req (slow): {} (cid: {}, sub: {:?}, filter: {})",
+                            serde_json::to_string(&filter)?,
+                            client_id,
+                            sub.id,
+                            filter_count
+                        );
+                    }
+                }
+            } else {
+                warn!("Could not get a database connection for querying");
+            }
+            drop(sem); // new query can begin
+            debug!(
+                "query completed in {:?} (cid: {}, sub: {:?}, db_time: {:?}, rows: {})",
+                pre_spawn_start.elapsed(),
+                client_id,
+                sub.id,
+                start.elapsed(),
+                row_count
+            );
+            query_tx
+                .blocking_send(QueryResult {
+                    sub_id: sub.get_id(),
+                    event: "EOSE".to_string(),
+                })
+                .ok();
+            metrics
+                .query_sub
+                .observe(pre_spawn_start.elapsed().as_secs_f64());
+            let ok: Result<()> = Ok(());
+            ok
+        });
+        Ok(())
+    }
+
+    /// Perform normal maintenance
+    async fn optimize_db(&self) -> Result<()> {
+        let conn = self.write_pool.get()?;
+        task::spawn_blocking(move || {
+            let start = Instant::now();
+            conn.execute_batch("PRAGMA optimize;").ok();
+            info!("optimize ran in {:?}", start.elapsed());
+        })
+        .await?;
+        Ok(())
+    }
+
+    /// Create a new verification record connected to a specific event
+    async fn create_verification_record(&self, event_id: &str, name: &str) -> Result<()> {
+        let e = hex::decode(event_id).ok();
+        let n = name.to_owned();
+        let mut conn = self.write_pool.get()?;
+        let _write_guard = self.write_in_progress.lock().await;
+        tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            {
+                // if we create a /new/ one, we should get rid of any old ones. or group the new ones by name and only consider the latest.
+                let query = "INSERT INTO user_verification (metadata_event, name, verified_at) VALUES ((SELECT id from event WHERE event_hash=?), ?, strftime('%s','now'));";
+                let mut stmt = tx.prepare(query)?;
+                stmt.execute(params![e, n])?;
+                // get the row ID
+                let v_id = tx.last_insert_rowid();
+                // delete everything else by this name
+                let del_query = "DELETE FROM user_verification WHERE name = ? AND id != ?;";
+                let mut del_stmt = tx.prepare(del_query)?;
+                let count = del_stmt.execute(params![n, v_id])?;
+                if count > 0 {
+                    info!("removed {} old verification records for ({:?})", count, n);
+                }
+            }
+            tx.commit()?;
+            info!("saved new verification record for ({:?})", n);
+            let ok: Result<()> = Ok(());
+            ok
+        }).await?
+    }
+
+    /// Update verification timestamp
+    async fn update_verification_timestamp(&self, id: u64) -> Result<()> {
+        let mut conn = self.write_pool.get()?;
+        let _write_guard = self.write_in_progress.lock().await;
+        tokio::task::spawn_blocking(move || {
+            // add some jitter to the verification to prevent everything from stacking up together.
+            let verif_time = now_jitter(600);
+            let tx = conn.transaction()?;
+            {
+                // update verification time and reset any failure count
+                let query =
+                    "UPDATE user_verification SET verified_at=?, failure_count=0 WHERE id=?";
+                let mut stmt = tx.prepare(query)?;
+                stmt.execute(params![verif_time, id])?;
+            }
+            tx.commit()?;
+            let ok: Result<()> = Ok(());
+            ok
+        })
+        .await?
+    }
+
+    /// Update verification record as failed
+    async fn fail_verification(&self, id: u64) -> Result<()> {
+        let mut conn = self.write_pool.get()?;
+        let _write_guard = self.write_in_progress.lock().await;
+        tokio::task::spawn_blocking(move || {
+            // add some jitter to the verification to prevent everything from stacking up together.
+            let fail_time = now_jitter(600);
+            let tx = conn.transaction()?;
+            {
+                let query = "UPDATE user_verification SET failed_at=?, failure_count=failure_count+1 WHERE id=?";
+                let mut stmt = tx.prepare(query)?;
+                stmt.execute(params![fail_time, id])?;
+            }
+            tx.commit()?;
+            let ok: Result<()> = Ok(());
+            ok
+        })
+        .await?
+    }
+
+    /// Delete verification record
+    async fn delete_verification(&self, id: u64) -> Result<()> {
+        let mut conn = self.write_pool.get()?;
+        let _write_guard = self.write_in_progress.lock().await;
+        tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            {
+                let query = "DELETE FROM user_verification WHERE id=?;";
+                let mut stmt = tx.prepare(query)?;
+                stmt.execute(params![id])?;
+            }
+            tx.commit()?;
+            let ok: Result<()> = Ok(());
+            ok
+        })
+        .await?
+    }
+
+    /// Get the latest verification record for a given pubkey.
+    async fn get_latest_user_verification(&self, pub_key: &str) -> Result<VerificationRecord> {
+        let mut conn = self.read_pool.get()?;
+        let pub_key = pub_key.to_owned();
+        tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            let query = "SELECT v.id, v.name, e.event_hash, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v LEFT JOIN event e ON e.id=v.metadata_event WHERE e.author=? ORDER BY e.created_at DESC, v.verified_at DESC, v.failed_at DESC LIMIT 1;";
+            let mut stmt = tx.prepare_cached(query)?;
+            let fields = stmt.query_row(params![hex::decode(&pub_key).ok()], |r| {
+                let rowid: u64 = r.get(0)?;
+                let rowname: String = r.get(1)?;
+                let eventid: Vec<u8> = r.get(2)?;
+                let created_at: u64 = r.get(3)?;
+                // create a tuple since we can't throw non-rusqlite errors in this closure
+                Ok((
+                    rowid,
+                    rowname,
+                    eventid,
+                    created_at,
+                    r.get(4).ok(),
+                    r.get(5).ok(),
+                    r.get(6)?,
+                ))
+            })?;
+            Ok(VerificationRecord {
+                rowid: fields.0,
+                name: Nip05Name::try_from(&fields.1[..])?,
+                address: pub_key,
+                event: hex::encode(fields.2),
+                event_created: fields.3,
+                last_success: fields.4,
+                last_failure: fields.5,
+                failure_count: fields.6,
+            })
+        }).await?
+    }
+
+    /// Get oldest verification before timestamp
+    async fn get_oldest_user_verification(&self, before: u64) -> Result<VerificationRecord> {
+        let mut conn = self.read_pool.get()?;
+        tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            let query = "SELECT v.id, v.name, e.event_hash, e.author, e.created_at, v.verified_at, v.failed_at, v.failure_count FROM user_verification v INNER JOIN event e ON e.id=v.metadata_event WHERE (v.verified_at < ? OR v.verified_at IS NULL) AND (v.failed_at < ? OR v.failed_at IS NULL) ORDER BY v.verified_at ASC, v.failed_at ASC LIMIT 1;";
+            let mut stmt = tx.prepare_cached(query)?;
+            let fields = stmt.query_row(params![before, before], |r| {
+                let rowid: u64 = r.get(0)?;
+                let rowname: String = r.get(1)?;
+                let eventid: Vec<u8> = r.get(2)?;
+                let pubkey: Vec<u8> = r.get(3)?;
+                let created_at: u64 = r.get(4)?;
+                // create a tuple since we can't throw non-rusqlite errors in this closure
+                Ok((
+                    rowid,
+                    rowname,
+                    eventid,
+                    pubkey,
+                    created_at,
+                    r.get(5).ok(),
+                    r.get(6).ok(),
+                    r.get(7)?,
+                ))
+            })?;
+            let vr = VerificationRecord {
+                rowid: fields.0,
+                name: Nip05Name::try_from(&fields.1[..])?,
+                address: hex::encode(fields.3),
+                event: hex::encode(fields.2),
+                event_created: fields.4,
+                last_success: fields.5,
+                last_failure: fields.6,
+                failure_count: fields.7,
+            };
+            Ok(vr)
+        }).await?
+    }
+
+    /// Create account
+    async fn create_account(&self, pub_key: &Keys) -> Result<bool> {
+        let pub_key = pub_key.public_key().to_string();
+
+        let mut conn = self.write_pool.get()?;
+        let ins_count = tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            let ins_count: u64;
+            {
+                // Ignore if user is already in db
+                let query = "INSERT OR IGNORE INTO account (pubkey, is_admitted, balance) VALUES (?1, ?2, ?3);";
+                let mut stmt = tx.prepare(query)?;
+                ins_count = stmt.execute(params![&pub_key, false, 0])? as u64;
+            }
+            tx.commit()?;
+            let ok: Result<u64> = Ok(ins_count);
+            ok
+        }).await??;
+
+        if ins_count != 1 {
+            return Ok(false);
+        }
+
+        Ok(true)
+    }
+
+    /// Admit account
+    async fn admit_account(&self, pub_key: &Keys, admission_cost: u64) -> Result<()> {
+        let pub_key = pub_key.public_key().to_string();
+        let mut conn = self.write_pool.get()?;
+        let pub_key = pub_key.to_owned();
+        tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            {
+                let query = "UPDATE account SET is_admitted = TRUE, tos_accepted_at = strftime('%s','now'), balance = balance - ?1 WHERE pubkey=?2;";
+                let mut stmt = tx.prepare(query)?;
+                stmt.execute(params![admission_cost, pub_key])?;
+            }
+            tx.commit()?;
+            let ok: Result<()> = Ok(());
+            ok
+        })
+        .await?
+    }
+
+    /// Gets if the account is admitted and balance
+    async fn get_account_balance(&self, pub_key: &Keys) -> Result<(bool, u64)> {
+        let pub_key = pub_key.public_key().to_string();
+        let mut conn = self.write_pool.get()?;
+        let pub_key = pub_key.to_owned();
+        tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            let query = "SELECT is_admitted, balance FROM account WHERE pubkey = ?1;";
+            let mut stmt = tx.prepare_cached(query)?;
+            let fields = stmt.query_row(params![pub_key], |r| {
+                let is_admitted: bool = r.get(0)?;
+                let balance: u64 = r.get(1)?;
+                // create a tuple since we can't throw non-rusqlite errors in this closure
+                Ok((is_admitted, balance))
+            })?;
+            Ok(fields)
+        })
+        .await?
+    }
+
+    /// Update account balance
+    async fn update_account_balance(
+        &self,
+        pub_key: &Keys,
+        positive: bool,
+        new_balance: u64,
+    ) -> Result<()> {
+        let pub_key = pub_key.public_key().to_string();
+
+        let mut conn = self.write_pool.get()?;
+        tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            {
+                let query = if positive {
+                    "UPDATE account SET balance=balance + ?1 WHERE pubkey=?2"
+                } else {
+                    "UPDATE account SET balance=balance - ?1 WHERE pubkey=?2"
+                };
+                let mut stmt = tx.prepare(query)?;
+                stmt.execute(params![new_balance, pub_key])?;
+            }
+            tx.commit()?;
+            let ok: Result<()> = Ok(());
+            ok
+        })
+        .await?
+    }
+
+    /// Create invoice record
+    async fn create_invoice_record(&self, pub_key: &Keys, invoice_info: InvoiceInfo) -> Result<()> {
+        let pub_key = pub_key.public_key().to_string();
+        let pub_key = pub_key.to_owned();
+        let mut conn = self.write_pool.get()?;
+        tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            {
+                let query = "INSERT INTO invoice (pubkey, payment_hash, amount, status, description, created_at, invoice) VALUES (?1, ?2, ?3, ?4, ?5, strftime('%s','now'), ?6);";
+                let mut stmt = tx.prepare(query)?;
+                stmt.execute(params![&pub_key, invoice_info.payment_hash, invoice_info.amount, invoice_info.status.to_string(), invoice_info.memo, invoice_info.bolt11])?;
+            }
+            tx.commit()?;
+            let ok: Result<()> = Ok(());
+            ok
+        }).await??;
+
+        Ok(())
+    }
+
+    /// Update invoice record
+    async fn update_invoice(&self, payment_hash: &str, status: InvoiceStatus) -> Result<String> {
+        let mut conn = self.write_pool.get()?;
+        let payment_hash = payment_hash.to_owned();
+
+        tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+            let pubkey: String;
+            {
+                // Get required invoice info for given payment hash
+                let query = "SELECT pubkey, status, amount FROM invoice WHERE payment_hash=?1;";
+                let mut stmt = tx.prepare(query)?;
+                let (pub_key, prev_status, amount) = stmt.query_row(params![payment_hash], |r| {
+                    let pub_key: String = r.get(0)?;
+                    let status: String = r.get(1)?;
+                    let amount: u64 = r.get(2)?;
+
+                    Ok((pub_key, status, amount))
+                })?;
+
+                // If the invoice is paid update the confirmed_at timestamp
+                let query = if status.eq(&InvoiceStatus::Paid) {
+                    "UPDATE invoice SET status=?1, confirmed_at = strftime('%s', 'now') WHERE payment_hash=?2;"
+                } else {
+                    "UPDATE invoice SET status=?1 WHERE payment_hash=?2;"
+                };
+                let mut stmt = tx.prepare(query)?;
+                stmt.execute(params![status.to_string(), payment_hash])?;
+
+                // Increase account balance by given invoice amount
+                if prev_status == "Unpaid" && status.eq(&InvoiceStatus::Paid) {
+                    let query =
+                        "UPDATE account SET balance = balance + ?1 WHERE pubkey = ?2;";
+                    let mut stmt = tx.prepare(query)?;
+                    stmt.execute(params![amount, pub_key])?;
+                }
+
+                pubkey = pub_key;
+            }
+
+            tx.commit()?;
+            let ok: Result<String> = Ok(pubkey);
+            ok
+        })
+        .await?
+    }
+
+    /// Get the most recent invoice for a given pubkey;
+    /// the invoice must be unpaid and not expired
+    async fn get_unpaid_invoice(&self, pubkey: &Keys) -> Result<Option<InvoiceInfo>> {
+        let mut conn = self.write_pool.get()?;
+
+        let pubkey = pubkey.to_owned();
+        let pubkey_str = pubkey.clone().public_key().to_string();
+        let (payment_hash, invoice, amount, description) = tokio::task::spawn_blocking(move || {
+            let tx = conn.transaction()?;
+
+            let query = r#"
+SELECT amount, payment_hash, description, invoice
+FROM invoice
+WHERE pubkey = ?1 AND status = 'Unpaid'
+ORDER BY created_at DESC
+LIMIT 1;
+            "#;
+            let mut stmt = tx.prepare(query).unwrap();
+            stmt.query_row(params![&pubkey_str], |r| {
+                let amount: u64 = r.get(0)?;
+                let payment_hash: String = r.get(1)?;
+                let description: String = r.get(2)?;
+                let invoice: String = r.get(3)?;
+
+                Ok((payment_hash, invoice, amount, description))
+            })
+        })
+        .await??;
+
+        Ok(Some(InvoiceInfo {
+            pubkey: pubkey.public_key().to_string(),
+            payment_hash,
+            bolt11: invoice,
+            amount,
+            status: InvoiceStatus::Unpaid,
+            memo: description,
+            confirmed_at: None,
+        }))
+    }
+}
+
+/// Decide if there is an index that should be used explicitly
+fn override_index(f: &ReqFilter) -> Option<String> {
+    if f.ids.is_some() {
+        return Some("event_hash_index".into());
+    }
+    // queries for multiple kinds default to kind_index, which is
+    // significantly slower than kind_created_at_index.
+    if let Some(ks) = &f.kinds {
+        if f.ids.is_none()
+            && ks.len() > 1
+            && f.since.is_none()
+            && f.until.is_none()
+            && f.tags.is_none()
+            && f.authors.is_none()
+        {
+            return Some("kind_created_at_index".into());
+        }
+    }
+    // if there is an author, it is much better to force the authors index.
+    if f.authors.is_some() {
+        if f.since.is_none() && f.until.is_none() && f.limit.is_none() {
+            if f.kinds.is_none() {
+                // with no use of kinds/created_at, just author
+                return Some("author_index".into());
+            }
+            // prefer author_kind if there are kinds
+            return Some("author_kind_index".into());
+        }
+        // finally, prefer author_created_at if time is provided
+        return Some("author_created_at_index".into());
+    }
+    None
+}
+
+/// Create a dynamic SQL subquery and params from a subscription filter (and optional explicit index used)
+fn query_from_filter(f: &ReqFilter) -> (String, Vec<Box<dyn ToSql>>, Option<String>) {
+    // build a dynamic SQL query. all user-input is either an integer
+    // (sqli-safe), or a string that is filtered to only contain
+    // hexadecimal characters. Strings that require escaping (tag
+    // names/values) use parameters.
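+    //
+    // For example, a hypothetical filter {"authors":["0fa8"],"kinds":[1]}
+    // compiles to roughly:
+    //   SELECT e.content FROM event e INDEXED BY author_kind_index
+    //   WHERE hidden!=TRUE AND ((author>? AND author<?)) AND kind IN (1)
+    //   AND (expires_at IS NULL OR expires_at > ?) ORDER BY e.created_at ASC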
+
+    // if the filter is malformed, don't return anything.
+    if f.force_no_match {
+        let empty_query = "SELECT e.content FROM event e WHERE 1=0".to_owned();
+        // query parameters for SQLite
+        let empty_params: Vec<Box<dyn ToSql>> = vec![];
+        return (empty_query, empty_params, None);
+    }
+
+    // check if the index needs to be overridden
+    let idx_name = override_index(f);
+    let idx_stmt = idx_name
+        .as_ref()
+        .map_or_else(|| "".to_owned(), |i| format!("INDEXED BY {i}"));
+    let mut query = format!("SELECT e.content FROM event e {idx_stmt}");
+    // query parameters for SQLite
+    let mut params: Vec<Box<dyn ToSql>> = vec![];
+
+    // individual filter components (single conditions such as an author or event ID)
+    let mut filter_components: Vec<String> = Vec::new();
+    // Query for "authors", allowing prefix matches
+    if let Some(authvec) = &f.authors {
+        // take each author and convert to a hexsearch
+        let mut auth_searches: Vec<String> = vec![];
+        for auth in authvec {
+            match hex_range(auth) {
+                Some(HexSearch::Exact(ex)) => {
+                    auth_searches.push("author=?".to_owned());
+                    params.push(Box::new(ex));
+                }
+                Some(HexSearch::Range(lower, upper)) => {
+                    auth_searches.push("(author>? AND author<?)".to_owned());
+                    params.push(Box::new(lower));
+                    params.push(Box::new(upper));
+                }
+                Some(HexSearch::LowerOnly(lower)) => {
+                    auth_searches.push("author>?".to_owned());
+                    params.push(Box::new(lower));
+                }
+                None => {
+                    info!("Could not parse hex range from author {:?}", auth);
+                }
+            }
+        }
+        if !authvec.is_empty() {
+            let auth_clause = format!("({})", auth_searches.join(" OR "));
+            filter_components.push(auth_clause);
+        } else {
+            filter_components.push("false".to_owned());
+        }
+    }
+    // Query for Kind
+    if let Some(ks) = &f.kinds {
+        // kind is number, no escaping needed
+        let str_kinds: Vec<String> = ks.iter().map(std::string::ToString::to_string).collect();
+        let kind_clause = format!("kind IN ({})", str_kinds.join(", "));
+        filter_components.push(kind_clause);
+    }
+    // Query for event ids, allowing prefix matches
+    if let Some(idvec) = &f.ids {
+        // take each event id and convert to a hexsearch
+        let mut id_searches: Vec<String> = vec![];
+        for id in idvec {
+            match hex_range(id) {
+                Some(HexSearch::Exact(ex)) => {
+                    id_searches.push("event_hash=?".to_owned());
+                    params.push(Box::new(ex));
+                }
+                Some(HexSearch::Range(lower, upper)) => {
+                    id_searches.push("(event_hash>? AND event_hash<?)".to_owned());
+                    params.push(Box::new(lower));
+                    params.push(Box::new(upper));
+                }
+                Some(HexSearch::LowerOnly(lower)) => {
+                    id_searches.push("event_hash>?".to_owned());
+                    params.push(Box::new(lower));
+                }
+                None => {
+                    info!("Could not parse hex range from id {:?}", id);
+                }
+            }
+        }
+        if idvec.is_empty() {
+            // if the ids list was empty, we should never return
+            // any results.
+            filter_components.push("false".to_owned());
+        } else {
+            let id_clause = format!("({})", id_searches.join(" OR "));
+            filter_components.push(id_clause);
+        }
+    }
+    // Query for tags
+    if let Some(map) = &f.tags {
+        for (key, val) in map.iter() {
+            let mut str_vals: Vec<Box<dyn ToSql>> = vec![];
+            for v in val {
+                str_vals.push(Box::new(v.clone()));
+            }
+            // create clauses with "?" params for each tag value being searched
+            let str_clause = format!("AND value IN ({})", repeat_vars(str_vals.len()));
+            // find evidence of the target tag name/value existing for this event.
+            // Query for Kind/Since/Until additionally, to reduce the number of tags that come back.
+            let kind_clause;
+            let since_clause;
+            let until_clause;
+            if let Some(ks) = &f.kinds {
+                // kind is number, no escaping needed
+                let str_kinds: Vec<String> =
+                    ks.iter().map(std::string::ToString::to_string).collect();
+                kind_clause = format!("AND kind IN ({})", str_kinds.join(", "));
+            } else {
+                kind_clause = String::new();
+            };
+            if f.since.is_some() {
+                since_clause = format!("AND created_at > {}", f.since.unwrap());
+            } else {
+                since_clause = String::new();
+            };
+            // Query for timestamp
+            if f.until.is_some() {
+                until_clause = format!("AND created_at < {}", f.until.unwrap());
+            } else {
+                until_clause = String::new();
+            };
+
+            let tag_clause = format!(
+                "e.id IN (SELECT t.event_id FROM tag t WHERE (name=? {str_clause} {kind_clause} {since_clause} {until_clause}))"
+            );
+
+            // add the tag name as the first parameter
+            params.push(Box::new(key.to_string()));
+            // add all tag values that are blobs as params
+            params.append(&mut str_vals);
+            filter_components.push(tag_clause);
+        }
+    }
+    // Query for timestamp
+    if f.since.is_some() {
+        let created_clause = format!("created_at > {}", f.since.unwrap());
+        filter_components.push(created_clause);
+    }
+    // Query for timestamp
+    if f.until.is_some() {
+        let until_clause = format!("created_at < {}", f.until.unwrap());
+        filter_components.push(until_clause);
+    }
+    // never display hidden events
+    query.push_str(" WHERE hidden!=TRUE");
+    // never display expired events
+    filter_components.push("(expires_at IS NULL OR expires_at > ?)".to_string());
+    params.push(Box::new(unix_time()));
+    // build filter component conditions
+    if !filter_components.is_empty() {
+        query.push_str(" AND ");
+        query.push_str(&filter_components.join(" AND "));
+    }
+    // Apply per-filter limit to this subquery.
+    // The use of a LIMIT implies a DESC order, to capture only the most recent events.
+    if let Some(lim) = f.limit {
+        let _ = write!(query, " ORDER BY e.created_at DESC LIMIT {lim}");
+    } else {
+        query.push_str(" ORDER BY e.created_at ASC");
+    }
+    (query, params, idx_name)
+}
+
+/// Create a dynamic SQL query string and params from a subscription.
+fn _query_from_sub(sub: &Subscription) -> (String, Vec<Box<dyn ToSql>>, Vec<String>) {
+    // build a dynamic SQL query for an entire subscription, based on
+    // SQL subqueries for filters.
+    let mut subqueries: Vec<String> = Vec::new();
+    let mut indexes = vec![];
+    // subquery params
+    let mut params: Vec<Box<dyn ToSql>> = vec![];
+    // for every filter in the subscription, generate a subquery
+    for f in &sub.filters {
+        let (f_subquery, mut f_params, index) = query_from_filter(f);
+        if let Some(i) = index {
+            indexes.push(i);
+        }
+        subqueries.push(f_subquery);
+        params.append(&mut f_params);
+    }
+    // encapsulate subqueries into select statements
+    let subqueries_selects: Vec<String> = subqueries
+        .iter()
+        .map(|s| format!("SELECT distinct content, created_at FROM ({s})"))
+        .collect();
+    let query: String = subqueries_selects.join(" UNION ");
+    (query, params, indexes)
+}
+
+/// Build a database connection pool.
+/// # Panics
+///
+/// Will panic if the pool could not be created.
+#[must_use]
+pub fn build_pool(
+    name: &str,
+    settings: &Settings,
+    flags: OpenFlags,
+    min_size: u32,
+    max_size: u32,
+    wait_for_db: bool,
+) -> SqlitePool {
+    let db_dir = &settings.database.data_directory;
+    let full_path = Path::new(db_dir).join(DB_FILE);
+
+    // small hack; if the database doesn't exist yet, that means the
+    // writer thread hasn't finished. Give it a chance to work. This
+    // is only an issue with the first time we run.
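+    // (skipped for in-memory databases, which exist as soon as the first
+    // connection opens them)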
+    if !settings.database.in_memory {
+        while !full_path.exists() && wait_for_db {
+            debug!("Database reader pool is waiting on the database to be created...");
+            thread::sleep(Duration::from_millis(500));
+        }
+    }
+    let manager = if settings.database.in_memory {
+        SqliteConnectionManager::file("file::memory:?cache=shared")
+            .with_flags(flags)
+            .with_init(|c| c.execute_batch(STARTUP_SQL))
+    } else {
+        SqliteConnectionManager::file(&full_path)
+            .with_flags(flags)
+            .with_init(|c| c.execute_batch(STARTUP_SQL))
+    };
+    let pool: SqlitePool = r2d2::Pool::builder()
+        .test_on_check_out(true) // no noticeable performance hit
+        .min_idle(Some(min_size))
+        .max_size(max_size)
+        .idle_timeout(Some(Duration::from_secs(10)))
+        .max_lifetime(Some(Duration::from_secs(30)))
+        .build(manager)
+        .unwrap();
+    // retrieve a connection to ensure the startup statements run immediately
+    {
+        let _ = pool.get();
+    }
+
+    info!(
+        "Built a connection pool {:?} (min={}, max={})",
+        name, min_size, max_size
+    );
+    pool
+}
+
+/// Cleanup expired events on a regular basis
+async fn cleanup_expired(
+    pool: SqlitePool,
+    frequency: Duration,
+    write_in_progress: Arc<Mutex<u64>>,
+) -> Result<()> {
+    tokio::task::spawn(async move {
+        loop {
+            tokio::select! {
+                _ = tokio::time::sleep(frequency) => {
+                    if let Ok(mut conn) = pool.get() {
+                        let mut _guard: Option<MutexGuard<u64>> = None;
+                        // take a write lock to prevent event writes
+                        // from proceeding while we are deleting
+                        // events. This isn't strictly necessary, but
+                        // minimizes the chances of forcing event
+                        // persistence to be retried.
+                        _guard = Some(write_in_progress.lock().await);
+                        let start = Instant::now();
+                        let exp_res = tokio::task::spawn_blocking(move || {
+                            delete_expired(&mut conn)
+                        }).await;
+                        match exp_res {
+                            Ok(Ok(count)) => {
+                                if count > 0 {
+                                    info!("removed {} expired events in: {:?}", count, start.elapsed());
+                                }
+                            },
+                            _ => {
+                                // either the task or underlying query failed
+                                info!("there was an error cleaning up expired events: {:?}", exp_res);
+                            }
+                        }
+                    }
+                }
+            };
+        }
+    });
+    Ok(())
+}
+
+/// Execute a query to delete all expired events
+pub fn delete_expired(conn: &mut PooledConnection) -> Result<usize> {
+    let tx = conn.transaction()?;
+    let update_count = tx.execute(
+        "DELETE FROM event WHERE expires_at <= ?",
+        params![unix_time()],
+    )?;
+    tx.commit()?;
+    Ok(update_count)
+}
+
+/// Perform database WAL checkpoint on a regular basis
+pub async fn db_checkpoint_task(
+    pool: SqlitePool,
+    frequency: Duration,
+    write_in_progress: Arc<Mutex<u64>>,
+    checkpoint_in_progress: Arc<Mutex<u64>>,
+) -> Result<()> {
+    // TODO: use acquire_many on the reader semaphore to stop them from interrupting this.
+    tokio::task::spawn(async move {
+        // WAL size in pages.
+        let mut current_wal_size = 0;
+        // WAL threshold for more aggressive checkpointing (10,000 pages, or about 40MB)
+        let wal_threshold = 1000 * 10;
+        // default threshold for the busy timer
+        let busy_wait_default = Duration::from_secs(1);
+        // if the WAL file is getting too big, switch to this
+        let busy_wait_default_long = Duration::from_secs(10);
+        loop {
+            tokio::select! {
+                _ = tokio::time::sleep(frequency) => {
+                    if let Ok(mut conn) = pool.get() {
+                        // block all other writers
+                        let _write_guard = write_in_progress.lock().await;
+                        let mut _guard: Option<MutexGuard<u64>> = None;
+                        // the busy timer will block writers, so don't set
+                        // this any higher than you want max latency for event
+                        // writes.
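+                        // Sketch of the tradeoff: under ~10,000 WAL pages we keep
+                        // a 1s busy timeout so writes stay fast; past that we
+                        // accept a 10s timeout and block new readers so the
+                        // TRUNCATE checkpoint can actually finish.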
+                        if current_wal_size <= wal_threshold {
+                            conn.busy_timeout(busy_wait_default).ok();
+                        } else {
+                            // if the wal size has exceeded a threshold, increase the busy timeout.
+                            conn.busy_timeout(busy_wait_default_long).ok();
+                            // take a lock that will prevent new readers.
+                            info!("blocking new readers to perform wal_checkpoint");
+                            _guard = Some(checkpoint_in_progress.lock().await);
+                        }
+                        debug!("running wal_checkpoint(TRUNCATE)");
+                        if let Ok(new_size) = checkpoint_db(&mut conn) {
+                            current_wal_size = new_size;
+                        }
+                    }
+                }
+            };
+        }
+    });
+
+    Ok(())
+}
+
+#[derive(Debug)]
+enum SqliteStatus {
+    Ok,
+    Busy,
+    Error,
+    Other(u64),
+}
+
+/// Checkpoint/Truncate WAL. Returns the number of WAL pages remaining.
+pub fn checkpoint_db(conn: &mut PooledConnection) -> Result<usize> {
+    let query = "PRAGMA wal_checkpoint(TRUNCATE);";
+    let start = Instant::now();
+    let (cp_result, wal_size, _frames_checkpointed) = conn.query_row(query, [], |row| {
+        let checkpoint_result: u64 = row.get(0)?;
+        let wal_size: u64 = row.get(1)?;
+        let frames_checkpointed: u64 = row.get(2)?;
+        Ok((checkpoint_result, wal_size, frames_checkpointed))
+    })?;
+    let result = match cp_result {
+        0 => SqliteStatus::Ok,
+        1 => SqliteStatus::Busy,
+        2 => SqliteStatus::Error,
+        x => SqliteStatus::Other(x),
+    };
+    info!(
+        "checkpoint ran in {:?} (result: {:?}, WAL size: {})",
+        start.elapsed(),
+        result,
+        wal_size
+    );
+    Ok(wal_size as usize)
+}
+
+/// Produce an arbitrary list of '?' parameters.
+fn repeat_vars(count: usize) -> String {
+    if count == 0 {
+        return "".to_owned();
+    }
+    let mut s = "?,".repeat(count);
+    // Remove trailing comma
+    s.pop();
+    s
+}
+
+/// Display database pool stats every 1 minute
+pub async fn monitor_pool(name: &str, pool: SqlitePool) {
+    let sleep_dur = Duration::from_secs(60);
+    loop {
+        log_pool_stats(name, &pool);
+        tokio::time::sleep(sleep_dur).await;
+    }
+}
+
+/// Log pool stats
+fn log_pool_stats(name: &str, pool: &SqlitePool) {
+    let state: r2d2::State = pool.state();
+    let in_use_cxns = state.connections - state.idle_connections;
+    debug!(
+        "DB pool {:?} usage (in_use: {}, available: {}, max: {})",
+        name,
+        in_use_cxns,
+        state.connections,
+        pool.max_size()
+    );
+}
+
+/// Check if the pool is fully utilized
+fn _pool_at_capacity(pool: &SqlitePool) -> bool {
+    let state: r2d2::State = pool.state();
+    state.idle_connections == 0
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/repo/sqlite_migration.rs b/crates/rooch-ws-relay/src/repo/sqlite_migration.rs
new file mode 100644
index 0000000000..d5cac6afaf
--- /dev/null
+++ b/crates/rooch-ws-relay/src/repo/sqlite_migration.rs
@@ -0,0 +1,841 @@
+//! Database schema and migrations
+use crate::server::db::PooledConnection;
+use crate::server::error::Result;
+use crate::server::event::{single_char_tagname, Event};
+use crate::server::utils::is_lower_hex;
+use const_format::formatcp;
+use indicatif::{ProgressBar, ProgressStyle};
+use rusqlite::limits::Limit;
+use rusqlite::params;
+use rusqlite::Connection;
+use std::cmp::Ordering;
+use std::time::Instant;
+use tracing::{debug, error, info};
+
+/// Startup DB Pragmas
+pub const STARTUP_SQL: &str = r##"
+PRAGMA main.synchronous = NORMAL;
+PRAGMA foreign_keys = ON;
+PRAGMA journal_size_limit = 32768;
+PRAGMA temp_store = 2; -- use memory, not temp files
+PRAGMA main.cache_size = 20000; -- 80MB max cache size per conn
+pragma mmap_size = 0; -- disable mmap (default)
+"##;
+
+/// Latest database version
+pub const DB_VERSION: usize = 18;
+
+/// Schema definition
+const INIT_SQL: &str = formatcp!(
+    r##"
+-- Database settings
+PRAGMA encoding = "UTF-8";
+PRAGMA journal_mode = WAL;
+PRAGMA auto_vacuum = FULL;
+PRAGMA main.synchronous=NORMAL;
+PRAGMA foreign_keys = ON;
+PRAGMA application_id = 1654008667;
+PRAGMA user_version = {};
+
+-- Event Table
+CREATE TABLE IF NOT EXISTS event (
+id INTEGER PRIMARY KEY,
+event_hash BLOB NOT NULL, -- 32-byte SHA-256 hash of the event
+first_seen INTEGER NOT NULL, -- when the event was first seen (not authored!) (seconds since 1970)
+created_at INTEGER NOT NULL, -- when the event was authored
+expires_at INTEGER, -- when the event expires and may be deleted
+author BLOB NOT NULL, -- author pubkey
+delegated_by BLOB, -- delegator pubkey (NIP-26)
+kind INTEGER NOT NULL, -- event kind
+hidden INTEGER, -- relevant for queries
+content TEXT NOT NULL -- serialized json of event object
+);
+
+-- Event Indexes
+CREATE UNIQUE INDEX IF NOT EXISTS event_hash_index ON event(event_hash);
+CREATE INDEX IF NOT EXISTS author_index ON event(author);
+CREATE INDEX IF NOT EXISTS kind_index ON event(kind);
+CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
+CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
+CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
+CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author);
+CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at);
+CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at);
+CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind);
+CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);
+
+-- Tag Table
+-- Tag values are stored as either a BLOB (if they come in as a
+-- hex-string), or TEXT otherwise.
+-- This means that searches need to select the appropriate column.
+-- We duplicate the kind/created_at to make indexes much more efficient.
+CREATE TABLE IF NOT EXISTS tag (
+id INTEGER PRIMARY KEY,
+event_id INTEGER NOT NULL, -- an event ID that contains a tag.
+name TEXT, -- the tag name ("p", "e", whatever)
+value TEXT, -- the tag value, if not hex.
+value_hex BLOB, -- the tag value, if it can be interpreted as a lowercase hex string.
+created_at INTEGER NOT NULL, -- when the event was authored
+kind INTEGER NOT NULL, -- event kind
+FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
+);
+CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value);
+CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value);
+CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value);
+CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id);
+
+-- NIP-05 User Validation
+CREATE TABLE IF NOT EXISTS user_verification (
+id INTEGER PRIMARY KEY,
+metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
+name TEXT NOT NULL, -- the nip05 field value (user@domain).
+verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
+failed_at INTEGER, -- timestamp a verification attempt failed (host down).
+failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
+FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
+);
+CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
+CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
+
+-- Create account table
+CREATE TABLE IF NOT EXISTS account (
+pubkey TEXT PRIMARY KEY,
+is_admitted INTEGER NOT NULL DEFAULT 0,
+balance INTEGER NOT NULL DEFAULT 0,
+tos_accepted_at INTEGER
+);
+
+-- Create account index
+CREATE INDEX IF NOT EXISTS user_pubkey_index ON account(pubkey);
+
+-- Invoice table
+CREATE TABLE IF NOT EXISTS invoice (
+payment_hash TEXT PRIMARY KEY,
+pubkey TEXT NOT NULL,
+invoice TEXT NOT NULL,
+amount INTEGER NOT NULL,
+status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NULL DEFAULT 'Unpaid',
+description TEXT,
+created_at INTEGER NOT NULL,
+confirmed_at INTEGER,
+CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
+);
+
+-- Create invoice index
+CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey);
+
+
+"##,
+    DB_VERSION
+);
+
+/// Determine the current application database schema version.
+pub fn curr_db_version(conn: &mut Connection) -> Result<usize> {
+    let query = "PRAGMA user_version;";
+    let curr_version = conn.query_row(query, [], |row| row.get(0))?;
+    Ok(curr_version)
+}
+
+/// Determine event count
+pub fn db_event_count(conn: &mut Connection) -> Result<usize> {
+    let query = "SELECT count(*) FROM event;";
+    let count = conn.query_row(query, [], |row| row.get(0))?;
+    Ok(count)
+}
+
+/// Determine tag count
+pub fn db_tag_count(conn: &mut Connection) -> Result<usize> {
+    let query = "SELECT count(*) FROM tag;";
+    let count = conn.query_row(query, [], |row| row.get(0))?;
+    Ok(count)
+}
+
+fn mig_init(conn: &mut PooledConnection) -> usize {
+    match conn.execute_batch(INIT_SQL) {
+        Ok(()) => {
+            info!(
+                "database pragma/schema initialized to v{}, and ready",
+                DB_VERSION
+            );
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be initialized");
+        }
+    }
+    DB_VERSION
+}
+
+/// Upgrade DB to latest version, and execute pragma settings
+pub fn upgrade_db(conn: &mut PooledConnection) -> Result<usize> {
+    // check the version.
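+    // The ladder below is strictly sequential: e.g. a v3 database runs
+    // mig_3_to_4, mig_4_to_5, ... until user_version reaches DB_VERSION;
+    // each step alters the schema and bumps PRAGMA user_version itself.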
+ let mut curr_version = curr_db_version(conn)?; + info!("DB version = {:?}", curr_version); + + debug!( + "SQLite max query parameters: {}", + conn.limit(Limit::SQLITE_LIMIT_VARIABLE_NUMBER) + ); + debug!( + "SQLite max table/blob/text length: {} MB", + (f64::from(conn.limit(Limit::SQLITE_LIMIT_LENGTH)) / f64::from(1024 * 1024)).floor() + ); + debug!( + "SQLite max SQL length: {} MB", + (f64::from(conn.limit(Limit::SQLITE_LIMIT_SQL_LENGTH)) / f64::from(1024 * 1024)).floor() + ); + + match curr_version.cmp(&DB_VERSION) { + // Database is new or not current + Ordering::Less => { + // initialize from scratch + if curr_version == 0 { + curr_version = mig_init(conn); + } + // for initialized but out-of-date schemas, proceed to + // upgrade sequentially until we are current. + if curr_version == 1 { + curr_version = mig_1_to_2(conn)?; + } + if curr_version == 2 { + curr_version = mig_2_to_3(conn)?; + } + if curr_version == 3 { + curr_version = mig_3_to_4(conn)?; + } + if curr_version == 4 { + curr_version = mig_4_to_5(conn)?; + } + if curr_version == 5 { + curr_version = mig_5_to_6(conn)?; + } + if curr_version == 6 { + curr_version = mig_6_to_7(conn)?; + } + if curr_version == 7 { + curr_version = mig_7_to_8(conn)?; + } + if curr_version == 8 { + curr_version = mig_8_to_9(conn)?; + } + if curr_version == 9 { + curr_version = mig_9_to_10(conn)?; + } + if curr_version == 10 { + curr_version = mig_10_to_11(conn)?; + } + if curr_version == 11 { + curr_version = mig_11_to_12(conn)?; + } + if curr_version == 12 { + curr_version = mig_12_to_13(conn)?; + } + if curr_version == 13 { + curr_version = mig_13_to_14(conn)?; + } + if curr_version == 14 { + curr_version = mig_14_to_15(conn)?; + } + if curr_version == 15 { + curr_version = mig_15_to_16(conn)?; + } + if curr_version == 16 { + curr_version = mig_16_to_17(conn)?; + } + if curr_version == 17 { + curr_version = mig_17_to_18(conn)?; + } + + if curr_version == DB_VERSION { + info!( + "All migration scripts completed successfully. Welcome to v{}.", + DB_VERSION + ); + } + } + // Database is current, all is good + Ordering::Equal => { + debug!("Database version was already current (v{DB_VERSION})"); + } + // Database is newer than what this code understands, abort + Ordering::Greater => { + panic!( + "Database version is newer than supported by this executable (v{curr_version} > v{DB_VERSION})", + ); + } + } + + // Setup PRAGMA + conn.execute_batch(STARTUP_SQL)?; + debug!("SQLite PRAGMA startup completed"); + Ok(DB_VERSION) +} + +pub fn rebuild_tags(conn: &mut PooledConnection) -> Result<()> { + // Check how many events we have to process + let count = db_event_count(conn)?; + let update_each_percent = 0.05; + let mut percent_done = 0.0; + let mut events_processed = 0; + let start = Instant::now(); + let tx = conn.transaction()?; + { + // Clear out table + tx.execute("DELETE FROM tag;", [])?; + let mut stmt = tx.prepare("select id, content from event order by id;")?; + let mut tag_rows = stmt.query([])?; + while let Some(row) = tag_rows.next()? { + if (events_processed as f32) / (count as f32) > percent_done { + info!("Tag update {}% complete...", (100.0 * percent_done).round()); + percent_done += update_each_percent; + } + // we want to capture the event_id that had the tag, the tag name, and the tag hex value. + let event_id: u64 = row.get(0)?; + let event_json: String = row.get(1)?; + let event: Event = serde_json::from_str(&event_json)?; + // look at each event, and each tag, creating new tag entries if appropriate. 
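+            // e.g. a tag ["e", "5c46aa..."] yields name "e" and value "5c46aa...";
+            // names longer than one character (such as "subject") are skipped below.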
+            for t in event.tags.iter().filter(|x| x.len() > 1) {
+                let tagname = t.get(0).unwrap();
+                let tagnamechar_opt = single_char_tagname(tagname);
+                if tagnamechar_opt.is_none() {
+                    continue;
+                }
+                // safe because len was > 1
+                let tagval = t.get(1).unwrap();
+                // insert as BLOB if we can restore it losslessly.
+                // this means it needs to be even length and lowercase.
+                if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
+                    tx.execute(
+                        "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
+                        params![event_id, tagname, hex::decode(tagval).ok()],
+                    )?;
+                } else {
+                    // otherwise, insert as text
+                    tx.execute(
+                        "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
+                        params![event_id, tagname, &tagval],
+                    )?;
+                }
+            }
+            events_processed += 1;
+        }
+    }
+    tx.commit()?;
+    info!("rebuilt tags in {:?}", start.elapsed());
+    Ok(())
+}
+
+//// Migration Scripts
+
+fn mig_1_to_2(conn: &mut PooledConnection) -> Result<usize> {
+    // only change is adding a hidden column to events.
+    let upgrade_sql = r##"
+ALTER TABLE event ADD hidden INTEGER;
+UPDATE event SET hidden=FALSE;
+PRAGMA user_version = 2;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v1 -> v2");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(2)
+}
+
+fn mig_2_to_3(conn: &mut PooledConnection) -> Result<usize> {
+    // this version lacks the tag table
+    info!("database schema needs update from 2->3");
+    let upgrade_sql = r##"
+CREATE TABLE IF NOT EXISTS tag (
+id INTEGER PRIMARY KEY,
+event_id INTEGER NOT NULL, -- an event ID that contains a tag.
+name TEXT, -- the tag name ("p", "e", whatever)
+value TEXT, -- the tag value, if not hex.
+value_hex BLOB, -- the tag value, if it can be interpreted as a hex string.
+FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
+);
+PRAGMA user_version = 3;
+"##;
+    // TODO: load existing refs into tag table
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v2 -> v3");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    // iterate over every event/pubkey tag
+    let tx = conn.transaction()?;
+    {
+        let mut stmt = tx.prepare("select event_id, \"e\", lower(hex(referenced_event)) from event_ref union select event_id, \"p\", lower(hex(referenced_pubkey)) from pubkey_ref;")?;
+        let mut tag_rows = stmt.query([])?;
+        while let Some(row) = tag_rows.next()? {
+            // we want to capture the event_id that had the tag, the tag name, and the tag hex value.
+            let event_id: u64 = row.get(0)?;
+            let tag_name: String = row.get(1)?;
+            let tag_value: String = row.get(2)?;
+            // this will leave behind p/e tags that were non-hex, but they are invalid anyways.
+            if is_lower_hex(&tag_value) {
+                tx.execute(
+                    "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
+                    params![event_id, tag_name, hex::decode(&tag_value).ok()],
+                )?;
+            }
+        }
+    }
+    info!("Updated tag values");
+    tx.commit()?;
+    Ok(3)
+}
+
+fn mig_3_to_4(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 3->4");
+    let upgrade_sql = r##"
+-- incoming metadata events with nip05
+CREATE TABLE IF NOT EXISTS user_verification (
+id INTEGER PRIMARY KEY,
+metadata_event INTEGER NOT NULL, -- the metadata event used for this validation.
+name TEXT NOT NULL, -- the nip05 field value (user@domain).
+verified_at INTEGER, -- timestamp this author/nip05 was most recently verified.
+failed_at INTEGER, -- timestamp a verification attempt failed (host down).
+failure_count INTEGER DEFAULT 0, -- number of consecutive failures.
+FOREIGN KEY(metadata_event) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE
+);
+CREATE INDEX IF NOT EXISTS user_verification_name_index ON user_verification(name);
+CREATE INDEX IF NOT EXISTS user_verification_event_index ON user_verification(metadata_event);
+PRAGMA user_version = 4;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v3 -> v4");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(4)
+}
+
+fn mig_4_to_5(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 4->5");
+    let upgrade_sql = r##"
+DROP TABLE IF EXISTS event_ref;
+DROP TABLE IF EXISTS pubkey_ref;
+PRAGMA user_version=5;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v4 -> v5");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(5)
+}
+
+fn mig_5_to_6(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 5->6");
+    // We need to rebuild the tags table. iterate through the
+    // event table. build event from json, insert tags into a
+    // fresh tag table. This was needed due to a logic error in
+    // how hex-like tags got indexed.
+    let start = Instant::now();
+    let tx = conn.transaction()?;
+    {
+        // Clear out table
+        tx.execute("DELETE FROM tag;", [])?;
+        let mut stmt = tx.prepare("select id, content from event order by id;")?;
+        let mut tag_rows = stmt.query([])?;
+        while let Some(row) = tag_rows.next()? {
+            let event_id: u64 = row.get(0)?;
+            let event_json: String = row.get(1)?;
+            let event: Event = serde_json::from_str(&event_json)?;
+            // look at each event, and each tag, creating new tag entries if appropriate.
+            for t in event.tags.iter().filter(|x| x.len() > 1) {
+                let tagname = t.get(0).unwrap();
+                let tagnamechar_opt = single_char_tagname(tagname);
+                if tagnamechar_opt.is_none() {
+                    continue;
+                }
+                // safe because len was > 1
+                let tagval = t.get(1).unwrap();
+                // insert as BLOB if we can restore it losslessly.
+                // this means it needs to be even length and lowercase.
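+                // e.g. "91cf..." (even-length lowercase hex) survives a
+                // hex::decode/encode round trip and becomes a BLOB; a value
+                // like "wss://example.com" falls through to the TEXT column.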
+                if (tagval.len() % 2 == 0) && is_lower_hex(tagval) {
+                    tx.execute(
+                        "INSERT INTO tag (event_id, name, value_hex) VALUES (?1, ?2, ?3);",
+                        params![event_id, tagname, hex::decode(tagval).ok()],
+                    )?;
+                } else {
+                    // otherwise, insert as text
+                    tx.execute(
+                        "INSERT INTO tag (event_id, name, value) VALUES (?1, ?2, ?3);",
+                        params![event_id, tagname, &tagval],
+                    )?;
+                }
+            }
+        }
+        tx.execute("PRAGMA user_version = 6;", [])?;
+    }
+    tx.commit()?;
+    info!("database schema upgraded v5 -> v6 in {:?}", start.elapsed());
+    // vacuum after large table modification
+    let start = Instant::now();
+    conn.execute("VACUUM;", [])?;
+    info!("vacuumed DB after tags rebuild in {:?}", start.elapsed());
+    Ok(6)
+}
+
+fn mig_6_to_7(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 6->7");
+    let upgrade_sql = r##"
+ALTER TABLE event ADD delegated_by BLOB;
+CREATE INDEX IF NOT EXISTS delegated_by_index ON event(delegated_by);
+PRAGMA user_version = 7;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v6 -> v7");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(7)
+}
+
+fn mig_7_to_8(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 7->8");
+    // Remove redundant indexes, and add a better multi-column index.
+    let upgrade_sql = r##"
+DROP INDEX IF EXISTS created_at_index;
+DROP INDEX IF EXISTS kind_index;
+CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
+PRAGMA user_version = 8;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v7 -> v8");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(8)
+}
+
+fn mig_8_to_9(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 8->9");
+    // Those old indexes were actually helpful...
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS created_at_index ON event(created_at);
+CREATE INDEX IF NOT EXISTS event_composite_index ON event(kind,created_at);
+PRAGMA user_version = 9;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v8 -> v9");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(9)
+}
+
+fn mig_9_to_10(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 9->10");
+    // Those old indexes were actually helpful...
+    let upgrade_sql = r##"
+CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value_hex,value);
+PRAGMA user_version = 10;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v9 -> v10");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(10)
+}
+
+fn mig_10_to_11(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 10->11");
+    // Those old indexes were actually helpful...
+ let upgrade_sql = r##" +CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value_hex); +reindex; +pragma optimize; +PRAGMA user_version = 11; +"##; + match conn.execute_batch(upgrade_sql) { + Ok(()) => { + info!("database schema upgraded v10 -> v11"); + } + Err(err) => { + error!("update failed: {}", err); + panic!("database could not be upgraded"); + } + } + Ok(11) +} + +fn mig_11_to_12(conn: &mut PooledConnection) -> Result { + info!("database schema needs update from 11->12"); + let start = Instant::now(); + let tx = conn.transaction()?; + { + // Lookup every replaceable event + let mut stmt = tx.prepare("select kind,author from event where kind in (0,3,41) or (kind>=10000 and kind<20000) order by id;")?; + let mut replaceable_rows = stmt.query([])?; + info!("updating replaceable events; this could take awhile..."); + while let Some(row) = replaceable_rows.next()? { + // we want to capture the event_id that had the tag, the tag name, and the tag hex value. + let event_kind: u64 = row.get(0)?; + let event_author: Vec = row.get(1)?; + tx.execute( + "UPDATE event SET hidden=TRUE WHERE hidden!=TRUE and kind=? and author=? and id NOT IN (SELECT id FROM event WHERE kind=? AND author=? ORDER BY created_at DESC LIMIT 1)", + params![event_kind, event_author, event_kind, event_author], + )?; + } + tx.execute("PRAGMA user_version = 12;", [])?; + } + tx.commit()?; + info!( + "database schema upgraded v11 -> v12 in {:?}", + start.elapsed() + ); + // vacuum after large table modification + let start = Instant::now(); + conn.execute("VACUUM;", [])?; + info!( + "vacuumed DB after hidden event cleanup in {:?}", + start.elapsed() + ); + Ok(12) +} + +fn mig_12_to_13(conn: &mut PooledConnection) -> Result { + info!("database schema needs update from 12->13"); + let upgrade_sql = r##" +CREATE INDEX IF NOT EXISTS kind_author_index ON event(kind,author); +reindex; +pragma optimize; +PRAGMA user_version = 13; +"##; + match conn.execute_batch(upgrade_sql) { + Ok(()) => { + info!("database schema upgraded v12 -> v13"); + } + Err(err) => { + error!("update failed: {}", err); + panic!("database could not be upgraded"); + } + } + Ok(13) +} + +fn mig_13_to_14(conn: &mut PooledConnection) -> Result { + info!("database schema needs update from 13->14"); + let upgrade_sql = r##" +CREATE INDEX IF NOT EXISTS kind_index ON event(kind); +CREATE INDEX IF NOT EXISTS kind_created_at_index ON event(kind,created_at); +pragma optimize; +PRAGMA user_version = 14; +"##; + match conn.execute_batch(upgrade_sql) { + Ok(()) => { + info!("database schema upgraded v13 -> v14"); + } + Err(err) => { + error!("update failed: {}", err); + panic!("database could not be upgraded"); + } + } + Ok(14) +} + +fn mig_14_to_15(conn: &mut PooledConnection) -> Result { + info!("database schema needs update from 14->15"); + let upgrade_sql = r##" +CREATE INDEX IF NOT EXISTS author_created_at_index ON event(author,created_at); +CREATE INDEX IF NOT EXISTS author_kind_index ON event(author,kind); +PRAGMA user_version = 15; +"##; + match conn.execute_batch(upgrade_sql) { + Ok(()) => { + info!("database schema upgraded v14 -> v15"); + } + Err(err) => { + error!("update failed: {}", err); + panic!("database could not be upgraded"); + } + } + // clear out hidden events + let clear_hidden_sql = r##"DELETE FROM event WHERE HIDDEN=true;"##; + info!("removing hidden events; this may take awhile..."); + match conn.execute_batch(clear_hidden_sql) { + Ok(()) => { + info!("all hidden events removed"); + } + Err(err) => { + error!("delete failed: 
{}", err); + panic!("could not remove hidden events"); + } + } + Ok(15) +} + +fn mig_15_to_16(conn: &mut PooledConnection) -> Result { + let count = db_event_count(conn)?; + info!("database schema needs update from 15->16 (this may take a few minutes)"); + let upgrade_sql = r##" +DROP TABLE tag; +CREATE TABLE tag ( +id INTEGER PRIMARY KEY, +event_id INTEGER NOT NULL, -- an event ID that contains a tag. +name TEXT, -- the tag name ("p", "e", whatever) +value TEXT, -- the tag value, if not hex. +created_at INTEGER NOT NULL, -- when the event was authored +kind INTEGER NOT NULL, -- event kind +FOREIGN KEY(event_id) REFERENCES event(id) ON UPDATE CASCADE ON DELETE CASCADE +); +CREATE INDEX IF NOT EXISTS tag_val_index ON tag(value); +CREATE INDEX IF NOT EXISTS tag_composite_index ON tag(event_id,name,value); +CREATE INDEX IF NOT EXISTS tag_name_eid_index ON tag(name,event_id,value); +CREATE INDEX IF NOT EXISTS tag_covering_index ON tag(name,kind,value,created_at,event_id); +"##; + + let start = Instant::now(); + let tx = conn.transaction()?; + + let bar = ProgressBar::new(count.try_into().unwrap()).with_message("rebuilding tags table"); + bar.set_style( + ProgressStyle::with_template( + "[{elapsed_precise}] {bar:40.white/blue} {pos:>7}/{len:7} [{percent}%] {msg}", + ) + .unwrap(), + ); + { + tx.execute_batch(upgrade_sql)?; + let mut stmt = + tx.prepare("select id, kind, created_at, content from event order by id;")?; + let mut tag_rows = stmt.query([])?; + let mut count = 0; + while let Some(row) = tag_rows.next()? { + count += 1; + if count % 10 == 0 { + bar.inc(10); + } + let event_id: u64 = row.get(0)?; + let kind: u64 = row.get(1)?; + let created_at: u64 = row.get(2)?; + let event_json: String = row.get(3)?; + let event: Event = serde_json::from_str(&event_json)?; + // look at each event, and each tag, creating new tag entries if appropriate. 
+            for t in event.tags.iter().filter(|x| x.len() > 1) {
+                let tagname = t.get(0).unwrap();
+                let tagnamechar_opt = single_char_tagname(tagname);
+                if tagnamechar_opt.is_none() {
+                    continue;
+                }
+                // safe because len was > 1
+                let tagval = t.get(1).unwrap();
+                // in the new schema, every tag value is inserted as text
+                tx.execute(
+                    "INSERT INTO tag (event_id, name, value, kind, created_at) VALUES (?1, ?2, ?3, ?4, ?5);",
+                    params![event_id, tagname, &tagval, kind, created_at],
+                )?;
+            }
+        }
+        tx.execute("PRAGMA user_version = 16;", [])?;
+    }
+    bar.finish();
+    tx.commit()?;
+    info!(
+        "database schema upgraded v15 -> v16 in {:?}",
+        start.elapsed()
+    );
+    Ok(16)
+}
+
+fn mig_16_to_17(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 16->17");
+    let upgrade_sql = r##"
+ALTER TABLE event ADD COLUMN expires_at INTEGER;
+CREATE INDEX IF NOT EXISTS event_expiration ON event(expires_at);
+PRAGMA user_version = 17;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v16 -> v17");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(17)
+}
+
+fn mig_17_to_18(conn: &mut PooledConnection) -> Result<usize> {
+    info!("database schema needs update from 17->18");
+    let upgrade_sql = r##"
+-- Create invoices table
+CREATE TABLE IF NOT EXISTS invoice (
+payment_hash TEXT PRIMARY KEY,
+pubkey TEXT NOT NULL,
+invoice TEXT NOT NULL,
+amount INTEGER NOT NULL,
+status TEXT CHECK ( status IN ('Paid', 'Unpaid', 'Expired' ) ) NOT NULL DEFAULT 'Unpaid',
+description TEXT,
+created_at INTEGER NOT NULL,
+confirmed_at INTEGER,
+CONSTRAINT invoice_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES account (pubkey) ON DELETE CASCADE
+);
+
+-- Create invoice index
+CREATE INDEX IF NOT EXISTS invoice_pubkey_index ON invoice(pubkey);
+
+-- Create account table
+
+CREATE TABLE IF NOT EXISTS account (
+pubkey TEXT PRIMARY KEY,
+is_admitted INTEGER NOT NULL DEFAULT 0,
+balance INTEGER NOT NULL DEFAULT 0,
+tos_accepted_at INTEGER
+);
+
+-- Create account index
+CREATE INDEX IF NOT EXISTS account_pubkey_index ON account(pubkey);
+
+
+pragma optimize;
+PRAGMA user_version = 18;
+"##;
+    match conn.execute_batch(upgrade_sql) {
+        Ok(()) => {
+            info!("database schema upgraded v17 -> v18");
+        }
+        Err(err) => {
+            error!("update failed: {}", err);
+            panic!("database could not be upgraded");
+        }
+    }
+    Ok(18)
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/server/close.rs b/crates/rooch-ws-relay/src/server/close.rs
new file mode 100644
index 0000000000..a11661103b
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/close.rs
@@ -0,0 +1,32 @@
+//! Subscription close request parsing
+//!
+//! Representation and parsing of `CLOSE` messages sent from clients.
+use super::error::{Error, Result};
+use serde::{Deserialize, Serialize};
+
+/// Close command in network format
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
+pub struct CloseCmd {
+    /// Protocol command, expected to always be "CLOSE".
+    cmd: String,
+    /// The subscription identifier being closed.
+    id: String,
+}
+
+/// Identifier of the subscription to be closed.
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
+pub struct Close {
+    /// The subscription identifier being closed.
+    pub id: String,
+}
+
+impl From<CloseCmd> for Result<Close> {
+    fn from(cc: CloseCmd) -> Result<Close> {
+        // ensure command is correct
+        if cc.cmd == "CLOSE" {
+            Ok(Close { id: cc.id })
+        } else {
+            Err(Error::CommandUnknownError)
+        }
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/server/config.rs b/crates/rooch-ws-relay/src/server/config.rs
new file mode 100644
index 0000000000..23b700a3d0
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/config.rs
@@ -0,0 +1,346 @@
+//! Configuration file and settings management
+use config::{Config, ConfigError, File};
+use serde::{Deserialize, Serialize};
+use std::time::Duration;
+use tracing::warn;
+
+use crate::payment::handler::Processor;
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[allow(unused)]
+pub struct Info {
+    pub relay_url: Option<String>,
+    pub name: Option<String>,
+    pub description: Option<String>,
+    pub pubkey: Option<String>,
+    pub contact: Option<String>,
+    pub favicon: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Database {
+    pub data_directory: String,
+    pub engine: String,
+    pub in_memory: bool,
+    pub min_conn: u32,
+    pub max_conn: u32,
+    pub connection: String,
+    pub connection_write: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Grpc {
+    pub event_admission_server: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Network {
+    pub port: u16,
+    pub address: String,
+    pub remote_ip_header: Option<String>, // retrieve client IP from this HTTP header if present
+    pub ping_interval_seconds: u32,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Options {
+    pub reject_future_seconds: Option<usize>, // if defined, reject any events with a timestamp more than X seconds in the future
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Retention {
+    // TODO: implement
+    pub max_events: Option<usize>, // max events
+    pub max_bytes: Option<usize>, // max size
+    pub persist_days: Option<usize>, // oldest message
+    pub whitelist_addresses: Option<Vec<String>>, // whitelisted addresses (never delete)
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Limits {
+    pub messages_per_sec: Option<u32>, // Artificially slow down event writing to limit disk consumption (averaged over 1 minute)
+    pub subscriptions_per_min: Option<u32>, // Artificially slow down request (db query) creation to prevent abuse (averaged over 1 minute)
+    pub db_conns_per_client: Option<u32>, // How many concurrent database queries (not subscriptions) may a client have?
+    pub max_blocking_threads: usize,
+    pub max_event_bytes: Option<usize>, // Maximum size of an EVENT message
+    pub max_ws_message_bytes: Option<usize>,
+    pub max_ws_frame_bytes: Option<usize>,
+    pub broadcast_buffer: usize, // events to buffer for subscribers (prevents slow readers from consuming memory)
+    pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
+    pub event_kind_blacklist: Option<Vec<u64>>,
+    pub event_kind_allowlist: Option<Vec<u64>>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Authorization {
+    pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
+    pub nip42_auth: bool, // if true enables NIP-42 authentication
+    pub nip42_dms: bool,  // if true send DMs only to their authenticated recipients
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct PayToRelay {
+    pub enabled: bool,
+    pub admission_cost: u64, // Cost to have pubkey whitelisted
+    pub cost_per_event: u64, // Cost an author pays per event
+    pub node_url: String,
+    pub api_secret: String,
+    pub terms_message: String,
+    pub sign_ups: bool, // allow new users to sign up to relay
+    pub direct_message: bool, // Send direct message to user with invoice and terms
+    pub secret_key: Option<String>,
+    pub processor: Processor,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Diagnostics {
+    pub tracing: bool, // enables tokio console-subscriber
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy)]
+#[serde(rename_all = "lowercase")]
+pub enum VerifiedUsersMode {
+    Enabled,
+    Passive,
+    Disabled,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct VerifiedUsers {
+    pub mode: VerifiedUsersMode, // Mode of operation: "enabled" (enforce) or "passive" (check only). If none, this is simply disabled.
+    pub domain_whitelist: Option<Vec<String>>, // If present, only verified users from these domains may publish events
+    pub domain_blacklist: Option<Vec<String>>, // If present, allow all verified users from any domain except these
+    pub verify_expiration: Option<String>, // how long a verification is cached for before no longer being used
+    pub verify_update_frequency: Option<String>, // how often to attempt to update verification
+    pub verify_expiration_duration: Option<Duration>, // internal result of parsing verify_expiration
+    pub verify_update_frequency_duration: Option<Duration>, // internal result of parsing verify_update_frequency
+    pub max_consecutive_failures: usize, // maximum number of verification failures in a row, before ceasing future checks
+}
+
+impl VerifiedUsers {
+    pub fn init(&mut self) {
+        self.verify_expiration_duration = self.verify_expiration_duration();
+        self.verify_update_frequency_duration = self.verify_update_duration();
+    }
+
+    #[must_use]
+    pub fn is_enabled(&self) -> bool {
+        self.mode == VerifiedUsersMode::Enabled
+    }
+
+    #[must_use]
+    pub fn is_active(&self) -> bool {
+        self.mode == VerifiedUsersMode::Enabled || self.mode == VerifiedUsersMode::Passive
+    }
+
+    #[must_use]
+    pub fn is_passive(&self) -> bool {
+        self.mode == VerifiedUsersMode::Passive
+    }
+
+    #[must_use]
+    pub fn verify_expiration_duration(&self) -> Option<Duration> {
+        self.verify_expiration
+            .as_ref()
+            .and_then(|x| parse_duration::parse(x).ok())
+    }
+
+    #[must_use]
+    pub fn verify_update_duration(&self) -> Option<Duration> {
+        self.verify_update_frequency
+            .as_ref()
+            .and_then(|x| parse_duration::parse(x).ok())
+    }
+
+    #[must_use]
+    pub fn is_valid(&self) -> bool {
+        self.verify_expiration_duration().is_some() && self.verify_update_duration().is_some()
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Logging {
+    pub folder_path: Option<String>,
+    pub file_prefix: Option<String>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Settings {
+    pub info: Info,
+    pub diagnostics: Diagnostics,
+    pub database: Database,
+    pub grpc: Grpc,
+    pub network: Network,
+    pub limits: Limits,
+    pub authorization: Authorization,
+    pub pay_to_relay: PayToRelay,
+    pub verified_users: VerifiedUsers,
+    pub retention: Retention,
+    pub options: Options,
+    pub logging: Logging,
+}
+
+impl Settings {
+    #[must_use]
+    pub fn new(config_file_name: &Option<String>) -> Self {
+        let default_settings = Self::default();
+        // attempt to construct settings with file
+        let from_file = Self::new_from_default(&default_settings, config_file_name);
+        match from_file {
+            Ok(f) => f,
+            Err(e) => {
+                warn!("Error reading config file ({:?})", e);
+                default_settings
+            }
+        }
+    }
+
+    fn new_from_default(
+        default: &Settings,
+        config_file_name: &Option<String>,
+    ) -> Result<Self, ConfigError> {
+        let default_config_file_name = "config.toml".to_string();
+        let config: &String = match config_file_name {
+            Some(value) => value,
+            None => &default_config_file_name,
+        };
+        let builder = Config::builder();
+        let config: Config = builder
+            // use defaults
+            .add_source(Config::try_from(default)?)
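+            // (the defaults serialize to a complete config tree, so a sparse
+            //  config.toml only needs the keys it wants to override)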
+            // override with file contents
+            .add_source(File::with_name(config))
+            .build()?;
+        let mut settings: Settings = config.try_deserialize()?;
+        // ensure connection pool size is logical
+        assert!(
+            settings.database.min_conn <= settings.database.max_conn,
+            "Database min_conn setting ({}) cannot exceed max_conn ({})",
+            settings.database.min_conn,
+            settings.database.max_conn
+        );
+        // ensure durations parse
+        assert!(
+            settings.verified_users.is_valid(),
+            "VerifiedUsers time settings could not be parsed"
+        );
+        // initialize durations for verified users
+        settings.verified_users.init();
+
+        // Validate pay to relay settings
+        if settings.pay_to_relay.enabled {
+            assert_ne!(settings.pay_to_relay.api_secret, "");
+            // Should check that url is valid
+            assert_ne!(settings.pay_to_relay.node_url, "");
+            assert_ne!(settings.pay_to_relay.terms_message, "");
+
+            if settings.pay_to_relay.direct_message {
+                assert_ne!(
+                    settings.pay_to_relay.secret_key,
+                    Some("".to_string())
+                );
+                assert!(settings.pay_to_relay.secret_key.is_some());
+            }
+        }
+
+        Ok(settings)
+    }
+}
+
+impl Default for Settings {
+    fn default() -> Self {
+        Settings {
+            info: Info {
+                relay_url: None,
+                name: Some("Unnamed nostr-rs-relay".to_owned()),
+                description: None,
+                pubkey: None,
+                contact: None,
+                favicon: None,
+            },
+            diagnostics: Diagnostics { tracing: false },
+            database: Database {
+                data_directory: ".".to_owned(),
+                engine: "sqlite".to_owned(),
+                in_memory: false,
+                min_conn: 4,
+                max_conn: 8,
+                connection: "".to_owned(),
+                connection_write: None,
+            },
+            grpc: Grpc {
+                event_admission_server: None,
+            },
+            network: Network {
+                port: 8080,
+                ping_interval_seconds: 300,
+                address: "0.0.0.0".to_owned(),
+                remote_ip_header: None,
+            },
+            limits: Limits {
+                messages_per_sec: None,
+                subscriptions_per_min: None,
+                db_conns_per_client: None,
+                max_blocking_threads: 16,
+                max_event_bytes: Some(2 << 17), // 256KB (2 << 17 = 262144 bytes)
+                max_ws_message_bytes: Some(2 << 17), // 256KB
+                max_ws_frame_bytes: Some(2 << 17), // 256KB
+                broadcast_buffer: 16384,
+                event_persist_buffer: 4096,
+                event_kind_blacklist: None,
+                event_kind_allowlist: None,
+            },
+            authorization: Authorization {
+                pubkey_whitelist: None, // Allow any address to publish
+                nip42_auth: false, // Disable NIP-42 authentication
+                nip42_dms: false,  // Send DMs to everybody
+            },
+            pay_to_relay: PayToRelay {
+                enabled: false,
+                admission_cost: 4200,
+                cost_per_event: 0,
+                terms_message: "".to_string(),
+                node_url: "".to_string(),
+                api_secret: "".to_string(),
+                sign_ups: false,
+                direct_message: true,
+                secret_key: None,
+                processor: Processor::LNBits,
+            },
+            verified_users: VerifiedUsers {
+                mode: VerifiedUsersMode::Disabled,
+                domain_whitelist: None,
+                domain_blacklist: None,
+                verify_expiration: Some("1 week".to_owned()),
+                verify_update_frequency: Some("1 day".to_owned()),
+                verify_expiration_duration: None,
+                verify_update_frequency_duration: None,
+                max_consecutive_failures: 20,
+            },
+            retention: Retention {
+                max_events: None, // max events
+                max_bytes: None, // max size
+                persist_days: None, // oldest message
+                whitelist_addresses: None, // whitelisted addresses (never delete)
+            },
+            options: Options {
+                reject_future_seconds: None, // Reject events in the future if defined
+            },
+            logging: Logging {
+                folder_path: None,
+                file_prefix: None,
+            },
+        }
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/server/conn.rs b/crates/rooch-ws-relay/src/server/conn.rs
new file mode 100644
index 0000000000..32846d32c2
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/conn.rs
@@ -0,0 +1,229 @@
+//! Client connection state
+use std::collections::HashMap;
+
+use tracing::{debug, trace};
+use uuid::Uuid;
+
+use super::close::Close;
+use super::conn::Nip42AuthState::{AuthPubkey, Challenge, NoAuth};
+use super::error::Error;
+use super::error::Result;
+use super::event::Event;
+use super::subscription::Subscription;
+use super::utils::{host_str, unix_time};
+
+/// A subscription identifier has a maximum length
+const MAX_SUBSCRIPTION_ID_LEN: usize = 256;
+
+/// NIP-42 authentication state
+pub enum Nip42AuthState {
+    /// The client is not authenticated yet
+    NoAuth,
+    /// The AUTH challenge sent
+    Challenge(String),
+    /// The client is authenticated
+    AuthPubkey(String),
+}
+
+/// State for a client connection
+pub struct ClientConn {
+    /// Client IP (either from the socket, or a configured proxy header)
+    client_ip_addr: String,
+    /// Unique client identifier generated at connection time
+    client_id: Uuid,
+    /// The current set of active client subscriptions
+    subscriptions: HashMap<String, Subscription>,
+    /// Per-connection maximum concurrent subscriptions
+    max_subs: usize,
+    /// NIP-42 AUTH
+    auth: Nip42AuthState,
+}
+
+impl Default for ClientConn {
+    fn default() -> Self {
+        Self::new("unknown".to_owned())
+    }
+}
+
+impl ClientConn {
+    /// Create a new, empty connection state.
+    #[must_use]
+    pub fn new(client_ip_addr: String) -> Self {
+        let client_id = Uuid::new_v4();
+        ClientConn {
+            client_ip_addr,
+            client_id,
+            subscriptions: HashMap::new(),
+            max_subs: 32,
+            auth: NoAuth,
+        }
+    }
+
+    #[must_use]
+    pub fn subscriptions(&self) -> &HashMap<String, Subscription> {
+        &self.subscriptions
+    }
+
+    /// Check if the given subscription already exists
+    #[must_use]
+    pub fn has_subscription(&self, sub: &Subscription) -> bool {
+        self.subscriptions.values().any(|x| x == sub)
+    }
+
+    /// Get a short prefix of the client's unique identifier, suitable
+    /// for logging.
+    #[must_use]
+    pub fn get_client_prefix(&self) -> String {
+        self.client_id.to_string().chars().take(8).collect()
+    }
+
+    #[must_use]
+    pub fn ip(&self) -> &str {
+        &self.client_ip_addr
+    }
+
+    #[must_use]
+    pub fn auth_pubkey(&self) -> Option<&String> {
+        match &self.auth {
+            AuthPubkey(pubkey) => Some(pubkey),
+            _ => None,
+        }
+    }
+
+    #[must_use]
+    pub fn auth_challenge(&self) -> Option<&String> {
+        match &self.auth {
+            Challenge(challenge) => Some(challenge),
+            _ => None,
+        }
+    }
+
+    /// Add a new subscription for this connection.
+    /// # Errors
+    ///
+    /// Will return `Err` if the client has too many subscriptions, or
+    /// if the provided name is excessively long.
+    pub fn subscribe(&mut self, s: Subscription) -> Result<()> {
+        let k = s.get_id();
+        let sub_id_len = k.len();
+        // prevent arbitrarily long subscription identifiers from
+        // being used.
+        if sub_id_len > MAX_SUBSCRIPTION_ID_LEN {
+            debug!(
+                "ignoring sub request with excessive length: ({})",
+                sub_id_len
+            );
+            return Err(Error::SubIdMaxLengthError);
+        }
+        // check if an existing subscription exists, and replace if so
+        if self.subscriptions.contains_key(&k) {
+            self.subscriptions.remove(&k);
+            self.subscriptions.insert(k, s.clone());
+            trace!(
+                "replaced existing subscription (cid: {}, sub: {:?})",
+                self.get_client_prefix(),
+                s.get_id()
+            );
+            return Ok(());
+        }
+
+        // check if there is room for another subscription.
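+        // e.g. with max_subs = 32, a 33rd distinct subscription id is
+        // rejected with SubMaxExceededError, while re-sending an existing
+        // id simply replaced it above.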
+        if self.subscriptions.len() >= self.max_subs {
+            return Err(Error::SubMaxExceededError);
+        }
+        // add subscription
+        self.subscriptions.insert(k, s);
+        trace!(
+            "registered new subscription, currently have {} active subs (cid: {})",
+            self.subscriptions.len(),
+            self.get_client_prefix(),
+        );
+        Ok(())
+    }
+
+    /// Remove the subscription for this connection.
+    pub fn unsubscribe(&mut self, c: &Close) {
+        // TODO: return notice if subscription did not exist.
+        self.subscriptions.remove(&c.id);
+        trace!(
+            "removed subscription, currently have {} active subs (cid: {})",
+            self.subscriptions.len(),
+            self.get_client_prefix(),
+        );
+    }
+
+    pub fn generate_auth_challenge(&mut self) {
+        self.auth = Challenge(Uuid::new_v4().to_string());
+    }
+
+    pub fn authenticate(&mut self, event: &Event, relay_url: &String) -> Result<()> {
+        match &self.auth {
+            Challenge(_) => (),
+            AuthPubkey(_) => {
+                // already authenticated
+                return Ok(());
+            }
+            NoAuth => {
+                // unexpected AUTH request
+                return Err(Error::AuthFailure);
+            }
+        }
+        match event.validate() {
+            Ok(_) => {
+                if event.kind != 22242 {
+                    return Err(Error::AuthFailure);
+                }
+
+                let curr_time = unix_time();
+                let past_cutoff = curr_time - 600; // 10 minutes
+                let future_cutoff = curr_time + 600; // 10 minutes
+                if event.created_at < past_cutoff || event.created_at > future_cutoff {
+                    return Err(Error::AuthFailure);
+                }
+
+                let mut challenge: Option<&String> = None;
+                let mut relay: Option<&String> = None;
+
+                for tag in &event.tags {
+                    if tag.len() == 2 && tag.get(0) == Some(&"challenge".into()) {
+                        challenge = tag.get(1);
+                    }
+                    if tag.len() == 2 && tag.get(0) == Some(&"relay".into()) {
+                        relay = tag.get(1);
+                    }
+                }
+
+                match (challenge, &self.auth) {
+                    (Some(received_challenge), Challenge(sent_challenge)) => {
+                        if received_challenge != sent_challenge {
+                            return Err(Error::AuthFailure);
+                        }
+                    }
+                    (_, _) => {
+                        return Err(Error::AuthFailure);
+                    }
+                }
+
+                match (relay.and_then(host_str), host_str(relay_url)) {
+                    (Some(received_relay), Some(our_relay)) => {
+                        if received_relay != our_relay {
+                            return Err(Error::AuthFailure);
+                        }
+                    }
+                    (_, _) => {
+                        return Err(Error::AuthFailure);
+                    }
+                }
+
+                self.auth = AuthPubkey(event.pubkey.clone());
+                trace!(
+                    "authenticated pubkey {} (cid: {})",
+                    event.pubkey.chars().take(8).collect::<String>(),
+                    self.get_client_prefix()
+                );
+                Ok(())
+            }
+            Err(_) => Err(Error::AuthFailure),
+        }
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/server/db.rs b/crates/rooch-ws-relay/src/server/db.rs
new file mode 100644
index 0000000000..14a77face0
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/db.rs
@@ -0,0 +1,431 @@
+//! Event persistence and querying
+use super::config::Settings;
+use super::error::{Error, Result};
+use super::event::Event;
+use crate::nauthz::EventAuthzService;
+use super::notice::Notice;
+use crate::payment::handler::PaymentMessage;
+use crate::repo::sqlite::SqliteRepo;
+use crate::repo::nostr::NostrRepo;
+use crate::NostrMetrics;
+use governor::clock::Clock;
+use governor::{Quota, RateLimiter};
+use nostr::key::FromPkStr;
+use nostr::key::Keys;
+use std::sync::Arc;
+use std::thread;
+use std::time::Instant;
+use tracing::{debug, info, trace, warn};
+
+pub type SqlitePool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
+pub type PooledConnection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
+
+/// Events submitted from a client, with a return channel for notices
+pub struct SubmittedEvent {
+    pub event: Event,
+    pub notice_tx: tokio::sync::mpsc::Sender<Notice>,
+    pub source_ip: String,
+    pub origin: Option<String>,
+    pub user_agent: Option<String>,
+    pub auth_pubkey: Option<Vec<u8>>,
+}
+
+/// Database file
+pub const DB_FILE: &str = "nostr.db";
+
+/// Build repo
+/// # Panics
+///
+/// Will panic if the pool could not be created.
+pub async fn build_repo(settings: &Settings, metrics: NostrMetrics) -> Arc<dyn NostrRepo> {
+    match settings.database.engine.as_str() {
+        "sqlite" => Arc::new(build_sqlite_pool(settings, metrics).await),
+        _ => panic!("Unknown database engine"),
+    }
+}
+
+async fn build_sqlite_pool(settings: &Settings, metrics: NostrMetrics) -> SqliteRepo {
+    let repo = SqliteRepo::new(settings, metrics);
+    repo.start().await.ok();
+    repo.migrate_up().await.ok();
+    repo
+}
+
+/// Spawn a database writer that persists events to the `SQLite` store.
+pub async fn db_writer(
+    repo: Arc<dyn NostrRepo>,
+    settings: Settings,
+    mut event_rx: tokio::sync::mpsc::Receiver<SubmittedEvent>,
+    bcast_tx: tokio::sync::broadcast::Sender<Event>,
+    metadata_tx: tokio::sync::broadcast::Sender<Event>,
+    payment_tx: tokio::sync::broadcast::Sender<PaymentMessage>,
+    mut shutdown: tokio::sync::broadcast::Receiver<()>,
+) -> Result<()> {
+    // are we performing NIP-05 checking?
+    let nip05_active = settings.verified_users.is_active();
+    // are we requiring NIP-05 user verification?
+    let nip05_enabled = settings.verified_users.is_enabled();
+
+    let pay_to_relay_enabled = settings.pay_to_relay.enabled;
+    let cost_per_event = settings.pay_to_relay.cost_per_event;
+    debug!("Pay to relay: {}", pay_to_relay_enabled);
+
+    //upgrade_db(&mut pool.get()?)?;
+
+    // Make a copy of the whitelist
+    let whitelist = &settings.authorization.pubkey_whitelist.clone();
+
+    // get rate limit settings
+    let rps_setting = settings.limits.messages_per_sec;
+    let mut most_recent_rate_limit = Instant::now();
+    let mut lim_opt = None;
+    let clock = governor::clock::QuantaClock::default();
+    if let Some(rps) = rps_setting {
+        if rps > 0 {
+            info!("Enabling rate limits for event creation ({}/sec)", rps);
+            let quota = core::num::NonZeroU32::new(rps * 60).unwrap();
+            lim_opt = Some(RateLimiter::direct(Quota::per_minute(quota)));
+        }
+    }
+    // create a client if gRPC is enabled.
+    // Check with externalized event admitter service, if one is defined.
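+    // e.g. setting grpc.event_admission_server = "http://[::1]:50051"
+    // (a hypothetical address) in config.toml turns this check on.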
+    let mut grpc_client = if let Some(svr) = settings.grpc.event_admission_server {
+        Some(EventAuthzService::connect(&svr).await)
+    } else {
+        None
+    };
+
+    //let gprc_client = settings.grpc.event_admission_server.map(|s| {
+    //    event_admitter_connect(&s);
+    //    });
+
+    loop {
+        if shutdown.try_recv().is_ok() {
+            info!("shutting down database writer");
+            break;
+        }
+        // call blocking read on channel
+        let next_event = event_rx.recv().await;
+        // if the channel has closed, we will never get work
+        if next_event.is_none() {
+            break;
+        }
+        // track if an event write occurred; this is used to
+        // update the rate limiter
+        let mut event_write = false;
+        let subm_event = next_event.unwrap();
+        let event = subm_event.event;
+        let notice_tx = subm_event.notice_tx;
+
+        // Check that event kind isn't blacklisted
+        let kinds_blacklist = &settings.limits.event_kind_blacklist.clone();
+        if let Some(event_kind_blacklist) = kinds_blacklist {
+            if event_kind_blacklist.contains(&event.kind) {
+                debug!(
+                    "rejecting event: {}, blacklisted kind: {}",
+                    &event.get_event_id_prefix(),
+                    &event.kind
+                );
+                notice_tx
+                    .try_send(Notice::blocked(event.id, "event kind is blocked by relay"))
+                    .ok();
+                continue;
+            }
+        }
+
+        // Check that the event kind is in the allowlist, if one is defined
+        let kinds_allowlist = &settings.limits.event_kind_allowlist.clone();
+        if let Some(event_kind_allowlist) = kinds_allowlist {
+            if !event_kind_allowlist.contains(&event.kind) {
+                debug!(
+                    "rejecting event: {}, kind not in allowlist: {}",
+                    &event.get_event_id_prefix(),
+                    &event.kind
+                );
+                notice_tx
+                    .try_send(Notice::blocked(event.id, "event kind is blocked by relay"))
+                    .ok();
+                continue;
+            }
+        }
+
+        // Set to None until the balance is fetched from the db.
+        // Will stay None if the user is whitelisted and does not have to pay to post.
+        // When pay to relay is enabled, the whitelist is not a list of who can post;
+        // it is a list of who can post for free.
+        let mut user_balance: Option<u64> = None;
+        if !pay_to_relay_enabled {
+            // check if this event is authorized.
+            if let Some(allowed_addrs) = whitelist {
+                // TODO: incorporate delegated pubkeys
+                // if the event address is not in allowed_addrs.
+                if !allowed_addrs.contains(&event.pubkey) {
+                    debug!(
+                        "rejecting event: {}, unauthorized author",
+                        event.get_event_id_prefix()
+                    );
+                    notice_tx
+                        .try_send(Notice::blocked(
+                            event.id,
+                            "pubkey is not allowed to publish to this relay",
+                        ))
+                        .ok();
+                    continue;
+                }
+            }
+        } else {
+            // If the user is on the whitelist, there is no need to check whether they are admitted or have balance to post
+            if whitelist.is_none()
+                || (whitelist.is_some() && !whitelist.as_ref().unwrap().contains(&event.pubkey))
+            {
+                let key = Keys::from_pk_str(&event.pubkey).unwrap();
+                match repo.get_account_balance(&key).await {
+                    Ok((user_admitted, balance)) => {
+                        // Check whether the user is admitted
+                        if !user_admitted {
+                            debug!("user: {}, is not admitted", &event.pubkey);
+
+                            // If the user is in the DB but not admitted,
+                            // send a message to the payment thread to check if an outstanding invoice has been paid
+                            payment_tx
+                                .send(PaymentMessage::CheckAccount(event.pubkey))
+                                .ok();
+                            notice_tx
+                                .try_send(Notice::blocked(event.id, "User is not admitted"))
+                                .ok();
+                            continue;
+                        }
+
+                        // Check that the user has enough balance to post
+                        // TODO: this should send an invoice to the user to top up
+                        if balance < cost_per_event {
+                            debug!("user: {}, does not have enough balance", &event.pubkey);
+                            notice_tx
+                                .try_send(Notice::blocked(event.id, "Insufficient balance"))
+                                .ok();
+                            continue;
+                        }
+                        user_balance = Some(balance);
+                        debug!("User balance: {:?}", user_balance);
+                    }
+                    Err(
+                        Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
+                        | Error::SqlxError(sqlx::Error::RowNotFound),
+                    ) => {
+                        // User does not exist
+                        info!("Unregistered user");
+                        if settings.pay_to_relay.sign_ups {
+                            payment_tx
+                                .send(PaymentMessage::NewAccount(event.pubkey))
+                                .ok();
+                        }
+                        let msg = "Pubkey not registered";
+                        notice_tx.try_send(Notice::error(event.id, msg)).ok();
+                        continue;
+                    }
+                    Err(err) => {
+                        warn!("Error checking admission status: {:?}", err);
+                        let msg = "relay experienced an error checking your admission status";
+                        notice_tx.try_send(Notice::error(event.id, msg)).ok();
+                        // Other error
+                        continue;
+                    }
+                }
+            }
+        }
+
+        // send any metadata events to the NIP-05 verifier
+        if nip05_active && event.is_kind_metadata() {
+            // we are sending this prior to even deciding if we
+            // persist it. this allows the nip05 module to
+            // inspect it, update if necessary, or persist a new
+            // event and broadcast it itself.
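+            // (kind-0 metadata events carry the nip05 identifier, e.g.
+            //  "user@example.com", that the verifier checks and records
+            //  in the user_verification table)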
+            metadata_tx.send(event.clone()).ok();
+        }
+
+        // get a validation result for use in verification and gRPC
+        let validation = if nip05_active {
+            Some(repo.get_latest_user_verification(&event.pubkey).await)
+        } else {
+            None
+        };
+
+        // check for NIP-05 verification
+        if nip05_enabled && validation.is_some() {
+            match validation.as_ref().unwrap() {
+                Ok(uv) => {
+                    if uv.is_valid(&settings.verified_users) {
+                        info!(
+                            "new event from verified author ({:?},{:?})",
+                            uv.name.to_string(),
+                            event.get_author_prefix()
+                        );
+                    } else {
+                        info!(
+                            "rejecting event, author ({:?} / {:?}) verification invalid (expired/wrong domain)",
+                            uv.name.to_string(),
+                            event.get_author_prefix()
+                        );
+                        notice_tx
+                            .try_send(Notice::blocked(
+                                event.id,
+                                "NIP-05 verification is no longer valid (expired/wrong domain)",
+                            ))
+                            .ok();
+                        continue;
+                    }
+                }
+                Err(
+                    Error::SqlError(rusqlite::Error::QueryReturnedNoRows)
+                    | Error::SqlxError(sqlx::Error::RowNotFound),
+                ) => {
+                    debug!(
+                        "no verification records found for pubkey: {:?}",
+                        event.get_author_prefix()
+                    );
+                    notice_tx
+                        .try_send(Notice::blocked(
+                            event.id,
+                            "NIP-05 verification needed to publish events",
+                        ))
+                        .ok();
+                    continue;
+                }
+                Err(e) => {
+                    warn!("checking nip05 verification status failed: {:?}", e);
+                    continue;
+                }
+            }
+        }
+
+        // nip05 address
+        let nip05_address: Option<crate::server::nip05::Nip05Name> =
+            validation.and_then(|x| x.ok().map(|y| y.name));
+
+        // gRPC admission check
+        if let Some(ref mut c) = grpc_client {
+            trace!("checking if grpc permits");
+            let grpc_start = Instant::now();
+            let decision_res = c
+                .admit_event(
+                    &event,
+                    &subm_event.source_ip,
+                    subm_event.origin,
+                    subm_event.user_agent,
+                    nip05_address,
+                    subm_event.auth_pubkey,
+                )
+                .await;
+            match decision_res {
+                Ok(decision) => {
+                    if !decision.permitted() {
+                        // gRPC returned a decision to reject this event
+                        info!(
+                            "GRPC rejected event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
+                            event.get_event_id_prefix(),
+                            event.kind,
+                            event.get_author_prefix(),
+                            grpc_start.elapsed(),
+                            subm_event.source_ip
+                        );
+                        notice_tx
+                            .try_send(Notice::blocked(
+                                event.id,
+                                &decision.message().unwrap_or_default(),
+                            ))
+                            .ok();
+                        continue;
+                    }
+                }
+                Err(e) => {
+                    warn!("GRPC server error: {:?}", e);
+                }
+            }
+        }
+
+        // TODO: cache recent list of authors to remove a DB call.
+        let start = Instant::now();
+        if event.is_ephemeral() {
+            bcast_tx.send(event.clone()).ok();
+            debug!(
+                "published ephemeral event: {:?} from: {:?} in: {:?}",
+                event.get_event_id_prefix(),
+                event.get_author_prefix(),
+                start.elapsed()
+            );
+            event_write = true;
+        } else {
+            match repo.write_event(&event).await {
+                Ok(updated) => {
+                    if updated == 0 {
+                        trace!("ignoring duplicate or deleted event");
+                        notice_tx.try_send(Notice::duplicate(event.id)).ok();
+                    } else {
+                        info!(
+                            "persisted event: {:?} (kind: {}) from: {:?} in: {:?} (IP: {:?})",
+                            event.get_event_id_prefix(),
+                            event.kind,
+                            event.get_author_prefix(),
+                            start.elapsed(),
+                            subm_event.source_ip,
+                        );
+                        event_write = true;
+                        // send this out to all clients
+                        bcast_tx.send(event.clone()).ok();
+                        notice_tx.try_send(Notice::saved(event.id)).ok();
+                    }
+                }
+                Err(err) => {
+                    warn!("event insert failed: {:?}", err);
+                    let msg = "relay experienced an error trying to publish the latest event";
+                    notice_tx.try_send(Notice::error(event.id, msg)).ok();
+                }
+            }
+        }
+
+        // use rate limit, if defined, and if an event was actually written.
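+        // Illustrative note: `lim_opt` is assumed to be built from a per-second
+        // write limit in the settings (upstream nostr-rs-relay calls it
+        // `limits.messages_per_sec`); e.g. with a limit of 2, sustained bursts of
+        // writes are slept below until the limiter has capacity again.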
+        if event_write {
+            // If pay-to-relay is disabled or the cost per event is 0, there is
+            // no need to update the user balance
+            if pay_to_relay_enabled && cost_per_event > 0 {
+                // If the user balance is Some, the user was not on the whitelist,
+                // so their balance should be reduced by the cost per event
+                if let Some(_balance) = user_balance {
+                    let pubkey = Keys::from_pk_str(&event.pubkey)?;
+                    repo.update_account_balance(&pubkey, false, cost_per_event)
+                        .await?;
+                }
+            }
+            if let Some(ref lim) = lim_opt {
+                if let Err(n) = lim.check() {
+                    let wait_for = n.wait_time_from(clock.now());
+                    // check if we have recently logged rate
+                    // limits, but print out a message only once
+                    // every 10 seconds.
+                    if most_recent_rate_limit.elapsed().as_secs() > 10 {
+                        warn!(
+                            "rate limit reached for event creation (sleep for {:?}) (suppressing future messages for 10 seconds)",
+                            wait_for
+                        );
+                        // reset last rate limit message
+                        most_recent_rate_limit = Instant::now();
+                    }
+                    // block event writes, allowing them to queue up
+                    thread::sleep(wait_for);
+                    continue;
+                }
+            }
+        }
+    }
+    info!("database connection closed");
+    Ok(())
+}
+
+/// Serialized event associated with a specific subscription request.
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub struct QueryResult {
+    /// Subscription identifier
+    pub sub_id: String,
+    /// Serialized event
+    pub event: String,
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/server/delegation.rs b/crates/rooch-ws-relay/src/server/delegation.rs
new file mode 100644
index 0000000000..041790c9f4
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/delegation.rs
@@ -0,0 +1,406 @@
+//! Delegation parsing and validation
+use super::error::Error;
+use super::error::Result;
+use super::event::Event;
+use bitcoin_hashes::{sha256, Hash};
+use lazy_static::lazy_static;
+use regex::Regex;
+use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
+use serde::{Deserialize, Serialize};
+use std::str::FromStr;
+use tracing::{debug, info};
+
+// This handles everything related to delegation, in particular the
+// condition/rune parsing and logic.
+
+// Conditions are poorly specified, so we will implement the minimum
+// necessary for now.
+
+// fields MUST be either "kind" or "created_at".
+// operators supported are ">", "<", "=", "!".
+// no operations on 'content' are supported.
+
+// this allows constraints for:
+// valid date ranges (valid from X->Y dates).
+// specific kinds (publish kind=1,5)
+// kind ranges (publish ephemeral events, kind>19999&kind<30001)
+
+// for more complex scenarios (allow delegatee to publish ephemeral
+// AND replacement events), it may be necessary to generate and use
+// different condition strings, since we do not support grouping or
+// "OR" logic.
+
+lazy_static! {
+    /// Secp256k1 verification instance.
+    pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
+pub enum Field {
+    Kind,
+    CreatedAt,
+}
+
+impl FromStr for Field {
+    type Err = Error;
+    fn from_str(value: &str) -> Result<Self> {
+        if value == "kind" {
+            Ok(Field::Kind)
+        } else if value == "created_at" {
+            Ok(Field::CreatedAt)
+        } else {
+            Err(Error::DelegationParseError)
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
+pub enum Operator {
+    LessThan,
+    GreaterThan,
+    Equals,
+    NotEquals,
+}
+impl FromStr for Operator {
+    type Err = Error;
+    fn from_str(value: &str) -> Result<Self> {
+        if value == "<" {
+            Ok(Operator::LessThan)
+        } else if value == ">" {
+            Ok(Operator::GreaterThan)
+        } else if value == "=" {
+            Ok(Operator::Equals)
+        } else if value == "!" {
+            Ok(Operator::NotEquals)
+        } else {
+            Err(Error::DelegationParseError)
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
+pub struct ConditionQuery {
+    pub conditions: Vec<Condition>,
+}
+
+impl ConditionQuery {
+    #[must_use]
+    pub fn allows_event(&self, event: &Event) -> bool {
+        // check each condition, to ensure that the event complies
+        // with the restriction.
+        for c in &self.conditions {
+            if !c.allows_event(event) {
+                // any failing condition invalidates the delegation
+                // on this event
+                return false;
+            }
+        }
+        // delegation was permitted unconditionally, or all conditions
+        // were true
+        true
+    }
+}
+
+// Verify that the delegator approved the delegation; return a ConditionQuery if so.
+#[must_use]
+pub fn validate_delegation(
+    delegator: &str,
+    delegatee: &str,
+    cond_query: &str,
+    sigstr: &str,
+) -> Option<ConditionQuery> {
+    // form the token
+    let tok = format!("nostr:delegation:{delegatee}:{cond_query}");
+    // form SHA256 hash
+    let digest: sha256::Hash = sha256::Hash::hash(tok.as_bytes());
+    // a malformed signature is simply an invalid delegation
+    let sig = match schnorr::Signature::from_str(sigstr) {
+        Ok(s) => s,
+        Err(_) => {
+            debug!("client sent a malformed delegation signature");
+            return None;
+        }
+    };
+    if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) {
+        if let Ok(pubkey) = XOnlyPublicKey::from_str(delegator) {
+            let verify = SECP.verify_schnorr(&sig, &msg, &pubkey);
+            if verify.is_ok() {
+                // return the parsed condition query
+                cond_query.parse::<ConditionQuery>().ok()
+            } else {
+                debug!("client sent a delegation signature that did not validate");
+                None
+            }
+        } else {
+            debug!("client sent malformed delegation pubkey");
+            None
+        }
+    } else {
+        info!("error converting delegation digest to secp256k1 message");
+        None
+    }
+}
+
+/// Parsed delegation condition
+/// see
+/// An example complex condition would be: `kind=1,2,3&created_at<1665265999`
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
+pub struct Condition {
+    pub field: Field,
+    pub operator: Operator,
+    pub values: Vec<u64>,
+}
+
+impl Condition {
+    /// Check if this condition allows the given event to be delegated
+    #[must_use]
+    pub fn allows_event(&self, event: &Event) -> bool {
+        // determine what the right-hand side of the operator is
+        let resolved_field = match &self.field {
+            Field::Kind => event.kind,
+            Field::CreatedAt => event.created_at,
+        };
+        match &self.operator {
+            Operator::LessThan => {
+                // the less-than operator is only valid for single values.
+                if self.values.len() == 1 {
+                    if let Some(v) = self.values.first() {
+                        return resolved_field < *v;
+                    }
+                }
+            }
+            Operator::GreaterThan => {
+                // the greater-than operator is only valid for single values.
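+                // e.g. `kind>20000` parses to values == vec![20000] and allows an
+                // event with kind 20001; a multi-valued `kind>1,2` never matches
+                // (it falls through to the `false` at the end of this match).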
+ if self.values.len() == 1 { + if let Some(v) = self.values.first() { + return resolved_field > *v; + } + } + } + Operator::Equals => { + // equals is interpreted as "must be equal to at least one provided value" + return self.values.iter().any(|&x| resolved_field == x); + } + Operator::NotEquals => { + // not-equals is interpreted as "must not be equal to any provided value" + // this is the one case where an empty list of values could be allowed; even though it is a pointless restriction. + return self.values.iter().all(|&x| resolved_field != x); + } + } + false + } +} + +fn str_to_condition(cs: &str) -> Option { + // a condition is a string (alphanum+underscore), an operator (<>=!), and values (num+comma) + lazy_static! { + static ref RE: Regex = Regex::new("([[:word:]]+)([<>=!]+)([,[[:digit:]]]*)").unwrap(); + } + // match against the regex + let caps = RE.captures(cs)?; + let field = caps.get(1)?.as_str().parse::().ok()?; + let operator = caps.get(2)?.as_str().parse::().ok()?; + // values are just comma separated numbers, but all must be parsed + let rawvals = caps.get(3)?.as_str(); + let values = rawvals + .split_terminator(',') + .map(|n| n.parse::().ok()) + .collect::>>()?; + // convert field string into Field + Some(Condition { + field, + operator, + values, + }) +} + +/// Parse a condition query from a string slice +impl FromStr for ConditionQuery { + type Err = Error; + fn from_str(value: &str) -> Result { + // split the string with '&' + let mut conditions = vec![]; + let condstrs = value.split_terminator('&'); + // parse each individual condition + for c in condstrs { + conditions.push(str_to_condition(c).ok_or(Error::DelegationParseError)?); + } + Ok(ConditionQuery { conditions }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // parse condition strings + #[test] + fn parse_empty() -> Result<()> { + // given an empty condition query, produce an empty vector + let empty_cq = ConditionQuery { conditions: vec![] }; + let parsed = "".parse::()?; + assert_eq!(parsed, empty_cq); + Ok(()) + } + + // parse field 'kind' + #[test] + fn test_kind_field_parse() -> Result<()> { + let field = "kind".parse::()?; + assert_eq!(field, Field::Kind); + Ok(()) + } + // parse field 'created_at' + #[test] + fn test_created_at_field_parse() -> Result<()> { + let field = "created_at".parse::()?; + assert_eq!(field, Field::CreatedAt); + Ok(()) + } + // parse unknown field + #[test] + fn unknown_field_parse() { + let field = "unk".parse::(); + assert!(field.is_err()); + } + + // parse a full conditional query with an empty array + #[test] + fn parse_kind_equals_empty() -> Result<()> { + // given an empty condition query, produce an empty vector + let kind_cq = ConditionQuery { + conditions: vec![Condition { + field: Field::Kind, + operator: Operator::Equals, + values: vec![], + }], + }; + let parsed = "kind=".parse::()?; + assert_eq!(parsed, kind_cq); + Ok(()) + } + // parse a full conditional query with a single value + #[test] + fn parse_kind_equals_singleval() -> Result<()> { + // given an empty condition query, produce an empty vector + let kind_cq = ConditionQuery { + conditions: vec![Condition { + field: Field::Kind, + operator: Operator::Equals, + values: vec![1], + }], + }; + let parsed = "kind=1".parse::()?; + assert_eq!(parsed, kind_cq); + Ok(()) + } + // parse a full conditional query with multiple values + #[test] + fn parse_kind_equals_multival() -> Result<()> { + // given an empty condition query, produce an empty vector + let kind_cq = ConditionQuery { + conditions: 
vec![Condition { + field: Field::Kind, + operator: Operator::Equals, + values: vec![1, 2, 4], + }], + }; + let parsed = "kind=1,2,4".parse::()?; + assert_eq!(parsed, kind_cq); + Ok(()) + } + // parse multiple conditions + #[test] + fn parse_multi_conditions() -> Result<()> { + // given an empty condition query, produce an empty vector + let cq = ConditionQuery { + conditions: vec![ + Condition { + field: Field::Kind, + operator: Operator::GreaterThan, + values: vec![10000], + }, + Condition { + field: Field::Kind, + operator: Operator::LessThan, + values: vec![20000], + }, + Condition { + field: Field::Kind, + operator: Operator::NotEquals, + values: vec![10001], + }, + Condition { + field: Field::CreatedAt, + operator: Operator::LessThan, + values: vec![1_665_867_123], + }, + ], + }; + let parsed = + "kind>10000&kind<20000&kind!10001&created_at<1665867123".parse::()?; + assert_eq!(parsed, cq); + Ok(()) + } + // Check for condition logic on event w/ empty values + #[test] + fn condition_with_empty_values() { + let mut c = Condition { + field: Field::Kind, + operator: Operator::GreaterThan, + values: vec![], + }; + let e = Event::simple_event(); + assert!(!c.allows_event(&e)); + c.operator = Operator::LessThan; + assert!(!c.allows_event(&e)); + c.operator = Operator::Equals; + assert!(!c.allows_event(&e)); + // Not Equals applied to an empty list *is* allowed + // (pointless, but logically valid). + c.operator = Operator::NotEquals; + assert!(c.allows_event(&e)); + } + + // Check for condition logic on event w/ single value + #[test] + fn condition_kind_gt_event_single() { + let c = Condition { + field: Field::Kind, + operator: Operator::GreaterThan, + values: vec![10], + }; + let mut e = Event::simple_event(); + // kind is not greater than 10, not allowed + e.kind = 1; + assert!(!c.allows_event(&e)); + // kind is greater than 10, allowed + e.kind = 100; + assert!(c.allows_event(&e)); + // kind is 10, not allowed + e.kind = 10; + assert!(!c.allows_event(&e)); + } + // Check for condition logic on event w/ multi values + #[test] + fn condition_with_multi_values() { + let mut c = Condition { + field: Field::Kind, + operator: Operator::Equals, + values: vec![0, 10, 20], + }; + let mut e = Event::simple_event(); + // Allow if event kind is in list for Equals + e.kind = 10; + assert!(c.allows_event(&e)); + // Deny if event kind is not in list for Equals + e.kind = 11; + assert!(!c.allows_event(&e)); + // Deny if event kind is in list for NotEquals + e.kind = 10; + c.operator = Operator::NotEquals; + assert!(!c.allows_event(&e)); + // Allow if event kind is not in list for NotEquals + e.kind = 99; + c.operator = Operator::NotEquals; + assert!(c.allows_event(&e)); + // Always deny if GreaterThan/LessThan for a list + c.operator = Operator::LessThan; + assert!(!c.allows_event(&e)); + c.operator = Operator::GreaterThan; + assert!(!c.allows_event(&e)); + } +} \ No newline at end of file diff --git a/crates/rooch-ws-relay/src/server/error.rs b/crates/rooch-ws-relay/src/server/error.rs new file mode 100644 index 0000000000..54af851030 --- /dev/null +++ b/crates/rooch-ws-relay/src/server/error.rs @@ -0,0 +1,192 @@ +//! 
Error handling +use std::result; +use thiserror::Error; +use tungstenite::error::Error as WsError; + +/// Simple `Result` type for errors in this module +pub type Result = result::Result; + +/// Custom error type for Nostr +#[derive(Error, Debug)] +pub enum Error { + #[error("Protocol parse error")] + ProtoParseError, + #[error("Connection error")] + ConnError, + #[error("Client write error")] + ConnWriteError, + #[error("EVENT parse failed")] + EventParseFailed, + #[error("CLOSE message parse failed")] + CloseParseFailed, + #[error("Event invalid signature")] + EventInvalidSignature, + #[error("Event invalid id")] + EventInvalidId, + #[error("Event malformed pubkey")] + EventMalformedPubkey, + #[error("Event could not canonicalize")] + EventCouldNotCanonicalize, + #[error("Event too large")] + EventMaxLengthError(usize), + #[error("Subscription identifier max length exceeded")] + SubIdMaxLengthError, + #[error("Maximum concurrent subscription count reached")] + SubMaxExceededError, + // this should be used if the JSON is invalid + #[error("JSON parsing failed")] + JsonParseFailed(serde_json::Error), + #[error("WebSocket proto error")] + WebsocketError(WsError), + #[error("Command unknown")] + CommandUnknownError, + #[error("SQL error")] + SqlError(rusqlite::Error), + #[error("Config error")] + ConfigError(config::ConfigError), + #[error("Data directory does not exist")] + DatabaseDirError, + #[error("Database Connection Pool Error")] + DatabasePoolError(r2d2::Error), + #[error("SQL error")] + SqlxError(sqlx::Error), + #[error("Database Connection Pool Error")] + SqlxDatabasePoolError(sqlx::Error), + #[error("Custom Error : {0}")] + CustomError(String), + #[error("Task join error")] + JoinError, + #[error("Hyper Client error")] + HyperError(hyper::Error), + #[error("Hex encoding error")] + HexError(hex::FromHexError), + #[error("Delegation parse error")] + DelegationParseError, + #[error("Channel closed error")] + ChannelClosed, + #[error("Authz error")] + AuthzError, + #[error("Tonic GRPC error")] + TonicError(tonic::Status), + #[error("Invalid AUTH message")] + AuthFailure, + #[error("I/O Error")] + IoError(std::io::Error), + #[error("Event builder error")] + EventError(nostr::event::builder::Error), + #[error("Nostr key error")] + NostrKeyError(nostr::key::Error), + #[error("Payment hash mismatch")] + PaymentHash, + #[error("Error parsing url")] + URLParseError(url::ParseError), + #[error("HTTP error")] + HTTPError(http::Error), + #[error("Unknown/Undocumented")] + UnknownError, +} + +//impl From> for Error { +// fn from(e: Box) -> Self { +// Error::CustomError("error".to_owned()) +// } +//} + +impl From for Error { + fn from(h: hex::FromHexError) -> Self { + Error::HexError(h) + } +} + +impl From for Error { + fn from(h: hyper::Error) -> Self { + Error::HyperError(h) + } +} + +impl From for Error { + fn from(d: r2d2::Error) -> Self { + Error::DatabasePoolError(d) + } +} + +impl From for Error { + /// Wrap SQL error + fn from(_j: tokio::task::JoinError) -> Self { + Error::JoinError + } +} + +impl From for Error { + /// Wrap SQL error + fn from(r: rusqlite::Error) -> Self { + Error::SqlError(r) + } +} + +impl From for Error { + fn from(d: sqlx::Error) -> Self { + Error::SqlxDatabasePoolError(d) + } +} + +impl From for Error { + /// Wrap JSON error + fn from(r: serde_json::Error) -> Self { + Error::JsonParseFailed(r) + } +} + +impl From for Error { + /// Wrap Websocket error + fn from(r: WsError) -> Self { + Error::WebsocketError(r) + } +} + +impl From for Error { + /// Wrap Config error 
+    fn from(r: config::ConfigError) -> Self {
+        Error::ConfigError(r)
+    }
+}
+
+impl From<tonic::Status> for Error {
+    /// Wrap Tonic GRPC error
+    fn from(r: tonic::Status) -> Self {
+        Error::TonicError(r)
+    }
+}
+
+impl From<std::io::Error> for Error {
+    fn from(r: std::io::Error) -> Self {
+        Error::IoError(r)
+    }
+}
+impl From<nostr::event::builder::Error> for Error {
+    /// Wrap event builder error
+    fn from(r: nostr::event::builder::Error) -> Self {
+        Error::EventError(r)
+    }
+}
+
+impl From<nostr::key::Error> for Error {
+    /// Wrap nostr key error
+    fn from(r: nostr::key::Error) -> Self {
+        Error::NostrKeyError(r)
+    }
+}
+
+impl From<url::ParseError> for Error {
+    /// Wrap URL parse error
+    fn from(r: url::ParseError) -> Self {
+        Error::URLParseError(r)
+    }
+}
+
+impl From<http::Error> for Error {
+    /// Wrap HTTP error
+    fn from(r: http::Error) -> Self {
+        Error::HTTPError(r)
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/server/event.rs b/crates/rooch-ws-relay/src/server/event.rs
new file mode 100644
index 0000000000..233ea08803
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/event.rs
@@ -0,0 +1,794 @@
+//! Event parsing and validation
+use super::delegation::validate_delegation;
+use super::error::Error::{
+    CommandUnknownError, EventCouldNotCanonicalize, EventInvalidId, EventInvalidSignature,
+    EventMalformedPubkey,
+};
+use super::error::Result;
+use super::event::EventWrapper::WrappedAuth;
+use super::event::EventWrapper::WrappedEvent;
+use super::nip05;
+use super::utils::unix_time;
+use bitcoin_hashes::{sha256, Hash};
+use lazy_static::lazy_static;
+use secp256k1::{schnorr, Secp256k1, VerifyOnly, XOnlyPublicKey};
+use serde::{Deserialize, Deserializer, Serialize};
+use serde_json::value::Value;
+use serde_json::Number;
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::str::FromStr;
+use tracing::{debug, info};
+
+lazy_static! {
+    /// Secp256k1 verification instance.
+    pub static ref SECP: Secp256k1<VerifyOnly> = Secp256k1::verification_only();
+}
+
+/// Event command in network format.
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
+pub struct EventCmd {
+    cmd: String, // expecting static "EVENT"
+    event: Event,
+}
+
+impl EventCmd {
+    #[must_use]
+    pub fn event_id(&self) -> &str {
+        &self.event.id
+    }
+}
+
+/// Parsed nostr event.
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)]
+pub struct Event {
+    pub id: String,
+    pub pubkey: String,
+    #[serde(skip)]
+    pub delegated_by: Option<String>,
+    pub created_at: u64,
+    pub kind: u64,
+    #[serde(deserialize_with = "tag_from_string")]
+    // NOTE: array-of-arrays may need to be more general than a string container
+    pub tags: Vec<Vec<String>>,
+    pub content: String,
+    pub sig: String,
+    // Optimization for tag search, built on demand.
+    #[serde(skip)]
+    pub tagidx: Option<HashMap<char, HashSet<String>>>,
+}
+
+/// Simple tag type for array of array of strings.
+type Tag = Vec<Vec<String>>;
+
+/// Deserializer that ensures we always have a [`Tag`].
+fn tag_from_string<'de, D>(deserializer: D) -> Result<Tag, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let opt = Option::deserialize(deserializer)?;
+    Ok(opt.unwrap_or_default())
+}
+
+/// Attempt to form a single-char tag name.
+#[must_use]
+pub fn single_char_tagname(tagname: &str) -> Option<char> {
+    // We return the tag character if and only if the tagname consists
+    // of a single char.
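+    // e.g. single_char_tagname("e") == Some('e'), while single_char_tagname("ee")
+    // and single_char_tagname("") both return None.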
+ let mut tagnamechars = tagname.chars(); + let firstchar = tagnamechars.next(); + match firstchar { + Some(_) => { + // check second char + if tagnamechars.next().is_none() { + firstchar + } else { + None + } + } + None => None, + } +} + +pub enum EventWrapper { + WrappedEvent(Event), + WrappedAuth(Event), +} + +/// Convert network event to parsed/validated event. +impl From for Result { + fn from(ec: EventCmd) -> Result { + // ensure command is correct + if ec.cmd == "EVENT" { + ec.event.validate().map(|_| { + let mut e = ec.event; + e.build_index(); + e.update_delegation(); + WrappedEvent(e) + }) + } else if ec.cmd == "AUTH" { + // we don't want to validate the event here, because NIP-42 can be disabled + // it will be validated later during the authentication process + Ok(WrappedAuth(ec.event)) + } else { + Err(CommandUnknownError) + } + } +} + +impl Event { + #[cfg(test)] + #[must_use] + pub fn simple_event() -> Event { + Event { + id: "0".to_owned(), + pubkey: "0".to_owned(), + delegated_by: None, + created_at: 0, + kind: 0, + tags: vec![], + content: "".to_owned(), + sig: "0".to_owned(), + tagidx: None, + } + } + + #[must_use] + pub fn is_kind_metadata(&self) -> bool { + self.kind == 0 + } + + /// Should this event be persisted? + #[must_use] + pub fn is_ephemeral(&self) -> bool { + self.kind >= 20000 && self.kind < 30000 + } + + /// Is this event currently expired? + pub fn is_expired(&self) -> bool { + if let Some(exp) = self.expiration() { + exp <= unix_time() + } else { + false + } + } + + /// Determine the time at which this event should expire + pub fn expiration(&self) -> Option { + let default = "".to_string(); + let dvals: Vec<&String> = self + .tags + .iter() + .filter(|x| !x.is_empty()) + .filter(|x| x.get(0).unwrap() == "expiration") + .map(|x| x.get(1).unwrap_or(&default)) + .take(1) + .collect(); + let val_first = dvals.get(0); + val_first.and_then(|t| t.parse::().ok()) + } + + /// Should this event be replaced with newer timestamps from same author? + #[must_use] + pub fn is_replaceable(&self) -> bool { + self.kind == 0 + || self.kind == 3 + || self.kind == 41 + || (self.kind >= 10000 && self.kind < 20000) + } + + /// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values? + #[must_use] + pub fn is_param_replaceable(&self) -> bool { + self.kind >= 30000 && self.kind < 40000 + } + + /// Should this event be replaced with newer timestamps from same author, for distinct `d` tag values? + #[must_use] + pub fn distinct_param(&self) -> Option { + if self.is_param_replaceable() { + let default = "".to_string(); + let dvals: Vec<&String> = self + .tags + .iter() + .filter(|x| !x.is_empty()) + .filter(|x| x.get(0).unwrap() == "d") + .map(|x| x.get(1).unwrap_or(&default)) + .take(1) + .collect(); + let dval_first = dvals.get(0); + match dval_first { + Some(_) => dval_first.map(|x| x.to_string()), + None => Some(default), + } + } else { + None + } + } + + /// Pull a NIP-05 Name out of the event, if one exists + #[must_use] + pub fn get_nip05_addr(&self) -> Option { + if self.is_kind_metadata() { + // very quick check if we should attempt to parse this json + if self.content.contains("\"nip05\"") { + // Parse into JSON + let md_parsed: Value = serde_json::from_str(&self.content).ok()?; + let md_map = md_parsed.as_object()?; + let nip05_str = md_map.get("nip05")?.as_str()?; + return nip05::Nip05Name::try_from(nip05_str).ok(); + } + } + None + } + + // is this event delegated (properly)? 
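+    // A NIP-26 delegation tag has the shape (illustrative values only):
+    //   ["delegation", "<delegator pubkey hex>", "kind=1&created_at<1686078180", "<64-byte schnorr sig hex>"]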
+    // does the signature match, and are conditions valid?
+    // if so, return an alternate author for the event
+    #[must_use]
+    pub fn delegated_author(&self) -> Option<String> {
+        // is there a delegation tag?
+        let delegation_tag: Vec<String> = self
+            .tags
+            .iter()
+            .filter(|x| x.len() == 4)
+            .filter(|x| x.get(0).unwrap() == "delegation")
+            .take(1)
+            .next()?
+            .clone(); // get first tag
+
+        // delegation tags should have exactly 3 elements after the name (pubkey, condition, sig)
+        // the event is signed by the delegatee
+        let delegatee = &self.pubkey;
+        // the delegation tag references the claimed delegator
+        let delegator: &str = delegation_tag.get(1)?;
+        let querystr: &str = delegation_tag.get(2)?;
+        let sig: &str = delegation_tag.get(3)?;
+
+        // attempt to get a condition query; this requires the delegation to have a valid signature.
+        if let Some(cond_query) = validate_delegation(delegator, delegatee, querystr, sig) {
+            // The signature was valid, now we ensure the delegation
+            // condition is valid for this event:
+            if cond_query.allows_event(self) {
+                // since this is allowed, return the delegator as the
+                // author this event is attributed to
+                Some(delegator.into())
+            } else {
+                debug!("an event failed to satisfy delegation conditions");
+                None
+            }
+        } else {
+            debug!("event had an invalid delegation signature");
+            None
+        }
+    }
+
+    /// Update delegation status
+    pub fn update_delegation(&mut self) {
+        self.delegated_by = self.delegated_author();
+    }
+    /// Build an event tag index
+    pub fn build_index(&mut self) {
+        // if there are no tags; just leave the index as None
+        if self.tags.is_empty() {
+            return;
+        }
+        // otherwise, build an index
+        let mut idx: HashMap<char, HashSet<String>> = HashMap::new();
+        // iterate over tags that have at least 2 elements
+        for t in self.tags.iter().filter(|x| x.len() > 1) {
+            let tagname = t.get(0).unwrap();
+            let tagnamechar_opt = single_char_tagname(tagname);
+            if tagnamechar_opt.is_none() {
+                continue;
+            }
+            let tagnamechar = tagnamechar_opt.unwrap();
+            let tagval = t.get(1).unwrap();
+            // ensure a vector exists for this tag
+            idx.entry(tagnamechar).or_insert_with(HashSet::new);
+            // get the tag vec and insert entry
+            let idx_tag_vec = idx.get_mut(&tagnamechar).expect("could not get tag vector");
+            idx_tag_vec.insert(tagval.clone());
+        }
+        // save the tag structure
+        self.tagidx = Some(idx);
+    }
+
+    /// Create a short event identifier, suitable for logging.
+    #[must_use]
+    pub fn get_event_id_prefix(&self) -> String {
+        self.id.chars().take(8).collect()
+    }
+    #[must_use]
+    pub fn get_author_prefix(&self) -> String {
+        self.pubkey.chars().take(8).collect()
+    }
+
+    /// Retrieve tag initial values across all tags matching the name
+    #[must_use]
+    pub fn tag_values_by_name(&self, tag_name: &str) -> Vec<String> {
+        self.tags
+            .iter()
+            .filter(|x| x.len() > 1)
+            .filter(|x| x.get(0).unwrap() == tag_name)
+            .map(|x| x.get(1).unwrap().clone())
+            .collect()
+    }
+
+    #[must_use]
+    pub fn is_valid_timestamp(&self, reject_future_seconds: Option<usize>) -> bool {
+        if let Some(allowable_future) = reject_future_seconds {
+            let curr_time = unix_time();
+            // calculate difference, plus how far future we allow
+            if curr_time + (allowable_future as u64) < self.created_at {
+                let delta = self.created_at - curr_time;
+                debug!(
+                    "event is too far in the future ({} seconds), rejecting",
+                    delta
+                );
+                return false;
+            }
+        }
+        true
+    }
+
+    /// Check if this event has a valid signature.
+ pub fn validate(&self) -> Result<()> { + // TODO: return a Result with a reason for invalid events + // validation is performed by: + // * parsing JSON string into event fields + // * create an array: + // ** [0, pubkey-hex-string, created-at-num, kind-num, tags-array-of-arrays, content-string] + // * serialize with no spaces/newlines + let c_opt = self.to_canonical(); + if c_opt.is_none() { + debug!("could not canonicalize"); + return Err(EventCouldNotCanonicalize); + } + let c = c_opt.unwrap(); + // * compute the sha256sum. + let digest: sha256::Hash = sha256::Hash::hash(c.as_bytes()); + let hex_digest = format!("{digest:x}"); + // * ensure the id matches the computed sha256sum. + if self.id != hex_digest { + debug!("event id does not match digest"); + return Err(EventInvalidId); + } + // * validate the message digest (sig) using the pubkey & computed sha256 message hash. + let sig = schnorr::Signature::from_str(&self.sig).unwrap(); + if let Ok(msg) = secp256k1::Message::from_slice(digest.as_ref()) { + if let Ok(pubkey) = XOnlyPublicKey::from_str(&self.pubkey) { + SECP.verify_schnorr(&sig, &msg, &pubkey) + .map_err(|_| EventInvalidSignature) + } else { + debug!("client sent malformed pubkey"); + Err(EventMalformedPubkey) + } + } else { + info!("error converting digest to secp256k1 message"); + Err(EventInvalidSignature) + } + } + + /// Convert event to canonical representation for signing. + pub fn to_canonical(&self) -> Option { + // create a JsonValue for each event element + let mut c: Vec = vec![]; + // id must be set to 0 + let id = Number::from(0_u64); + c.push(serde_json::Value::Number(id)); + // public key + c.push(Value::String(self.pubkey.clone())); + // creation time + let created_at = Number::from(self.created_at); + c.push(serde_json::Value::Number(created_at)); + // kind + let kind = Number::from(self.kind); + c.push(serde_json::Value::Number(kind)); + // tags + c.push(self.tags_to_canonical()); + // content + c.push(Value::String(self.content.clone())); + serde_json::to_string(&Value::Array(c)).ok() + } + + /// Convert tags to a canonical form for signing. + fn tags_to_canonical(&self) -> Value { + let mut tags = Vec::::new(); + // iterate over self tags, + for t in &self.tags { + // each tag is a vec of strings + let mut a = Vec::::new(); + for v in t.iter() { + a.push(serde_json::Value::String(v.clone())); + } + tags.push(serde_json::Value::Array(a)); + } + serde_json::Value::Array(tags) + } + + /// Determine if the given tag and value set intersect with tags in this event. 
+ #[must_use] + pub fn generic_tag_val_intersect(&self, tagname: char, check: &HashSet) -> bool { + match &self.tagidx { + // check if this is indexable tagname + Some(idx) => match idx.get(&tagname) { + Some(valset) => { + let common = valset.intersection(check); + common.count() > 0 + } + None => false, + }, + None => false, + } + } +} + +impl From for Event { + fn from(nostr_event: nostr::Event) -> Self { + Event { + id: nostr_event.id.to_hex(), + pubkey: nostr_event.pubkey.to_string(), + created_at: nostr_event.created_at.as_u64(), + kind: nostr_event.kind.as_u64(), + tags: nostr_event.tags.iter().map(|x| x.as_vec()).collect(), + content: nostr_event.content, + sig: nostr_event.sig.to_string(), + delegated_by: None, + tagidx: None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn event_creation() { + // create an event + let event = Event::simple_event(); + assert_eq!(event.id, "0"); + } + + #[test] + fn event_serialize() -> Result<()> { + // serialize an event to JSON string + let event = Event::simple_event(); + let j = serde_json::to_string(&event)?; + assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[],\"content\":\"\",\"sig\":\"0\"}"); + Ok(()) + } + + #[test] + fn empty_event_tag_match() { + let event = Event::simple_event(); + assert!(!event + .generic_tag_val_intersect('e', &HashSet::from(["foo".to_owned(), "bar".to_owned()]))); + } + + #[test] + fn single_event_tag_match() { + let mut event = Event::simple_event(); + event.tags = vec![vec!["e".to_owned(), "foo".to_owned()]]; + event.build_index(); + assert!( + event.generic_tag_val_intersect( + 'e', + &HashSet::from(["foo".to_owned(), "bar".to_owned()]) + ) + ); + } + + #[test] + fn event_tags_serialize() -> Result<()> { + // serialize an event with tags to JSON string + let mut event = Event::simple_event(); + event.tags = vec![ + vec![ + "e".to_owned(), + "xxxx".to_owned(), + "wss://example.com".to_owned(), + ], + vec![ + "p".to_owned(), + "yyyyy".to_owned(), + "wss://example.com:3033".to_owned(), + ], + ]; + let j = serde_json::to_string(&event)?; + assert_eq!(j, "{\"id\":\"0\",\"pubkey\":\"0\",\"created_at\":0,\"kind\":0,\"tags\":[[\"e\",\"xxxx\",\"wss://example.com\"],[\"p\",\"yyyyy\",\"wss://example.com:3033\"]],\"content\":\"\",\"sig\":\"0\"}"); + Ok(()) + } + + #[test] + fn event_deserialize() -> Result<()> { + let raw_json = r#"{"id":"1384757da583e6129ce831c3d7afc775a33a090578f888dd0d010328ad047d0c","pubkey":"bbbd9711d357df4f4e498841fd796535c95c8e751fa35355008a911c41265fca","created_at":1612650459,"kind":1,"tags":null,"content":"hello world","sig":"59d0cc47ab566e81f72fe5f430bcfb9b3c688cb0093d1e6daa49201c00d28ecc3651468b7938642869ed98c0f1b262998e49a05a6ed056c0d92b193f4e93bc21"}"#; + let e: Event = serde_json::from_str(raw_json)?; + assert_eq!(e.kind, 1); + assert_eq!(e.tags.len(), 0); + Ok(()) + } + + #[test] + fn event_canonical() { + let e = Event { + id: "999".to_owned(), + pubkey: "012345".to_owned(), + delegated_by: None, + created_at: 501_234, + kind: 1, + tags: vec![], + content: "this is a test".to_owned(), + sig: "abcde".to_owned(), + tagidx: None, + }; + let c = e.to_canonical(); + let expected = Some(r#"[0,"012345",501234,1,[],"this is a test"]"#.to_owned()); + assert_eq!(c, expected); + } + + #[test] + fn event_tag_select() { + let e = Event { + id: "999".to_owned(), + pubkey: "012345".to_owned(), + delegated_by: None, + created_at: 501_234, + kind: 1, + tags: vec![ + vec!["j".to_owned(), "abc".to_owned()], + vec!["e".to_owned(), "foo".to_owned()], + 
vec!["e".to_owned(), "bar".to_owned()], + vec!["e".to_owned(), "baz".to_owned()], + vec![ + "p".to_owned(), + "aaaa".to_owned(), + "ws://example.com".to_owned(), + ], + ], + content: "this is a test".to_owned(), + sig: "abcde".to_owned(), + tagidx: None, + }; + let v = e.tag_values_by_name("e"); + assert_eq!(v, vec!["foo", "bar", "baz"]); + } + + #[test] + fn event_no_tag_select() { + let e = Event { + id: "999".to_owned(), + pubkey: "012345".to_owned(), + delegated_by: None, + created_at: 501_234, + kind: 1, + tags: vec![ + vec!["j".to_owned(), "abc".to_owned()], + vec!["e".to_owned(), "foo".to_owned()], + vec!["e".to_owned(), "baz".to_owned()], + vec![ + "p".to_owned(), + "aaaa".to_owned(), + "ws://example.com".to_owned(), + ], + ], + content: "this is a test".to_owned(), + sig: "abcde".to_owned(), + tagidx: None, + }; + let v = e.tag_values_by_name("x"); + // asking for tags that don't exist just returns zero-length vector + assert_eq!(v.len(), 0); + } + + #[test] + fn event_canonical_with_tags() { + let e = Event { + id: "999".to_owned(), + pubkey: "012345".to_owned(), + delegated_by: None, + created_at: 501_234, + kind: 1, + tags: vec![ + vec!["#e".to_owned(), "aoeu".to_owned()], + vec![ + "#p".to_owned(), + "aaaa".to_owned(), + "ws://example.com".to_owned(), + ], + ], + content: "this is a test".to_owned(), + sig: "abcde".to_owned(), + tagidx: None, + }; + let c = e.to_canonical(); + let expected_json = r###"[0,"012345",501234,1,[["#e","aoeu"],["#p","aaaa","ws://example.com"]],"this is a test"]"###; + let expected = Some(expected_json.to_owned()); + assert_eq!(c, expected); + } + + #[test] + fn ephemeral_event() { + let mut event = Event::simple_event(); + event.kind = 20000; + assert!(event.is_ephemeral()); + event.kind = 29999; + assert!(event.is_ephemeral()); + event.kind = 30000; + assert!(!event.is_ephemeral()); + event.kind = 19999; + assert!(!event.is_ephemeral()); + } + + #[test] + fn replaceable_event() { + let mut event = Event::simple_event(); + event.kind = 0; + assert!(event.is_replaceable()); + event.kind = 3; + assert!(event.is_replaceable()); + event.kind = 10000; + assert!(event.is_replaceable()); + event.kind = 19999; + assert!(event.is_replaceable()); + event.kind = 20000; + assert!(!event.is_replaceable()); + } + + #[test] + fn param_replaceable_event() { + let mut event = Event::simple_event(); + event.kind = 30000; + assert!(event.is_param_replaceable()); + event.kind = 39999; + assert!(event.is_param_replaceable()); + event.kind = 29999; + assert!(!event.is_param_replaceable()); + event.kind = 40000; + assert!(!event.is_param_replaceable()); + } + + #[test] + fn param_replaceable_value_case_1() { + // NIP case #1: "tags":[["d",""]] + let mut event = Event::simple_event(); + event.kind = 30000; + event.tags = vec![vec!["d".to_owned(), "".to_owned()]]; + assert_eq!(event.distinct_param(), Some("".to_string())); + } + + #[test] + fn param_replaceable_value_case_2() { + // NIP case #2: "tags":[]: implicit d tag with empty value + let mut event = Event::simple_event(); + event.kind = 30000; + assert_eq!(event.distinct_param(), Some("".to_string())); + } + + #[test] + fn param_replaceable_value_case_3() { + // NIP case #3: "tags":[["d"]]: implicit empty value "" + let mut event = Event::simple_event(); + event.kind = 30000; + event.tags = vec![vec!["d".to_owned()]]; + assert_eq!(event.distinct_param(), Some("".to_string())); + } + + #[test] + fn param_replaceable_value_case_4() { + // NIP case #4: "tags":[["d",""],["d","not empty"]]: only first d tag is considered + 
let mut event = Event::simple_event(); + event.kind = 30000; + event.tags = vec![ + vec!["d".to_owned(), "".to_string()], + vec!["d".to_owned(), "not empty".to_string()], + ]; + assert_eq!(event.distinct_param(), Some("".to_string())); + } + + #[test] + fn param_replaceable_value_case_4b() { + // Variation of #4 with + // NIP case #4: "tags":[["d","not empty"],["d",""]]: only first d tag is considered + let mut event = Event::simple_event(); + event.kind = 30000; + event.tags = vec![ + vec!["d".to_owned(), "not empty".to_string()], + vec!["d".to_owned(), "".to_string()], + ]; + assert_eq!(event.distinct_param(), Some("not empty".to_string())); + } + + #[test] + fn param_replaceable_value_case_5() { + // NIP case #5: "tags":[["d"],["d","some value"]]: only first d tag is considered + let mut event = Event::simple_event(); + event.kind = 30000; + event.tags = vec![ + vec!["d".to_owned()], + vec!["d".to_owned(), "second value".to_string()], + vec!["d".to_owned(), "third value".to_string()], + ]; + assert_eq!(event.distinct_param(), Some("".to_string())); + } + + #[test] + fn param_replaceable_value_case_6() { + // NIP case #6: "tags":[["e"]]: same as no tags + let mut event = Event::simple_event(); + event.kind = 30000; + event.tags = vec![vec!["e".to_owned()]]; + assert_eq!(event.distinct_param(), Some("".to_string())); + } + + #[test] + fn expiring_event_none() { + // regular events do not expire + let mut event = Event::simple_event(); + event.kind = 7; + event.tags = vec![vec!["test".to_string(), "foo".to_string()]]; + assert_eq!(event.expiration(), None); + } + + #[test] + fn expiring_event_empty() { + // regular events do not expire + let mut event = Event::simple_event(); + event.kind = 7; + event.tags = vec![vec!["expiration".to_string()]]; + assert_eq!(event.expiration(), None); + } + + #[test] + fn expiring_event_future() { + // a normal expiring event + let exp: u64 = 1676264138; + let mut event = Event::simple_event(); + event.kind = 1; + event.tags = vec![vec!["expiration".to_string(), exp.to_string()]]; + assert_eq!(event.expiration(), Some(exp)); + } + + #[test] + fn expiring_event_negative() { + // expiration set to a negative value (invalid) + let exp: i64 = -90; + let mut event = Event::simple_event(); + event.kind = 1; + event.tags = vec![vec!["expiration".to_string(), exp.to_string()]]; + assert_eq!(event.expiration(), None); + } + + #[test] + fn expiring_event_zero() { + // a normal expiring event set to zero + let exp: i64 = 0; + let mut event = Event::simple_event(); + event.kind = 1; + event.tags = vec![vec!["expiration".to_string(), exp.to_string()]]; + assert_eq!(event.expiration(), Some(0)); + } + + #[test] + fn expiring_event_fraction() { + // expiration is fractional (invalid) + let exp: f64 = 23.334; + let mut event = Event::simple_event(); + event.kind = 1; + event.tags = vec![vec!["expiration".to_string(), exp.to_string()]]; + assert_eq!(event.expiration(), None); + } + + #[test] + fn expiring_event_multiple() { + // multiple values, we just take the first + let mut event = Event::simple_event(); + event.kind = 1; + event.tags = vec![ + vec!["expiration".to_string(), (10).to_string()], + vec!["expiration".to_string(), (20).to_string()], + ]; + assert_eq!(event.expiration(), Some(10)); + } +} \ No newline at end of file diff --git a/crates/rooch-ws-relay/src/server/hexrange.rs b/crates/rooch-ws-relay/src/server/hexrange.rs new file mode 100644 index 0000000000..521353dd97 --- /dev/null +++ b/crates/rooch-ws-relay/src/server/hexrange.rs @@ -0,0 +1,159 @@ +//! 
Utilities for searching hexadecimal +use super::utils::is_hex; +use hex; + +/// Types of hexadecimal queries. +#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)] +pub enum HexSearch { + // when no range is needed, exact 32-byte + Exact(Vec), + // lower (inclusive) and upper range (exclusive) + Range(Vec, Vec), + // lower bound only, upper bound is MAX inclusive + LowerOnly(Vec), +} + +/// Check if a string contains only f chars +fn is_all_fs(s: &str) -> bool { + s.chars().all(|x| x == 'f' || x == 'F') +} + +/// Find the next hex sequence greater than the argument. +#[must_use] +pub fn hex_range(s: &str) -> Option { + let mut hash_base = s.to_owned(); + if !is_hex(&hash_base) || hash_base.len() > 64 { + return None; + } + if hash_base.len() == 64 { + return Some(HexSearch::Exact(hex::decode(&hash_base).ok()?)); + } + // if s is odd, add a zero + let mut odd = hash_base.len() % 2 != 0; + if odd { + // extend the string to make it even + hash_base.push('0'); + } + let base = hex::decode(hash_base).ok()?; + // check for all ff's + if is_all_fs(s) { + // there is no higher bound, we only want to search for blobs greater than this. + return Some(HexSearch::LowerOnly(base)); + } + + // return a range + let mut upper = base.clone(); + let mut byte_len = upper.len(); + + // for odd strings, we made them longer, but we want to increment the upper char (+16). + // we know we can do this without overflowing because we explicitly set the bottom half to 0's. + while byte_len > 0 { + byte_len -= 1; + // check if byte can be incremented, or if we need to carry. + let b = upper[byte_len]; + if b == u8::MAX { + // reset and carry + upper[byte_len] = 0; + } else if odd { + // check if first char in this byte is NOT 'f' + if b < 240 { + // bump up the first character in this byte + upper[byte_len] = b + 16; + // increment done, stop iterating through the vec + break; + } + // if it is 'f', reset the byte to 0 and do a carry + // reset and carry + upper[byte_len] = 0; + // done with odd logic, so don't repeat this + odd = false; + } else { + // bump up the first character in this byte + upper[byte_len] = b + 1; + // increment done, stop iterating + break; + } + } + Some(HexSearch::Range(base, upper)) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::server::error::Result; + + #[test] + fn hex_range_exact() -> Result<()> { + let hex = "abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00abcdef00"; + let r = hex_range(hex); + assert_eq!( + r, + Some(HexSearch::Exact(hex::decode(hex).expect("invalid hex"))) + ); + Ok(()) + } + #[test] + fn hex_full_range() -> Result<()> { + let hex = "aaaa"; + let hex_upper = "aaab"; + let r = hex_range(hex); + assert_eq!( + r, + Some(HexSearch::Range( + hex::decode(hex).expect("invalid hex"), + hex::decode(hex_upper).expect("invalid hex") + )) + ); + Ok(()) + } + + #[test] + fn hex_full_range_odd() -> Result<()> { + let r = hex_range("abc"); + assert_eq!( + r, + Some(HexSearch::Range( + hex::decode("abc0").expect("invalid hex"), + hex::decode("abd0").expect("invalid hex") + )) + ); + Ok(()) + } + + #[test] + fn hex_full_range_odd_end_f() -> Result<()> { + let r = hex_range("abf"); + assert_eq!( + r, + Some(HexSearch::Range( + hex::decode("abf0").expect("invalid hex"), + hex::decode("ac00").expect("invalid hex") + )) + ); + Ok(()) + } + + #[test] + fn hex_no_upper() -> Result<()> { + let r = hex_range("ffff"); + assert_eq!( + r, + Some(HexSearch::LowerOnly( + hex::decode("ffff").expect("invalid hex") + )) + ); + Ok(()) + } + + #[test] + fn hex_no_upper_odd() 
-> Result<()> {
+        let r = hex_range("fff");
+        assert_eq!(
+            r,
+            Some(HexSearch::LowerOnly(
+                hex::decode("fff0").expect("invalid hex")
+            ))
+        );
+        Ok(())
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/server/info.rs b/crates/rooch-ws-relay/src/server/info.rs
new file mode 100644
index 0000000000..2f2a42c2c2
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/info.rs
@@ -0,0 +1,129 @@
+//! Relay metadata using NIP-11
+/// Relay Info
+use crate::server::config::Settings;
+use serde::{Deserialize, Serialize};
+
+pub const CARGO_PKG_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
+pub const UNIT: &str = "sats";
+
+/// Limitations of the relay as specified in NIP-11
+/// (this part of the NIP isn't finalized, so it may change)
+#[derive(Debug, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct Limitation {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    payment_required: Option<bool>,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+#[allow(unused)]
+pub struct Fees {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    admission: Option<Vec<Fee>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    publication: Option<Vec<Fee>>,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+#[allow(unused)]
+pub struct Fee {
+    amount: u64,
+    unit: String,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[allow(unused)]
+pub struct RelayInfo {
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub id: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub description: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub pubkey: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub contact: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub supported_nips: Option<Vec<i64>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub software: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub version: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub limitation: Option<Limitation>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub payment_url: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub fees: Option<Fees>,
+}
+
+/// Convert an Info configuration into public Relay Info
+impl From<Settings> for RelayInfo {
+    fn from(c: Settings) -> Self {
+        // 42 is appended below only when NIP-42 auth is enabled, so it is
+        // deliberately absent from the base list.
+        let mut supported_nips = vec![1, 2, 9, 11, 12, 15, 16, 20, 22, 33, 40];
+
+        if c.authorization.nip42_auth {
+            supported_nips.push(42);
+            supported_nips.sort();
+        }
+
+        let i = c.info;
+        let p = c.pay_to_relay;
+
+        let limitations = Limitation {
+            payment_required: Some(p.enabled),
+        };
+
+        let (payment_url, fees) = if p.enabled {
+            let admission_fee = if p.admission_cost > 0 {
+                Some(vec![Fee {
+                    amount: p.admission_cost,
+                    unit: UNIT.to_string(),
+                }])
+            } else {
+                None
+            };
+
+            let post_fee = if p.cost_per_event > 0 {
+                Some(vec![Fee {
+                    amount: p.cost_per_event,
+                    unit: UNIT.to_string(),
+                }])
+            } else {
+                None
+            };
+
+            let fees = Fees {
+                admission: admission_fee,
+                publication: post_fee,
+            };
+
+            let payment_url = if p.enabled && i.relay_url.is_some() {
+                Some(format!(
+                    "{}join",
+                    i.relay_url.clone().unwrap().replace("ws", "http")
+                ))
+            } else {
+                None
+            };
+            (payment_url, Some(fees))
+        } else {
+            (None, None)
+        };
+
+        RelayInfo {
+            id: i.relay_url,
+            name: i.name,
+            description: i.description,
+            pubkey: i.pubkey,
+            contact: i.contact,
+            supported_nips: Some(supported_nips),
+            software: Some("https://git.sr.ht/~gheartsfield/nostr-rs-relay".to_owned()),
+            version:
CARGO_PKG_VERSION.map(std::borrow::ToOwned::to_owned), + limitation: Some(limitations), + payment_url, + fees, + } + } +} \ No newline at end of file diff --git a/crates/rooch-ws-relay/src/server/mod.rs b/crates/rooch-ws-relay/src/server/mod.rs new file mode 100644 index 0000000000..cd49e6df01 --- /dev/null +++ b/crates/rooch-ws-relay/src/server/mod.rs @@ -0,0 +1,17 @@ +// Copyright (c) RoochNetwork +// SPDX-License-Identifier: Apache-2.0 + +pub mod close; +pub mod config; +pub mod conn; +pub mod delegation; +pub mod error; +pub mod event; +pub mod info; +pub mod nip05; +pub mod notice; +pub mod subscription; +pub mod utils; +pub mod db; +// pub mod nauthz; +pub mod hexrange; diff --git a/crates/rooch-ws-relay/src/server/nip05.rs b/crates/rooch-ws-relay/src/server/nip05.rs new file mode 100644 index 0000000000..6d15be1afa --- /dev/null +++ b/crates/rooch-ws-relay/src/server/nip05.rs @@ -0,0 +1,607 @@ +//! User verification using NIP-05 names +//! +//! NIP-05 defines a mechanism for authors to associate an internet +//! address with their public key, in metadata events. This module +//! consumes a stream of metadata events, and keeps a database table +//! updated with the current NIP-05 verification status. +use crate::repo::nostr::NostrRepo; +use crate::server::config::VerifiedUsers; +use crate::server::error::{Error, Result}; +use crate::server::event::Event; +use hyper::body::HttpBody; +use hyper::client::connect::HttpConnector; +use hyper::Client; +use hyper_tls::HttpsConnector; +use std::sync::Arc; +use std::time::Duration; +use std::time::Instant; +use std::time::SystemTime; +use tokio::time::Interval; +use tracing::{debug, info, warn}; + +/// NIP-05 verifier state +pub struct Verifier { + /// Repository for saving/retrieving events and records + repo: Arc, + /// Metadata events for us to inspect + metadata_rx: tokio::sync::broadcast::Receiver, + /// Newly validated events get written and then broadcast on this channel to subscribers + event_tx: tokio::sync::broadcast::Sender, + /// Settings + settings: crate::server::config::Settings, + /// HTTP client + client: hyper::Client, hyper::Body>, + /// After all accounts are updated, wait this long before checking again. + wait_after_finish: Duration, + /// Minimum amount of time between HTTP queries + http_wait_duration: Duration, + /// Interval for updating verification records + reverify_interval: Interval, +} + +/// A NIP-05 identifier is a local part and domain. +#[derive(PartialEq, Eq, Debug, Clone)] +pub struct Nip05Name { + pub local: String, + pub domain: String, +} + +impl Nip05Name { + /// Does this name represent the entire domain? + #[must_use] + pub fn is_domain_only(&self) -> bool { + self.local == "_" + } + + /// Determine the URL to query for verification + fn to_url(&self) -> Option { + format!( + "https://{}/.well-known/nostr.json?name={}", + self.domain, self.local + ) + .parse::() + .ok() + } +} + +// Parsing Nip05Names from strings +impl std::convert::TryFrom<&str> for Nip05Name { + type Error = Error; + fn try_from(inet: &str) -> Result { + // break full name at the @ boundary. 
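+        // e.g. "bob@example.com" parses to local: "bob", domain: "example.com";
+        // the special local part "_" designates the whole domain (see is_domain_only).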
+ let components: Vec<&str> = inet.split('@').collect(); + if components.len() == 2 { + // check if local name is valid + let local = components[0]; + let domain = components[1]; + if local + .chars() + .all(|x| x.is_alphanumeric() || x == '_' || x == '-' || x == '.') + { + if domain + .chars() + .all(|x| x.is_alphanumeric() || x == '-' || x == '.') + { + Ok(Nip05Name { + local: local.to_owned(), + domain: domain.to_owned(), + }) + } else { + Err(Error::CustomError( + "invalid character in domain part".to_owned(), + )) + } + } else { + Err(Error::CustomError( + "invalid character in local part".to_owned(), + )) + } + } else { + Err(Error::CustomError("too many/few components".to_owned())) + } + } +} + +impl std::fmt::Display for Nip05Name { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}@{}", self.local, self.domain) + } +} + +/// Check if the specified username and address are present and match in this response body +fn body_contains_user(username: &str, address: &str, bytes: &hyper::body::Bytes) -> Result { + // convert the body into json + let body: serde_json::Value = serde_json::from_slice(bytes)?; + // ensure we have a names object. + let names_map = body + .as_object() + .and_then(|x| x.get("names")) + .and_then(serde_json::Value::as_object) + .ok_or_else(|| Error::CustomError("not a map".to_owned()))?; + // get the pubkey for the requested user + let check_name = names_map.get(username).and_then(serde_json::Value::as_str); + // ensure the address is a match + Ok(check_name.map_or(false, |x| x == address)) +} + +impl Verifier { + pub fn new( + repo: Arc, + metadata_rx: tokio::sync::broadcast::Receiver, + event_tx: tokio::sync::broadcast::Sender, + settings: crate::server::config::Settings, + ) -> Result { + info!("creating NIP-05 verifier"); + // setup hyper client + let https = HttpsConnector::new(); + let client = Client::builder().build::<_, hyper::Body>(https); + + // After all accounts have been re-verified, don't check again + // for this long. + let wait_after_finish = Duration::from_secs(60 * 10); + // when we have an active queue of accounts to validate, we + // will wait this duration between HTTP requests. + let http_wait_duration = Duration::from_secs(1); + // setup initial interval for re-verification. If we find + // there is no work to be done, it will be reset to a longer + // duration. + let reverify_interval = tokio::time::interval(http_wait_duration); + Ok(Verifier { + repo, + metadata_rx, + event_tx, + settings, + client, + wait_after_finish, + http_wait_duration, + reverify_interval, + }) + } + + /// Perform web verification against a NIP-05 name and address. + pub async fn get_web_verification( + &mut self, + nip: &Nip05Name, + pubkey: &str, + ) -> UserWebVerificationStatus { + self.get_web_verification_res(nip, pubkey) + .await + .unwrap_or(UserWebVerificationStatus::Unknown) + } + + /// Perform web verification against an `Event` (must be metadata). + pub async fn get_web_verification_from_event( + &mut self, + e: &Event, + ) -> UserWebVerificationStatus { + let nip_parse = e.get_nip05_addr(); + if let Some(nip) = nip_parse { + self.get_web_verification_res(&nip, &e.pubkey) + .await + .unwrap_or(UserWebVerificationStatus::Unknown) + } else { + UserWebVerificationStatus::Unknown + } + } + + /// Perform web verification, with a `Result` return. 
+ async fn get_web_verification_res( + &mut self, + nip: &Nip05Name, + pubkey: &str, + ) -> Result { + // determine if this domain should be checked + if !is_domain_allowed( + &nip.domain, + &self.settings.verified_users.domain_whitelist, + &self.settings.verified_users.domain_blacklist, + ) { + return Ok(UserWebVerificationStatus::DomainNotAllowed); + } + let url = nip + .to_url() + .ok_or_else(|| Error::CustomError("invalid NIP-05 URL".to_owned()))?; + let req = hyper::Request::builder() + .method(hyper::Method::GET) + .uri(url) + .header("Accept", "application/json") + .header( + "User-Agent", + format!( + "nostr-rs-relay/{} NIP-05 Verifier", + crate::server::info::CARGO_PKG_VERSION.unwrap() + ), + ) + .body(hyper::Body::empty()) + .expect("request builder"); + + let response_fut = self.client.request(req); + + if let Ok(response_res) = tokio::time::timeout(Duration::from_secs(5), response_fut).await { + // limit size of verification document to 1MB. + const MAX_ALLOWED_RESPONSE_SIZE: u64 = 1024 * 1024; + let response = response_res?; + // determine content length from response + let response_content_length = match response.body().size_hint().upper() { + Some(v) => v, + None => MAX_ALLOWED_RESPONSE_SIZE + 1, // reject missing content length + }; + // TODO: test how hyper handles the client providing an inaccurate content-length. + if response_content_length <= MAX_ALLOWED_RESPONSE_SIZE { + let (parts, body) = response.into_parts(); + // TODO: consider redirects + if parts.status == http::StatusCode::OK { + // parse body, determine if the username / key / address is present + let body_bytes = hyper::body::to_bytes(body).await?; + let body_matches = body_contains_user(&nip.local, pubkey, &body_bytes)?; + if body_matches { + return Ok(UserWebVerificationStatus::Verified); + } + // successful response, parsed as a nip-05 + // document, but this name/pubkey was not + // present. + return Ok(UserWebVerificationStatus::Unverified); + } + } else { + info!( + "content length missing or exceeded limits for account: {:?}", + nip.to_string() + ); + } + } else { + info!("timeout verifying account {:?}", nip); + return Ok(UserWebVerificationStatus::Unknown); + } + Ok(UserWebVerificationStatus::Unknown) + } + + /// Perform NIP-05 verifier tasks. + pub async fn run(&mut self) { + // use this to schedule periodic re-validation tasks + // run a loop, restarting on failure + loop { + let res = self.run_internal().await; + match res { + Err(Error::ChannelClosed) => { + // channel was closed, we are shutting down + return; + } + Err(e) => { + info!("error in verifier: {:?}", e); + } + _ => {} + } + } + } + + /// Internal select loop for performing verification + async fn run_internal(&mut self) -> Result<()> { + tokio::select! { + m = self.metadata_rx.recv() => { + match m { + Ok(e) => { + if let Some(naddr) = e.get_nip05_addr() { + info!("got metadata event for ({:?},{:?})", naddr.to_string() ,e.get_author_prefix()); + // Process a new author, checking if they are verified: + let check_verified = self.repo.get_latest_user_verification(&e.pubkey).await; + // ensure the event we got is more recent than the one we have, otherwise we can ignore it. + if let Ok(last_check) = check_verified { + if e.created_at <= last_check.event_created { + // this metadata is from the same author as an existing verification. + // it is older than what we have, so we can ignore it. 
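+                                // e.g. a verification recorded from a metadata event with
+                                // created_at=1700000100 makes a later-received event with
+                                // created_at=1700000000 a no-op here.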
+ debug!("received older metadata event for author {:?}", e.get_author_prefix()); + return Ok(()); + } + } + // old, or no existing record for this user. In either case, we just create a new one. + let start = Instant::now(); + let v = self.get_web_verification_from_event(&e).await; + info!( + "checked name {:?}, result: {:?}, in: {:?}", + naddr.to_string(), + v, + start.elapsed() + ); + // sleep to limit how frequently we make HTTP requests for new metadata events. This should limit us to 4 req/sec. + tokio::time::sleep(Duration::from_millis(250)).await; + // if this user was verified, we need to write the + // record, persist the event, and broadcast. + if let UserWebVerificationStatus::Verified = v { + self.create_new_verified_user(&naddr.to_string(), &e).await?; + } + } + }, + Err(tokio::sync::broadcast::error::RecvError::Lagged(c)) => { + warn!("incoming metadata events overwhelmed buffer, {} events dropped",c); + } + Err(tokio::sync::broadcast::error::RecvError::Closed) => { + info!("metadata broadcast channel closed"); + return Err(Error::ChannelClosed); + } + } + }, + _ = self.reverify_interval.tick() => { + // check and see if there is an old account that needs + // to be reverified + self.do_reverify().await?; + }, + } + Ok(()) + } + + /// Reverify the oldest user verification record. + async fn do_reverify(&mut self) -> Result<()> { + let reverify_setting = self + .settings + .verified_users + .verify_update_frequency_duration; + let max_failures = self.settings.verified_users.max_consecutive_failures; + // get from settings, but default to 6hrs between re-checking an account + let reverify_dur = reverify_setting.unwrap_or_else(|| Duration::from_secs(60 * 60 * 6)); + // find all verification records that have success or failure OLDER than the reverify_dur. + let now = SystemTime::now(); + let earliest = now - reverify_dur; + let earliest_epoch = earliest + .duration_since(SystemTime::UNIX_EPOCH) + .map(|x| x.as_secs()) + .unwrap_or(0); + let vr = self.repo.get_oldest_user_verification(earliest_epoch).await; + match vr { + Ok(ref v) => { + let new_status = self.get_web_verification(&v.name, &v.address).await; + match new_status { + UserWebVerificationStatus::Verified => { + // freshly verified account, update the + // timestamp. + self.repo.update_verification_timestamp(v.rowid).await?; + info!("verification updated for {}", v.to_string()); + } + UserWebVerificationStatus::DomainNotAllowed + | UserWebVerificationStatus::Unknown => { + // server may be offline, or temporarily + // blocked by the config file. Note the + // failure so we can process something + // else. + + // have we had enough failures to give up? + if v.failure_count >= max_failures as u64 { + info!( + "giving up on verifying {:?} after {} failures", + v.name, v.failure_count + ); + self.repo.delete_verification(v.rowid).await?; + } else { + // record normal failure, incrementing failure count + info!("verification failed for {}", v.to_string()); + self.repo.fail_verification(v.rowid).await?; + } + } + UserWebVerificationStatus::Unverified => { + // domain has removed the verification, drop + // the record on our side. + info!("verification rescinded for {}", v.to_string()); + self.repo.delete_verification(v.rowid).await?; + } + } + } + Err( + Error::SqlError(rusqlite::Error::QueryReturnedNoRows) + | Error::SqlxError(sqlx::Error::RowNotFound), + ) => { + // No users need verification. Reset the interval to + // the next verification attempt. 
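+                // Push the next tick out by wait_after_finish, then resume
+                // the normal per-request cadence.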
+                let start = tokio::time::Instant::now() + self.wait_after_finish;
+                self.reverify_interval = tokio::time::interval_at(start, self.http_wait_duration);
+            }
+            Err(ref e) => {
+                warn!(
+                    "Error when checking for NIP-05 verification records: {:?}",
+                    e
+                );
+            }
+        }
+        Ok(())
+    }
+
+    /// Persist an event, create a verification record, and broadcast.
+    // TODO: have more event-writing logic handled in the db module.
+    // Right now, these events avoid the rate limit. That is
+    // acceptable since as soon as the user is registered, this path
+    // is no longer used.
+    // TODO: refactor these into spawn_blocking
+    // calls to get them off the async executors.
+    async fn create_new_verified_user(&mut self, name: &str, event: &Event) -> Result<()> {
+        let start = Instant::now();
+        // we should only do this if we are enabled. if we are
+        // disabled/passive, the event has already been persisted.
+        let should_write_event = self.settings.verified_users.is_enabled();
+        if should_write_event {
+            match self.repo.write_event(event).await {
+                Ok(updated) => {
+                    if updated != 0 {
+                        info!(
+                            "persisted event (new verified pubkey): {:?} in {:?}",
+                            event.get_event_id_prefix(),
+                            start.elapsed()
+                        );
+                        self.event_tx.send(event.clone()).ok();
+                    }
+                }
+                Err(err) => {
+                    warn!("event insert failed: {:?}", err);
+                    if let Error::SqlError(r) = err {
+                        warn!("because: {:?}", r);
+                    }
+                }
+            }
+        }
+        // write the verification record
+        self.repo
+            .create_verification_record(&event.id, name)
+            .await?;
+        Ok(())
+    }
+}
+
+/// Result of checking user's verification status against DNS/HTTP.
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub enum UserWebVerificationStatus {
+    Verified,         // user is verified, as of now.
+    DomainNotAllowed, // domain blacklist or whitelist denied us from attempting a verification
+    Unknown,          // user's status could not be determined (timeout, server error)
+    Unverified,       // user's status is not verified (successful check, name / addr do not match)
+}
+
+/// A NIP-05 verification record.
+#[derive(PartialEq, Eq, Debug, Clone)]
+// Basic information for a verification event. Gives us all we need to assert a NIP-05 address is good.
+pub struct VerificationRecord {
+    pub rowid: u64,                // database row for this verification event
+    pub name: Nip05Name,           // address being verified
+    pub address: String,           // pubkey
+    pub event: String,             // event ID hash providing the verification
+    pub event_created: u64,        // when the metadata event was published
+    pub last_success: Option<u64>, // the most recent time a verification was provided. None if verification under this name has never succeeded.
+    pub last_failure: Option<u64>, // the most recent time verification was attempted, but could not be completed.
+    pub failure_count: u64,        // how many consecutive failures have been observed.
+}
+
+/// Check with settings to determine if a given domain is allowed to
+/// publish.
+#[must_use]
+pub fn is_domain_allowed(
+    domain: &str,
+    whitelist: &Option<Vec<String>>,
+    blacklist: &Option<Vec<String>>,
+) -> bool {
+    // if there is a whitelist, domain must be present in it.
+    if let Some(wl) = whitelist {
+        // workaround for Vec contains not accepting &str
+        return wl.iter().any(|x| x == domain);
+    }
+    // otherwise, check that user is not in the blacklist
+    if let Some(bl) = blacklist {
+        return !bl.iter().any(|x| x == domain);
+    }
+    true
+}
+
+impl VerificationRecord {
+    /// Check if the record is recent enough to be considered valid,
+    /// and the domain is allowed.
+    #[must_use]
+    pub fn is_valid(&self, verified_users_settings: &VerifiedUsers) -> bool {
+        //let settings = SETTINGS.read().unwrap();
+        // how long a verification record is good for
+        let nip05_expiration = &verified_users_settings.verify_expiration_duration;
+        if let Some(e) = nip05_expiration {
+            if !self.is_current(e) {
+                return false;
+            }
+        }
+        // check domains
+        is_domain_allowed(
+            &self.name.domain,
+            &verified_users_settings.domain_whitelist,
+            &verified_users_settings.domain_blacklist,
+        )
+    }
+
+    /// Check if this record has been validated since the given
+    /// duration.
+    fn is_current(&self, d: &Duration) -> bool {
+        match self.last_success {
+            Some(s) => {
+                // current time - duration
+                let now = SystemTime::now();
+                let cutoff = now - *d;
+                let cutoff_epoch = cutoff
+                    .duration_since(SystemTime::UNIX_EPOCH)
+                    .map(|x| x.as_secs())
+                    .unwrap_or(0);
+                s > cutoff_epoch
+            }
+            None => false,
+        }
+    }
+}
+
+impl std::fmt::Display for VerificationRecord {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "({:?},{:?})",
+            self.name.to_string(),
+            self.address.chars().take(8).collect::<String>()
+        )
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn local_from_inet() {
+        let addr = "bob@example.com";
+        let parsed = Nip05Name::try_from(addr);
+        assert!(parsed.is_ok());
+        let v = parsed.unwrap();
+        assert_eq!(v.local, "bob");
+        assert_eq!(v.domain, "example.com");
+    }
+
+    #[test]
+    fn not_enough_sep() {
+        let addr = "bob_example.com";
+        let parsed = Nip05Name::try_from(addr);
+        assert!(parsed.is_err());
+    }
+
+    #[test]
+    fn too_many_sep() {
+        let addr = "foo@bob@example.com";
+        let parsed = Nip05Name::try_from(addr);
+        assert!(parsed.is_err());
+    }
+
+    #[test]
+    fn invalid_local_name() {
+        // non-permitted ascii chars
+        assert!(Nip05Name::try_from("foo!@example.com").is_err());
+        assert!(Nip05Name::try_from("foo @example.com").is_err());
+        assert!(Nip05Name::try_from(" foo@example.com").is_err());
+        assert!(Nip05Name::try_from("f oo@example.com").is_err());
+        assert!(Nip05Name::try_from("foo<@example.com").is_err());
+        // unicode dash
+        assert!(Nip05Name::try_from("foo‐bar@example.com").is_err());
+        // emoji
+        assert!(Nip05Name::try_from("foo😭bar@example.com").is_err());
+    }
+
+    #[test]
+    fn invalid_domain_name() {
+        // non-permitted ascii chars
+        assert!(Nip05Name::try_from("foo@examp!e.com").is_err());
+        assert!(Nip05Name::try_from("foo@ example.com").is_err());
+        assert!(Nip05Name::try_from("foo@exa mple.com").is_err());
+        assert!(Nip05Name::try_from("foo@example .com").is_err());
+        assert!(Nip05Name::try_from("foo@exa<mple.com").is_err());
+    }
+}
diff --git a/crates/rooch-ws-relay/src/server/notice.rs b/crates/rooch-ws-relay/src/server/notice.rs
new file mode 100644
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/notice.rs
+//! User notices generated on events or relay operation
+
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub enum EventResultStatus {
+    Saved,
+    Duplicate,
+    Invalid,
+    Blocked,
+    RateLimited,
+    Error,
+    Restricted,
+}
+
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub struct EventResult {
+    pub id: String,
+    pub msg: String,
+    pub status: EventResultStatus,
+}
+
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub enum Notice {
+    Message(String),
+    EventResult(EventResult),
+}
+
+impl EventResultStatus {
+    #[must_use]
+    pub fn is_ok(&self) -> bool {
+        match self {
+            Self::Duplicate | Self::Saved => true,
+            Self::Invalid | Self::Blocked | Self::RateLimited | Self::Error | Self::Restricted => false,
+        }
+    }
+
+    #[must_use]
+    pub fn prefix(&self) -> &'static str {
+        match self {
+            Self::Saved => "saved",
+            Self::Duplicate => "duplicate",
+            Self::Invalid => "invalid",
+            Self::Blocked => "blocked",
+            Self::RateLimited => "rate-limited",
+            Self::Error => "error",
+            Self::Restricted => "restricted",
+        }
+    }
+}
+
+impl Notice {
+    //pub fn err(err: error::Error, id: String) -> Notice {
+    //    Notice::err_msg(format!("{}", err), id)
+    //}
+
+    #[must_use]
+    pub fn message(msg: String) -> Notice {
+        Notice::Message(msg)
+    }
+
+    fn prefixed(id: String, msg: &str, status: EventResultStatus) -> Notice {
+        let msg = format!("{}: {}", status.prefix(), msg);
+        Notice::EventResult(EventResult { id, msg, status })
+    }
+
+    #[must_use]
+    pub fn invalid(id: String, msg: &str) -> Notice {
+        Notice::prefixed(id, msg, EventResultStatus::Invalid)
+    }
+
+    #[must_use]
+    pub fn blocked(id: String, msg: &str) -> Notice {
+        Notice::prefixed(id, msg, EventResultStatus::Blocked)
+    }
+
+    #[must_use]
+    pub fn rate_limited(id: String, msg: &str) -> Notice {
+        Notice::prefixed(id, msg, EventResultStatus::RateLimited)
+    }
+
+    #[must_use]
+    pub fn duplicate(id: String) -> Notice {
+        Notice::prefixed(id, "", EventResultStatus::Duplicate)
+    }
+
+    #[must_use]
+    pub fn error(id: String, msg: &str) -> Notice {
+        Notice::prefixed(id, msg, EventResultStatus::Error)
+    }
+
+    #[must_use]
+    pub fn restricted(id: String, msg: &str) -> Notice {
+        Notice::prefixed(id, msg, EventResultStatus::Restricted)
+    }
+
+    #[must_use]
+    pub fn saved(id: String) -> Notice {
+        Notice::EventResult(EventResult {
+            id,
+            msg: "".into(),
+            status: EventResultStatus::Saved,
+        })
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/server/subscription.rs b/crates/rooch-ws-relay/src/server/subscription.rs
new file mode 100644
index 0000000000..ce4abef3dd
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/subscription.rs
@@ -0,0 +1,650 @@
+//! Subscription and filter parsing
+use super::error::Result;
+use super::event::Event;
+use serde::de::Unexpected;
+use serde::ser::SerializeMap;
+use serde::{Deserialize, Deserializer, Serialize, Serializer};
+use serde_json::Value;
+use std::collections::HashMap;
+use std::collections::HashSet;
+
+/// Subscription identifier and set of request filters
+#[derive(Serialize, PartialEq, Eq, Debug, Clone)]
+pub struct Subscription {
+    pub id: String,
+    pub filters: Vec<ReqFilter>,
+}
+
+/// Filter for requests
+///
+/// Corresponds to client-provided subscription request elements. Any
+/// element can be present if it should be used in filtering, or
+/// absent ([`None`]) if it should be ignored.
+#[derive(PartialEq, Eq, Debug, Clone)]
+pub struct ReqFilter {
+    /// Event hashes
+    pub ids: Option<Vec<String>>,
+    /// Event kinds
+    pub kinds: Option<Vec<u64>>,
+    /// Events published after this time
+    pub since: Option<u64>,
+    /// Events published before this time
+    pub until: Option<u64>,
+    /// List of author public keys
+    pub authors: Option<Vec<String>>,
+    /// Limit number of results
+    pub limit: Option<u64>,
+    /// Set of tags
+    pub tags: Option<HashMap<char, HashSet<String>>>,
+    /// Force no matches due to malformed data
+    // we can't represent it in the req filter, so we don't want to
+    // erroneously match. This basically indicates the req tried to
+    // do something invalid.
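+    // (e.g. a multi-character tag query like "#foo" sets this, so the
+    // filter matches nothing.)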
+    pub force_no_match: bool,
+}
+
+impl Serialize for ReqFilter {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: Serializer,
+    {
+        let mut map = serializer.serialize_map(None)?;
+        if let Some(ids) = &self.ids {
+            map.serialize_entry("ids", &ids)?;
+        }
+        if let Some(kinds) = &self.kinds {
+            map.serialize_entry("kinds", &kinds)?;
+        }
+        if let Some(until) = &self.until {
+            map.serialize_entry("until", until)?;
+        }
+        if let Some(since) = &self.since {
+            map.serialize_entry("since", since)?;
+        }
+        if let Some(limit) = &self.limit {
+            map.serialize_entry("limit", limit)?;
+        }
+        if let Some(authors) = &self.authors {
+            map.serialize_entry("authors", &authors)?;
+        }
+        // serialize tags
+        if let Some(tags) = &self.tags {
+            for (k, v) in tags {
+                let vals: Vec<&String> = v.iter().collect();
+                map.serialize_entry(&format!("#{k}"), &vals)?;
+            }
+        }
+        map.end()
+    }
+}
+
+impl<'de> Deserialize<'de> for ReqFilter {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let received: Value = Deserialize::deserialize(deserializer)?;
+        let filter = received.as_object().ok_or_else(|| {
+            serde::de::Error::invalid_type(
+                Unexpected::Other("reqfilter is not an object"),
+                &"a json object",
+            )
+        })?;
+        let mut rf = ReqFilter {
+            ids: None,
+            kinds: None,
+            since: None,
+            until: None,
+            authors: None,
+            limit: None,
+            tags: None,
+            force_no_match: false,
+        };
+        let empty_string = "".into();
+        let mut ts = None;
+        // iterate through each key, and assign values that exist
+        for (key, val) in filter {
+            // ids
+            if key == "ids" {
+                let raw_ids: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+                if let Some(a) = raw_ids.as_ref() {
+                    if a.contains(&empty_string) {
+                        return Err(serde::de::Error::invalid_type(
+                            Unexpected::Other("prefix matches must not be empty strings"),
+                            &"a json object",
+                        ));
+                    }
+                }
+                rf.ids = raw_ids;
+            } else if key == "kinds" {
+                rf.kinds = Deserialize::deserialize(val).ok();
+            } else if key == "since" {
+                rf.since = Deserialize::deserialize(val).ok();
+            } else if key == "until" {
+                rf.until = Deserialize::deserialize(val).ok();
+            } else if key == "limit" {
+                rf.limit = Deserialize::deserialize(val).ok();
+            } else if key == "authors" {
+                let raw_authors: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+                if let Some(a) = raw_authors.as_ref() {
+                    if a.contains(&empty_string) {
+                        return Err(serde::de::Error::invalid_type(
+                            Unexpected::Other("prefix matches must not be empty strings"),
+                            &"a json object",
+                        ));
+                    }
+                }
+                rf.authors = raw_authors;
+            } else if key.starts_with('#') && key.len() > 1 && val.is_array() {
+                if let Some(tag_search) = tag_search_char_from_filter(key) {
+                    if ts.is_none() {
+                        // Initialize the tag map if necessary
+                        ts = Some(HashMap::new());
+                    }
+                    if let Some(m) = ts.as_mut() {
+                        let tag_vals: Option<Vec<String>> = Deserialize::deserialize(val).ok();
+                        if let Some(v) = tag_vals {
+                            let hs = v.into_iter().collect::<HashSet<String>>();
+                            m.insert(tag_search.to_owned(), hs);
+                        }
+                    };
+                } else {
+                    // tag search that is multi-character, don't add to subscription
+                    rf.force_no_match = true;
+                    continue;
+                }
+            }
+        }
+        rf.tags = ts;
+        Ok(rf)
+    }
+}
+
+/// Attempt to form a single-char identifier from a tag search filter
+fn tag_search_char_from_filter(tagname: &str) -> Option<char> {
+    let tagname_nohash = &tagname[1..];
+    // We return the tag character if and only if the tagname consists
+    // of a single char.
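+    // e.g. "#e" yields Some('e'), while "#foo" yields None.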
+    let mut tagnamechars = tagname_nohash.chars();
+    let firstchar = tagnamechars.next();
+    match firstchar {
+        Some(_) => {
+            // check second char
+            if tagnamechars.next().is_none() {
+                firstchar
+            } else {
+                None
+            }
+        }
+        None => None,
+    }
+}
+
+impl<'de> Deserialize<'de> for Subscription {
+    /// Custom deserializer for subscriptions, which have a more
+    /// complex structure than the other message types.
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        let mut v: Value = Deserialize::deserialize(deserializer)?;
+        // this should be a 3-or-more element array.
+        // verify the first element is a String, REQ
+        // get the subscription from the second element.
+        // convert each of the remaining objects into filters
+
+        // check for array
+        let va = v
+            .as_array_mut()
+            .ok_or_else(|| serde::de::Error::custom("not array"))?;
+
+        // check length
+        if va.len() < 3 {
+            return Err(serde::de::Error::custom("not enough fields"));
+        }
+        let mut i = va.iter_mut();
+        // get command ("REQ") and ensure it is a string
+        let req_cmd_str: serde_json::Value = i.next().unwrap().take();
+        let req = req_cmd_str
+            .as_str()
+            .ok_or_else(|| serde::de::Error::custom("first element of request was not a string"))?;
+        if req != "REQ" {
+            return Err(serde::de::Error::custom("missing REQ command"));
+        }
+
+        // ensure sub id is a string
+        let sub_id_str: serde_json::Value = i.next().unwrap().take();
+        let sub_id = sub_id_str
+            .as_str()
+            .ok_or_else(|| serde::de::Error::custom("missing subscription id"))?;
+
+        let mut filters = vec![];
+        for fv in i {
+            let f: ReqFilter = serde_json::from_value(fv.take())
+                .map_err(|_| serde::de::Error::custom("could not parse filter"))?;
+            // create indexes
+            filters.push(f);
+        }
+        filters.dedup();
+        Ok(Subscription {
+            id: sub_id.to_owned(),
+            filters,
+        })
+    }
+}
+
+impl Subscription {
+    /// Get a copy of the subscription identifier.
+    #[must_use]
+    pub fn get_id(&self) -> String {
+        self.id.clone()
+    }
+
+    /// Determine if any filter is requesting historical (database)
+    /// queries. If every filter has limit:0, we do not need to query the DB.
+    #[must_use]
+    pub fn needs_historical_events(&self) -> bool {
+        self.filters.iter().any(|f| f.limit != Some(0))
+    }
+
+    /// Determine if this subscription matches a given [`Event`]. Any
+    /// individual filter match is sufficient.
+    #[must_use]
+    pub fn interested_in_event(&self, event: &Event) -> bool {
+        for f in &self.filters {
+            if f.interested_in_event(event) {
+                return true;
+            }
+        }
+        false
+    }
+}
+
+fn prefix_match(prefixes: &[String], target: &str) -> bool {
+    for prefix in prefixes {
+        if target.starts_with(prefix) {
+            return true;
+        }
+    }
+    // none matched
+    false
+}
+
+impl ReqFilter {
+    fn ids_match(&self, event: &Event) -> bool {
+        self.ids
+            .as_ref()
+            .map_or(true, |vs| prefix_match(vs, &event.id))
+    }
+
+    fn authors_match(&self, event: &Event) -> bool {
+        self.authors
+            .as_ref()
+            .map_or(true, |vs| prefix_match(vs, &event.pubkey))
+    }
+
+    fn delegated_authors_match(&self, event: &Event) -> bool {
+        if let Some(delegated_pubkey) = &event.delegated_by {
+            self.authors
+                .as_ref()
+                .map_or(true, |vs| prefix_match(vs, delegated_pubkey))
+        } else {
+            false
+        }
+    }
+
+    fn tag_match(&self, event: &Event) -> bool {
+        // get the hashset from the filter.
+        if let Some(map) = &self.tags {
+            for (key, val) in map.iter() {
+                let tag_match = event.generic_tag_val_intersect(*key, val);
+                // if there is no match for this tag, the match fails.
+                if !tag_match {
+                    return false;
+                }
+                // if there was a match, we move on to the next one.
+            }
+        }
+        // if the tag map is empty, the match succeeds (there was no filter)
+        true
+    }
+
+    /// Check if this filter either matches, or does not care about the kind.
+    fn kind_match(&self, kind: u64) -> bool {
+        self.kinds.as_ref().map_or(true, |ks| ks.contains(&kind))
+    }
+
+    /// Determine if all populated fields in this filter match the provided event.
+    #[must_use]
+    pub fn interested_in_event(&self, event: &Event) -> bool {
+        // self.id.as_ref().map(|v| v == &event.id).unwrap_or(true)
+        self.ids_match(event)
+            && self.since.map_or(true, |t| event.created_at > t)
+            && self.until.map_or(true, |t| event.created_at < t)
+            && self.kind_match(event.kind)
+            && (self.authors_match(event) || self.delegated_authors_match(event))
+            && self.tag_match(event)
+            && !self.force_no_match
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn empty_request_parse() -> Result<()> {
+        let raw_json = "[\"REQ\",\"some-id\",{}]";
+        let s: Subscription = serde_json::from_str(raw_json)?;
+        assert_eq!(s.id, "some-id");
+        assert_eq!(s.filters.len(), 1);
+        assert_eq!(s.filters.get(0).unwrap().authors, None);
+        Ok(())
+    }
+
+    #[test]
+    fn incorrect_header() {
+        let raw_json = "[\"REQUEST\",\"some-id\",\"{}\"]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    }
+
+    #[test]
+    fn req_missing_filters() {
+        let raw_json = "[\"REQ\",\"some-id\"]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    }
+
+    #[test]
+    fn req_empty_authors_prefix() {
+        let raw_json = "[\"REQ\",\"some-id\",{\"authors\": [\"\"]}]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    }
+
+    #[test]
+    fn req_empty_ids_prefix() {
+        let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\"]}]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    }
+
+    #[test]
+    fn req_empty_ids_prefix_mixed() {
+        let raw_json = "[\"REQ\",\"some-id\",{\"ids\": [\"\",\"aaa\"]}]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_err());
+    }
+
+    #[test]
+    fn legacy_filter() {
+        // legacy field in filter
+        let raw_json = "[\"REQ\",\"some-id\",{\"kind\": 3}]";
+        assert!(serde_json::from_str::<Subscription>(raw_json).is_ok());
+    }
+
+    #[test]
+    fn dupe_filter() -> Result<()> {
+        let raw_json = r#"["REQ","some-id",{"kinds": [1984]}, {"kinds": [1984]}]"#;
+        let s: Subscription = serde_json::from_str(raw_json)?;
+        assert_eq!(s.filters.len(), 1);
+        Ok(())
+    }
+
+    #[test]
+    fn dupe_filter_many() -> Result<()> {
+        // duplicate filters in different order
+        let raw_json = r#"["REQ","some-id",{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]},{"kinds":[1984]}]"#;
+        let s: Subscription = serde_json::from_str(raw_json)?;
+        assert_eq!(s.filters.len(), 1);
+        Ok(())
+    }
+
+    #[test]
+    fn author_filter() -> Result<()> {
+        let raw_json = r#"["REQ","some-id",{"authors": ["test-author-id"]}]"#;
+        let s: Subscription = serde_json::from_str(raw_json)?;
+        assert_eq!(s.id, "some-id");
+        assert_eq!(s.filters.len(), 1);
+        let first_filter = s.filters.get(0).unwrap();
+        assert_eq!(
+            first_filter.authors,
+            Some(vec!("test-author-id".to_owned()))
+        );
+        Ok(())
+    }
+
+    #[test]
+    fn interest_author_prefix_match() -> Result<()> {
+        // subscription with a filter for ID
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors": ["abc"]}]"#)?;
+        let e = Event {
+            id: "foo".to_owned(),
+            pubkey: "abcd".to_owned(),
+            delegated_by: None,
+            created_at: 0,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+
assert!(s.interested_in_event(&e)); + Ok(()) + } + + #[test] + fn interest_id_prefix_match() -> Result<()> { + // subscription with a filter for ID + let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"]}]"#)?; + let e = Event { + id: "abcd".to_owned(), + pubkey: "".to_owned(), + delegated_by: None, + created_at: 0, + kind: 0, + tags: Vec::new(), + content: "".to_owned(), + sig: "".to_owned(), + tagidx: None, + }; + assert!(s.interested_in_event(&e)); + Ok(()) + } + + #[test] + fn interest_id_nomatch() -> Result<()> { + // subscription with a filter for ID + let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"ids": ["xyz"]}]"#)?; + let e = Event { + id: "abcde".to_owned(), + pubkey: "".to_owned(), + delegated_by: None, + created_at: 0, + kind: 0, + tags: Vec::new(), + content: "".to_owned(), + sig: "".to_owned(), + tagidx: None, + }; + assert!(!s.interested_in_event(&e)); + Ok(()) + } + + #[test] + fn interest_until() -> Result<()> { + // subscription with a filter for ID and time + let s: Subscription = + serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "until": 1000}]"#)?; + let e = Event { + id: "abc".to_owned(), + pubkey: "".to_owned(), + delegated_by: None, + created_at: 50, + kind: 0, + tags: Vec::new(), + content: "".to_owned(), + sig: "".to_owned(), + tagidx: None, + }; + assert!(s.interested_in_event(&e)); + Ok(()) + } + + #[test] + fn interest_range() -> Result<()> { + // subscription with a filter for ID and time + let s_in: Subscription = + serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 200}]"#)?; + let s_before: Subscription = + serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 100, "until": 140}]"#)?; + let s_after: Subscription = + serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 160, "until": 200}]"#)?; + let e = Event { + id: "abc".to_owned(), + pubkey: "".to_owned(), + delegated_by: None, + created_at: 150, + kind: 0, + tags: Vec::new(), + content: "".to_owned(), + sig: "".to_owned(), + tagidx: None, + }; + assert!(s_in.interested_in_event(&e)); + assert!(!s_before.interested_in_event(&e)); + assert!(!s_after.interested_in_event(&e)); + Ok(()) + } + + #[test] + fn interest_time_and_id() -> Result<()> { + // subscription with a filter for ID and time + let s: Subscription = + serde_json::from_str(r#"["REQ","xyz",{"ids": ["abc"], "since": 1000}]"#)?; + let e = Event { + id: "abc".to_owned(), + pubkey: "".to_owned(), + delegated_by: None, + created_at: 50, + kind: 0, + tags: Vec::new(), + content: "".to_owned(), + sig: "".to_owned(), + tagidx: None, + }; + assert!(!s.interested_in_event(&e)); + Ok(()) + } + + #[test] + fn interest_time_and_id2() -> Result<()> { + // subscription with a filter for ID and time + let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc", "since": 1000}]"#)?; + let e = Event { + id: "abc".to_owned(), + pubkey: "".to_owned(), + delegated_by: None, + created_at: 1001, + kind: 0, + tags: Vec::new(), + content: "".to_owned(), + sig: "".to_owned(), + tagidx: None, + }; + assert!(s.interested_in_event(&e)); + Ok(()) + } + + #[test] + fn interest_id() -> Result<()> { + // subscription with a filter for ID + let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"id":"abc"}]"#)?; + let e = Event { + id: "abc".to_owned(), + pubkey: "".to_owned(), + delegated_by: None, + created_at: 0, + kind: 0, + tags: Vec::new(), + content: "".to_owned(), + sig: "".to_owned(), + tagidx: None, + }; + assert!(s.interested_in_event(&e)); + Ok(()) + } + + #[test] + 
    fn authors_single() -> Result<()> {
+        // subscription with a filter for ID
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc"]}]"#)?;
+        let e = Event {
+            id: "123".to_owned(),
+            pubkey: "abc".to_owned(),
+            delegated_by: None,
+            created_at: 0,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s.interested_in_event(&e));
+        Ok(())
+    }
+
+    #[test]
+    fn authors_multi_pubkey() -> Result<()> {
+        // check for any of a set of authors, against the pubkey
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?;
+        let e = Event {
+            id: "123".to_owned(),
+            pubkey: "bcd".to_owned(),
+            delegated_by: None,
+            created_at: 0,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(s.interested_in_event(&e));
+        Ok(())
+    }
+
+    #[test]
+    fn authors_multi_no_match() -> Result<()> {
+        // check for any of a set of authors, against the pubkey
+        let s: Subscription = serde_json::from_str(r#"["REQ","xyz",{"authors":["abc", "bcd"]}]"#)?;
+        let e = Event {
+            id: "123".to_owned(),
+            pubkey: "xyz".to_owned(),
+            delegated_by: None,
+            created_at: 0,
+            kind: 0,
+            tags: Vec::new(),
+            content: "".to_owned(),
+            sig: "".to_owned(),
+            tagidx: None,
+        };
+        assert!(!s.interested_in_event(&e));
+        Ok(())
+    }
+
+    #[test]
+    fn serialize_filter() -> Result<()> {
+        let s: Subscription = serde_json::from_str(
+            r##"["REQ","xyz",{"authors":["abc", "bcd"], "since": 10, "until": 20, "limit":100, "#e": ["foo", "bar"], "#d": ["test"]}]"##,
+        )?;
+        let f = s.filters.get(0);
+        let serialized = serde_json::to_string(&f)?;
+        let serialized_wrapped = format!(r##"["REQ", "xyz",{}]"##, serialized);
+        let parsed: Subscription = serde_json::from_str(&serialized_wrapped)?;
+        let parsed_filter = parsed.filters.get(0);
+        if let Some(pf) = parsed_filter {
+            assert_eq!(pf.since, Some(10));
+            assert_eq!(pf.until, Some(20));
+            assert_eq!(pf.limit, Some(100));
+        } else {
+            assert!(false, "filter could not be parsed");
+        }
+        Ok(())
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch-ws-relay/src/server/utils.rs b/crates/rooch-ws-relay/src/server/utils.rs
new file mode 100644
index 0000000000..172fb58ec3
--- /dev/null
+++ b/crates/rooch-ws-relay/src/server/utils.rs
@@ -0,0 +1,72 @@
+//! Common utility functions
+use bech32::FromBase32;
+use std::time::SystemTime;
+use url::Url;
+
+/// Seconds since 1970.
+#[must_use]
+pub fn unix_time() -> u64 {
+    SystemTime::now()
+        .duration_since(SystemTime::UNIX_EPOCH)
+        .map(|x| x.as_secs())
+        .unwrap_or(0)
+}
+
+/// Check if a string contains only hex characters.
+#[must_use]
+pub fn is_hex(s: &str) -> bool {
+    s.chars().all(|x| char::is_ascii_hexdigit(&x))
+}
+
+/// Check if string is a nip19 string
+pub fn is_nip19(s: &str) -> bool {
+    s.starts_with("npub") || s.starts_with("note")
+}
+
+pub fn nip19_to_hex(s: &str) -> Result<String, bech32::Error> {
+    let (_hrp, data, _checksum) = bech32::decode(s)?;
+    let data = Vec::<u8>::from_base32(&data)?;
+    Ok(hex::encode(data))
+}
+
+/// Check if a string contains only lower-case hex chars.
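+/// (e.g. "deadbeef12" passes, while "DEADBEEF" does not.)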
+#[must_use]
+pub fn is_lower_hex(s: &str) -> bool {
+    s.chars().all(|x| {
+        (char::is_ascii_lowercase(&x) || char::is_ascii_digit(&x)) && char::is_ascii_hexdigit(&x)
+    })
+}
+
+pub fn host_str(url: &String) -> Option<String> {
+    Url::parse(url)
+        .ok()
+        .and_then(|u| u.host_str().map(|s| s.to_string()))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn lower_hex() {
+        let hexstr = "abcd0123";
+        assert!(is_lower_hex(hexstr));
+    }
+
+    #[test]
+    fn nip19() {
+        let hexkey = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
+        let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
+        assert!(!is_nip19(hexkey));
+        assert!(is_nip19(nip19key));
+    }
+
+    #[test]
+    fn nip19_hex() {
+        let nip19key = "npub180cvv07tjdrrgpa0j7j7tmnyl2yr6yr7l8j4s3evf6u64th6gkwsyjh6w6";
+        let expected = "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d";
+        let got = nip19_to_hex(nip19key).unwrap();
+
+        assert_eq!(expected, got);
+    }
+}
\ No newline at end of file
diff --git a/crates/rooch/Cargo.toml b/crates/rooch/Cargo.toml
index 4917c4e01c..b9b44e8833 100644
--- a/crates/rooch/Cargo.toml
+++ b/crates/rooch/Cargo.toml
@@ -32,6 +32,9 @@ termcolor = { workspace = true }
 itertools = { workspace = true }
 hex = { workspace = true }
 regex = { workspace = true }
+tracing-appender = { workspace = true }
+tracing-subscriber = { workspace = true }
+console-subscriber = { workspace = true }
 move-bytecode-utils = { workspace = true }
 move-binary-format = { workspace = true }
 
@@ -65,3 +68,4 @@ rooch-rpc-api = { workspace = true }
 rooch-rpc-server = { workspace = true }
 rooch-rpc-client = { workspace = true }
 rooch-integration-test-runner = { workspace = true }
+rooch-ws-relay = { workspace = true }
diff --git a/crates/rooch/src/commands/mod.rs b/crates/rooch/src/commands/mod.rs
index c8d2060caa..de3ccf7c0c 100644
--- a/crates/rooch/src/commands/mod.rs
+++ b/crates/rooch/src/commands/mod.rs
@@ -8,5 +8,6 @@ pub mod move_cli;
 pub mod object;
 pub mod resource;
 pub mod server;
+pub mod relay;
 pub mod state;
 pub mod transaction;
diff --git a/crates/rooch/src/commands/relay/commands/mod.rs b/crates/rooch/src/commands/relay/commands/mod.rs
new file mode 100644
index 0000000000..51cfca8c2e
--- /dev/null
+++ b/crates/rooch/src/commands/relay/commands/mod.rs
@@ -0,0 +1,4 @@
+// Copyright (c) RoochNetwork
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod start;
diff --git a/crates/rooch/src/commands/relay/commands/start.rs b/crates/rooch/src/commands/relay/commands/start.rs
new file mode 100644
index 0000000000..f7f9939aa8
--- /dev/null
+++ b/crates/rooch/src/commands/relay/commands/start.rs
@@ -0,0 +1,111 @@
+// Copyright (c) RoochNetwork
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::cli_types::CommandAction;
+use async_trait::async_trait;
+use clap::Parser;
+use rooch_ws_relay::Service;
+use rooch_types::error::{RoochError, RoochResult};
+use tokio::signal::ctrl_c;
+#[cfg(unix)]
+use tokio::signal::unix::{signal, SignalKind};
+use tracing::info;
+use std::sync::mpsc as syncmpsc;
+use std::sync::mpsc::{Receiver as MpscReceiver, Sender as MpscSender};
+use tracing_appender::non_blocking::WorkerGuard;
+use tracing_subscriber::EnvFilter;
+use rooch_ws_relay::server::config;
+use console_subscriber::ConsoleLayer;
+
+#[derive(Debug, Parser)]
+pub struct StartCommand {
+    #[clap(long = "database")]
+    /// Use a directory as the location of the database
+    db: Option<String>,
+
+    #[clap(long)]
+    /// Use a file name as the location of the config file
+    config: Option<String>,
+}
+
+#[async_trait]
+impl CommandAction<()> for StartCommand {
+    async fn execute(self) -> RoochResult<()> {
+        // get config file name from args
+        let config_file_arg = self.config;
+
+        let mut _log_guard: Option<WorkerGuard> = None;
+
+        // configure settings from the config file (defaults to config.toml)
+        // replace default settings with those read from the config file
+        let mut settings = config::Settings::new(&config_file_arg);
+
+        // setup tracing
+        if settings.diagnostics.tracing {
+            // enable tracing with tokio-console
+            ConsoleLayer::builder().with_default_env().init();
+        } else {
+            // standard logging
+            if let Some(path) = &settings.logging.folder_path {
+                // write logs to a folder
+                let prefix = match &settings.logging.file_prefix {
+                    Some(p) => p.as_str(),
+                    None => "relay",
+                };
+                let file_appender = tracing_appender::rolling::daily(path, prefix);
+                let (non_blocking, guard) = tracing_appender::non_blocking(file_appender);
+                let filter = EnvFilter::from_default_env();
+                // assign to a variable that is not dropped till the program ends
+                _log_guard = Some(guard);
+
+                tracing_subscriber::fmt()
+                    .with_env_filter(filter)
+                    .with_writer(non_blocking)
+                    .try_init()
+                    .unwrap();
+            } else {
+                // write to stdout
+                tracing_subscriber::fmt::try_init().unwrap();
+            }
+        }
+        info!("Starting up ws relay");
+
+        // get database directory from args
+        let db_dir_arg = self.db;
+
+        // update with database location from args, if provided
+        if let Some(db_dir) = db_dir_arg {
+            settings.database.data_directory = db_dir;
+        }
+        // we should have a 'control plane' channel to monitor and bump
+        // the server. this will let us do stuff like clear the database,
+        // shutdown, etc.; for now all this does is initiate shutdown if
+        // `()` is sent. This will change in the future, this is just a
+        // stopgap to shutdown the relay when it is used as a library.
+        let (_, ctrl_rx): (MpscSender<()>, MpscReceiver<()>) = syncmpsc::channel();
+        let mut service = Service::new();
+        service.start(settings, ctrl_rx).map_err(RoochError::from)?;
+
+        #[cfg(unix)]
+        {
+            let mut sig_int = signal(SignalKind::interrupt()).map_err(RoochError::from)?;
+            let mut sig_term = signal(SignalKind::terminate()).map_err(RoochError::from)?;
+            tokio::select! {
+                _ = sig_int.recv() => info!("receive SIGINT"),
+                _ = sig_term.recv() => info!("receive SIGTERM"),
+                _ = ctrl_c() => info!("receive Ctrl C"),
+            }
+        }
+        #[cfg(not(unix))]
+        {
+            tokio::select! {
+                _ = ctrl_c() => info!("receive Ctrl C"),
+            }
+        }
+
+        service.stop().map_err(RoochError::from)?;
+
+        info!("Shutdown Relay");
+        Ok(())
+    }
+}
diff --git a/crates/rooch/src/commands/relay/mod.rs b/crates/rooch/src/commands/relay/mod.rs
new file mode 100644
index 0000000000..ba9f9c6cda
--- /dev/null
+++ b/crates/rooch/src/commands/relay/mod.rs
@@ -0,0 +1,31 @@
+// Copyright (c) RoochNetwork
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod commands;
+
+use crate::cli_types::CommandAction;
+use async_trait::async_trait;
+use clap::Parser;
+use commands::start::StartCommand;
+use rooch_types::error::RoochResult;
+
+#[derive(Parser)]
+pub struct Relay {
+    #[clap(subcommand)]
+    cmd: RelayCommand,
+}
+
+#[async_trait]
+impl CommandAction<String> for Relay {
+    async fn execute(self) -> RoochResult<String> {
+        match self.cmd {
+            RelayCommand::Start(start) => start.execute_serialized().await,
+        }
+    }
+}
+
+#[derive(clap::Subcommand)]
+#[clap(name = "relay")]
+pub enum RelayCommand {
+    Start(StartCommand),
+}
diff --git a/crates/rooch/src/lib.rs b/crates/rooch/src/lib.rs
index d41a72b365..8e60198884 100644
--- a/crates/rooch/src/lib.rs
+++ b/crates/rooch/src/lib.rs
@@ -6,6 +6,7 @@ use cli_types::CommandAction;
 use commands::{
     account::Account, init::Init, move_cli::MoveCli, object::ObjectCommand,
     resource::ResourceCommand, server::Server, state::StateCommand, transaction::Transaction,
+    relay::Relay,
 };
 use rooch_types::error::RoochResult;
 
@@ -26,6 +27,7 @@ pub enum Command {
     Init(Init),
     Move(MoveCli),
     Server(Server),
+    Relay(Relay),
     State(StateCommand),
     Object(ObjectCommand),
     Resource(ResourceCommand),
@@ -38,6 +40,7 @@ pub async fn run_cli(opt: RoochCli) -> RoochResult<String> {
         Command::Account(account) => account.execute().await,
         Command::Move(move_cli) => move_cli.execute().await,
         Command::Server(server) => server.execute().await,
+        Command::Relay(relay) => relay.execute().await,
         Command::Init(init) => init.execute_serialized().await,
        Command::State(state) => state.execute_serialized().await,
         Command::Object(object) => object.execute_serialized().await,

From 5a3607a855b37abc452db787958c974544cc4337 Mon Sep 17 00:00:00 2001
From: Feliciss <10203-feliciss@users.noreply.0xacab.org>
Date: Thu, 6 Jul 2023 21:42:30 -0600
Subject: [PATCH 3/3] [naming] rename start to serve to differentiate.
---
 crates/rooch/src/commands/relay/commands/mod.rs            | 2 +-
 .../src/commands/relay/commands/{start.rs => serve.rs}     | 6 +++---
 crates/rooch/src/commands/relay/mod.rs                     | 6 +++---
 3 files changed, 7 insertions(+), 7 deletions(-)
 rename crates/rooch/src/commands/relay/commands/{start.rs => serve.rs} (97%)

diff --git a/crates/rooch/src/commands/relay/commands/mod.rs b/crates/rooch/src/commands/relay/commands/mod.rs
index 51cfca8c2e..7fd7008620 100644
--- a/crates/rooch/src/commands/relay/commands/mod.rs
+++ b/crates/rooch/src/commands/relay/commands/mod.rs
@@ -1,4 +1,4 @@
 // Copyright (c) RoochNetwork
 // SPDX-License-Identifier: Apache-2.0
 
-pub mod start;
+pub mod serve;
diff --git a/crates/rooch/src/commands/relay/commands/start.rs b/crates/rooch/src/commands/relay/commands/serve.rs
similarity index 97%
rename from crates/rooch/src/commands/relay/commands/start.rs
rename to crates/rooch/src/commands/relay/commands/serve.rs
index f7f9939aa8..df8f71cdc0 100644
--- a/crates/rooch/src/commands/relay/commands/start.rs
+++ b/crates/rooch/src/commands/relay/commands/serve.rs
@@ -18,7 +18,7 @@ use rooch_ws_relay::server::config;
 use console_subscriber::ConsoleLayer;
 
 #[derive(Debug, Parser)]
-pub struct StartCommand {
+pub struct ServeCommand {
     #[clap(long = "database")]
     /// Use a directory as the location of the database
     db: Option<String>,
@@ -29,7 +29,7 @@
 }
 
 #[async_trait]
-impl CommandAction<()> for StartCommand {
+impl CommandAction<()> for ServeCommand {
     async fn execute(self) -> RoochResult<()> {
         // get config file name from args
         let config_file_arg = self.config;
@@ -68,7 +68,7 @@ impl CommandAction<()> for StartCommand {
                 tracing_subscriber::fmt::try_init().unwrap();
             }
         }
-        info!("Starting up ws relay");
+        info!("Serving Rooch ws relay");
 
         // get database directory from args
         let db_dir_arg = self.db;
diff --git a/crates/rooch/src/commands/relay/mod.rs b/crates/rooch/src/commands/relay/mod.rs
index ba9f9c6cda..6c4a71d092 100644
--- a/crates/rooch/src/commands/relay/mod.rs
+++ b/crates/rooch/src/commands/relay/mod.rs
@@ -6,7 +6,7 @@ pub mod commands;
 use crate::cli_types::CommandAction;
 use async_trait::async_trait;
 use clap::Parser;
-use commands::start::StartCommand;
+use commands::serve::ServeCommand;
 use rooch_types::error::RoochResult;
 
 #[derive(Parser)]
@@ -19,7 +19,7 @@
 impl CommandAction<String> for Relay {
     async fn execute(self) -> RoochResult<String> {
         match self.cmd {
-            RelayCommand::Start(start) => start.execute_serialized().await,
+            RelayCommand::Serve(serve) => serve.execute_serialized().await,
         }
     }
 }
@@ -27,5 +27,5 @@ impl CommandAction<String> for Relay {
 #[derive(clap::Subcommand)]
 #[clap(name = "relay")]
 pub enum RelayCommand {
-    Start(StartCommand),
+    Serve(ServeCommand),
 }
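
For reference, the subscription matching added by this series can be exercised
directly. Below is a minimal sketch, assuming `Subscription` and `Event` are
publicly reachable under `rooch_ws_relay::server` (matching the file layout in
the diffs above) and that `Event`'s fields are constructible as in the unit
tests; it mirrors the `interest_author_prefix_match` test:

    use rooch_ws_relay::server::event::Event;
    use rooch_ws_relay::server::subscription::Subscription;

    fn demo() -> serde_json::Result<()> {
        // One REQ frame: subscription id "example-sub", one filter for
        // kind-1 events whose author pubkey starts with "abc".
        let sub: Subscription =
            serde_json::from_str(r#"["REQ","example-sub",{"kinds":[1],"authors":["abc"]}]"#)?;
        let event = Event {
            id: "123".to_owned(),
            pubkey: "abcdef".to_owned(), // prefix "abc" matches
            delegated_by: None,
            created_at: 0,
            kind: 1,
            tags: Vec::new(),
            content: "".to_owned(),
            sig: "".to_owned(),
            tagidx: None,
        };
        // Every populated field of some filter must match for delivery;
        // absent fields are ignored.
        assert!(sub.interested_in_event(&event));
        Ok(())
    }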